Columns and observed value statistics:

| column | type | stats |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 4 to 721 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 91 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns], nullable | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns], nullable | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | lengths 3 to 113 |
| content | string | lengths 6 to 10.2M |
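Each record below follows this schema, with the `content` column holding the full source file. As a minimal, hypothetical sketch (not part of the dump), the snippet shows one way rows keyed by these column names could be filtered if they were available as a JSON Lines export; the file name `rows.jsonl` and the export format are assumptions.

```python
# Hypothetical sketch: filter rows shaped like the schema above, assuming a
# JSON Lines export where each line is one row keyed by the column names.
# The file name "rows.jsonl" is an assumption, not something named by the dump.
import json

def iter_permissive_python(jsonl_path="rows.jsonl"):
    """Yield (repo_name, path, content) for permissive, non-vendored, non-generated Python rows."""
    with open(jsonl_path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            if (
                row["language"] == "Python"
                and row["license_type"] == "permissive"
                and not row["is_vendor"]
                and not row["is_generated"]
            ):
                yield row["repo_name"], row["path"], row["content"]

if __name__ == "__main__":
    for repo, file_path, source in iter_permissive_python():
        print(f"{repo} {file_path} ({len(source)} chars)")
```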

blob_id: 2d187fb956b0be7288f7c4d671abb7300d0dfefa | directory_id: 99dcb18a9e3ea367272f740b8cbf3c34285a0c08 | content_id: 39760921f3c9eb631a78c6231ba25becc47462f3
repo_name: googleapis/python-aiplatform | path: /google/cloud/aiplatform/docker_utils/errors.py | filename: errors.py | extension: py
snapshot_id: 926a4873f35dbea15b2fd86c0e16b5e6556d803e | revision_id: 76b95b92c1d3b87c72d754d8c02b1bca652b9a27 | branch_name: refs/heads/main
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-19T23:49:02.180075 | revision_date: 2023-08-19T13:25:59 | committer_date: 2023-08-19T13:27:27
gha_event_created_at: 2023-09-14T21:08:33 | gha_created_at: 2020-09-23T15:43:39 | gha_language: Python
github_id: 298,017,988 | star_events_count: 418 | fork_events_count: 240
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,847
content:
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
from typing import List, NoReturn
class Error(Exception):
"""A base exception for all user recoverable errors."""
def __init__(self, *args, **kwargs):
"""Initialize an Error."""
self.exit_code = kwargs.get("exit_code", 1)
class DockerError(Error):
"""Exception that passes info on a failed Docker command."""
def __init__(self, message, cmd, exit_code):
super(DockerError, self).__init__(message)
self.message = message
self.cmd = cmd
self.exit_code = exit_code
def raise_docker_error_with_command(command: List[str], return_code: int) -> NoReturn:
"""Raises DockerError with the given command and return code.
Args:
command (List[str]):
Required. The docker command that failed.
return_code (int):
Required. The return code from the command.
Raises:
DockerError: The error message is populated from the given command and return code.
"""
error_msg = textwrap.dedent(
"""
Docker failed with error code {code}.
Command: {cmd}
""".format(
code=return_code, cmd=" ".join(command)
)
)
raise DockerError(error_msg, command, return_code)

blob_id: fcb4b7b0b522050ac31889918ec6161b46ee9406 | directory_id: b58b6b1400676b2a62ce4611dd62c906beaeb50c | content_id: 6ebac5a64741132a8d470c3e22bc37597ff55af1
repo_name: bitfinexcom/bitfinex-api-py | path: /bfxapi/utils/logger.py | filename: logger.py | extension: py
snapshot_id: caf5254c2273ea35cc5887de10e2b13e60e178f8 | revision_id: e281f7be9ec36cb601094082711d86cf5ca4fa68 | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-04T23:46:35.901385 | revision_date: 2023-06-23T07:42:47 | committer_date: 2023-06-23T07:42:47
gha_event_created_at: 2023-07-28T13:19:59 | gha_created_at: 2018-11-14T11:03:37 | gha_language: Python
github_id: 157,535,962 | star_events_count: 195 | fork_events_count: 155
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,611
content:
import logging, sys
BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8)
COLOR_SEQ, ITALIC_COLOR_SEQ = "\033[1;%dm", "\033[3;%dm"
COLORS = {
"DEBUG": CYAN,
"INFO": BLUE,
"WARNING": YELLOW,
"ERROR": RED
}
RESET_SEQ = "\033[0m"
class _ColorFormatter(logging.Formatter):
def __init__(self, msg, use_color = True):
logging.Formatter.__init__(self, msg, "%d-%m-%Y %H:%M:%S")
self.use_color = use_color
def format(self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
record.name = ITALIC_COLOR_SEQ % (30 + BLACK) + record.name + RESET_SEQ
record.levelname = COLOR_SEQ % (30 + COLORS[levelname]) + levelname + RESET_SEQ
return logging.Formatter.format(self, record)
class ColorLogger(logging.Logger):
FORMAT = "[%(name)s] [%(levelname)s] [%(asctime)s] %(message)s"
def __init__(self, name, level):
logging.Logger.__init__(self, name, level)
colored_formatter = _ColorFormatter(self.FORMAT, use_color=True)
handler = logging.StreamHandler(stream=sys.stderr)
handler.setFormatter(fmt=colored_formatter)
self.addHandler(hdlr=handler)
class FileLogger(logging.Logger):
FORMAT = "[%(name)s] [%(levelname)s] [%(asctime)s] %(message)s"
def __init__(self, name, level, filename):
logging.Logger.__init__(self, name, level)
formatter = logging.Formatter(self.FORMAT)
handler = logging.FileHandler(filename=filename)
handler.setFormatter(fmt=formatter)
self.addHandler(hdlr=handler)

blob_id: 2528164a7d096e3600207663680774e9cc673872 | directory_id: e60427c9aed40554579fb7497f6a1583c6819d00 | content_id: a7e0f737996972c2813782df4d5de53713a05169
repo_name: maiyao1988/ExAndroidNativeEmu | path: /androidemu/emulator.py | filename: emulator.py | extension: py
snapshot_id: 3d21da0edee6168b58f3cba10d0cb667a6b9615f | revision_id: 0915507b92699685af5dc266114f01451aed0fd2 | branch_name: refs/heads/master
detected_licenses: [] | license_type: no_license | gha_license_id: null
visit_date: 2023-08-11T05:58:04.324095 | revision_date: 2022-06-29T10:29:36 | committer_date: 2022-06-29T10:29:36
gha_event_created_at: 2021-08-11T15:43:05 | gha_created_at: 2020-01-27T14:10:24 | gha_language: Python
github_id: 236,504,849 | star_events_count: 551 | fork_events_count: 165
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 11,902
content:
import logging
import os
import time
import importlib
import inspect
import pkgutil
import sys
import os.path
from random import randint
from unicorn import *
from unicorn.arm_const import *
from unicorn.arm64_const import *
from . import config
from . import pcb
from .const import emu_const
from .cpu.syscall_handlers import SyscallHandlers
from .cpu.syscall_hooks import SyscallHooks
from .hooker import Hooker
from .internal.modules import Modules
from .java.helpers.native_method import native_write_args
from .java.java_classloader import JavaClassLoader
from .java.java_vm import JavaVM
from .native.symbol_hooks import SymbolHooks
from .native.memory_syscall_handler import MemorySyscallHandler
from .native.memory_map import MemoryMap
from .vfs.file_system import VirtualFileSystem
from .vfs.virtual_file import VirtualFile
from .utils import misc_utils
from .scheduler import Scheduler
from .java.java_class_def import JavaClassDef
from .java.constant_values import JAVA_NULL
#logger = logging.getLogger(__name__)
#logging.getLogger().setLevel(logging.DEBUG)
class Emulator:
# https://github.com/unicorn-engine/unicorn/blob/8c6cbe3f3cabed57b23b721c29f937dd5baafc90/tests/regress/arm_fp_vfp_disabled.py#L15
# About arm32/arm64 fp: https://www.raspberrypi.org/forums/viewtopic.php?t=259802
# https://www.cnblogs.com/pengdonglin137/p/3727583.html
def __enable_vfp32(self):
# MRC p15, #0, r1, c1, c0, #2
# ORR r1, r1, #(0xf << 20)
# MCR p15, #0, r1, c1, c0, #2
# MOV r1, #0
# MCR p15, #0, r1, c7, c5, #4
# MOV r0,#0x40000000
# FMXR FPEXC, r0
code = '11EE501F'
code += '41F47001'
code += '01EE501F'
code += '4FF00001'
code += '07EE951F'
code += '4FF08040'
code += 'E8EE100A'
# vpush {d8}
code += '2ded028b'
address = 0x1000
mem_size = 0x1000
code_bytes = bytes.fromhex(code)
try:
self.mu.mem_map(address, mem_size)
self.mu.mem_write(address, code_bytes)
self.mu.reg_write(UC_ARM_REG_SP, address + mem_size)
self.mu.emu_start(address | 1, address + len(code_bytes))
finally:
self.mu.mem_unmap(address, mem_size)
#
#
#arm64
'''
mrs x1, cpacr_el1
mov x0, #(3 << 20)
orr x0, x1, x0
msr cpacr_el1, x0
'''
def __enable_vfp64(self):
#arm64 enable vfp
x = 0
x = self.mu.reg_read(UC_ARM64_REG_CPACR_EL1)
x |= 0x300000  # set FPEN bit
self.mu.reg_write(UC_ARM64_REG_CPACR_EL1, x)
#
def __add_classes(self):
cur_file_dir = os.path.dirname(__file__)
entry_file_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
# Python convention: package_name is always relative to the entry script's directory
package_name = os.path.relpath(cur_file_dir, entry_file_dir).replace("/", ".")
full_dirname = "%s/java/classes"%(cur_file_dir, )
preload_classes = set()
for importer, mod_name, c in pkgutil.iter_modules([full_dirname]):
import_name = ".java.classes.%s"%mod_name
m = importlib.import_module(import_name, package_name)
#print(dir(m))
clsList = inspect.getmembers(m, inspect.isclass)
for _, clz in clsList:
if (type(clz) == JavaClassDef):
preload_classes.add(clz)
#
#
#
for clz in preload_classes:
self.java_classloader.add_class(clz)
#
#also add classloader as java class
self.java_classloader.add_class(JavaClassLoader)
#
"""
:type mu Uc
:type modules Modules
:type memory Memory
"""
def __init__(self, vfs_root="vfs", config_path="emu_cfg/default.json", vfp_inst_set=True, arch=emu_const.ARCH_ARM32, muti_task=False):
# Unicorn.
sys.stdout = sys.stderr
# The stream can only be changed once here; to avoid mixing with stdout output from child processes after fork, write these logs to stderr
# FIXME: remove this special dependency
self.config = config.Config(config_path)
self.__arch = arch
self.__support_muti_task = muti_task
self.__pcb = pcb.Pcb()
logging.info("process pid:%d"%self.__pcb.get_pid())
sp_reg = 0
if arch == emu_const.ARCH_ARM32:
self.__ptr_sz = 4
self.mu = Uc(UC_ARCH_ARM, UC_MODE_ARM)
if vfp_inst_set:
self.__enable_vfp32()
#
sp_reg = UC_ARM_REG_SP
self.call_native = self.__call_native32
self.call_native_return_2reg = self.__call_native_return_2reg32
#
elif arch == emu_const.ARCH_ARM64:
self.__ptr_sz = 8
self.mu = Uc(UC_ARCH_ARM64, UC_MODE_ARM)
if vfp_inst_set:
self.__enable_vfp64()
#
sp_reg = UC_ARM64_REG_SP
self.call_native = self.__call_native64
self.call_native_return_2reg = self.__call_native_return_2reg64
#
else:
raise RuntimeError("emulator arch=%d not support!!!"%arch)
#
self.__vfs_root = vfs_root
# Note: an earlier defect meant the linker initialization did not finish the init_tls part, so libc initialization dereferenced a null pointer and could not complete normally.
# Mapping address 0 here forced execution past it, because R1 happened to be 0; otherwise a memory unmapped exception would be raised.
# The latest version has fixed this problem, so this mapping is no longer needed.
#self.mu.mem_map(0x0, 0x00001000, UC_PROT_READ | UC_PROT_WRITE)
# Android 4.4
if arch == emu_const.ARCH_ARM32:
self.system_properties = {"libc.debug.malloc.options": "", "ro.build.version.sdk":"19", "ro.build.version.release":"4.4.4","persist.sys.dalvik.vm.lib":"libdvm.so", "ro.product.cpu.abi":"armeabi-v7a", "ro.product.cpu.abi2":"armeabi",
"ro.product.manufacturer":"LGE", "ro.product.manufacturer":"LGE", "ro.debuggable":"0", "ro.product.model":"AOSP on HammerHead","ro.hardware":"hammerhead", "ro.product.board":"hammerhead", "ro.product.device":"hammerhead",
"ro.build.host":"833d1eed3ea3", "ro.build.type":"user",
"ro.secure":"1", "wifi.interface":"wlan0", "ro.product.brand":"Android",
}
#
else:
# FIXME: arm64 uses 6.0 here; arm32 should also be unified to 6.0
# Android 6.0
self.system_properties = {"libc.debug.malloc.options": "", "ro.build.version.sdk":"23", "ro.build.version.release":"6.0.1","persist.sys.dalvik.vm.lib2":"libart.so", "ro.product.cpu.abi":"arm64-v8a",
"ro.product.manufacturer":"LGE", "ro.product.manufacturer":"LGE", "ro.debuggable":"0", "ro.product.model":"AOSP on HammerHead","ro.hardware":"hammerhead", "ro.product.board":"hammerhead", "ro.product.device":"hammerhead",
"ro.build.host":"833d1eed3ea3", "ro.build.type":"user",
"ro.secure":"1", "wifi.interface":"wlan0", "ro.product.brand":"Android",
}
#
self.memory = MemoryMap(self.mu, config.MAP_ALLOC_BASE, config.MAP_ALLOC_BASE+config.MAP_ALLOC_SIZE)
# Stack.
addr = self.memory.map(config.STACK_ADDR, config.STACK_SIZE, UC_PROT_READ | UC_PROT_WRITE)
self.mu.reg_write(sp_reg, config.STACK_ADDR + config.STACK_SIZE)
#sp = self.mu.reg_read(sp_reg)
#print ("stack addr %x"%sp)
self.__sch = Scheduler(self)
# CPU
self.__syscall_handler = SyscallHandlers(self.mu, self.__sch, self.get_arch())
# Hooker
self.memory.map(config.BRIDGE_MEMORY_BASE, config.BRIDGE_MEMORY_SIZE, UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC)
self.__hooker = Hooker(self, config.BRIDGE_MEMORY_BASE, config.BRIDGE_MEMORY_SIZE)
#syscalls
self.__mem_handler = MemorySyscallHandler(self, self.memory, self.__syscall_handler)
self.__syscall_hooks = SyscallHooks(self, self.config, self.__syscall_handler)
self.__vfs = VirtualFileSystem(self, vfs_root, self.config, self.__syscall_handler, self.memory)
# JavaVM
self.java_classloader = JavaClassLoader()
self.java_vm = JavaVM(self, self.java_classloader, self.__hooker)
# linker
self.modules = Modules(self, self.__vfs_root)
# Native
self.__sym_hooks = SymbolHooks(self, self.modules, self.__hooker, self.__vfs_root)
self.__add_classes()
# Hack: allocate a block of memory for jmethod_id to point to; Douyin force-casts jmethodID, and this works around that
self.memory.map(config.JMETHOD_ID_BASE, 0x2000, UC_PROT_READ | UC_PROT_WRITE | UC_PROT_EXEC)
if arch == emu_const.ARCH_ARM32:
# Map a commonly used file where some CPU atomic-operation functions are implemented
path = "%s/system/lib/vectors"%vfs_root
vf = VirtualFile("[vectors]", misc_utils.my_open(path, os.O_RDONLY), path)
self.memory.map(0xffff0000, 0x1000, UC_PROT_EXEC | UC_PROT_READ, vf, 0)
# Map app_process, a basic trait of the Android system
path = "%s/system/bin/app_process32"%vfs_root
sz = os.path.getsize(path)
vf = VirtualFile("/system/bin/app_process32", misc_utils.my_open(path, os.O_RDONLY), path)
self.memory.map(0xab006000, sz, UC_PROT_EXEC | UC_PROT_READ, vf, 0)
#
else:
# Map app_process, a basic trait of the Android system
path = "%s/system/bin/app_process64"%vfs_root
sz = os.path.getsize(path)
vf = VirtualFile("/system/bin/app_process64", misc_utils.my_open(path, os.O_RDONLY), path)
self.memory.map(0xab006000, sz, UC_PROT_EXEC | UC_PROT_READ, vf, 0)
#
#
def get_vfs_root(self):
return self.__vfs_root
#
def load_library(self, filename, do_init=True):
libmod = self.modules.load_module(filename, True)
return libmod
#
def call_symbol(self, module, symbol_name, *argv):
symbol_addr = module.find_symbol(symbol_name)
if symbol_addr is None:
logging.error('Unable to find symbol \'%s\' in module \'%s\'.' % (symbol_name, module.filename))
return
return self.call_native(symbol_addr, *argv)
#
def __call_native32(self, addr, *argv):
assert addr is not None, "call addr is None, make sure your JNI native function has been registered via RegisterNatives!"
native_write_args(self, *argv)
self.__sch.exec(addr)
# Read result from locals if jni.
res = self.mu.reg_read(UC_ARM_REG_R0)
return res
#
def __call_native64(self, addr, *argv):
assert addr is not None, "call addr is None, make sure your JNI native function has been registered via RegisterNatives!"
native_write_args(self, *argv)
self.__sch.exec(addr)
# Read result from locals if jni.
res = self.mu.reg_read(UC_ARM64_REG_X0)
return res
#
# The return value is 8 bytes, held in two registers
def __call_native_return_2reg32(self, addr, *argv):
res = self.__call_native32(addr, *argv)
res_high = self.mu.reg_read(UC_ARM_REG_R1)
return (res_high << 32) | res
#
# The return value is 16 bytes, held in two registers
def __call_native_return_2reg64(self, addr, *argv):
res = self.__call_native64(addr, *argv)
res_high = self.mu.reg_read(UC_ARM64_REG_X1)
return (res_high << 64) | res
#
def get_arch(self):
return self.__arch
#
def get_ptr_size(self):
return self.__ptr_sz
#
def get_pcb(self):
return self.__pcb
#
def get_schduler(self):
return self.__sch
#
def get_muti_task_support(self):
return self.__support_muti_task
#
#

blob_id: a450edab6d3283d244544c232cfabc356dddfe0e | directory_id: e1cddfd754d952134e72dfd03522c5ea4fb6008e | content_id: 6b4ab54a37e2683f485df36adf32a9127bd83a9f
repo_name: FDio/vpp | path: /test/test_policer_input.py | filename: test_policer_input.py | extension: py
snapshot_id: 0ad30fa1bec2975ffa6b66b45c9f4f32163123b6 | revision_id: f234b0d4626d7e686422cc9dfd25958584f4931e | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-31T16:09:04.068646 | revision_date: 2022-03-14T09:49:15 | committer_date: 2023-08-31T09:50:00
gha_event_created_at: 2023-06-21T05:39:17 | gha_created_at: 2017-07-07T16:29:40 | gha_language: C
github_id: 96,556,718 | star_events_count: 1,048 | fork_events_count: 630
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 8,034
content:
#!/usr/bin/env python3
# Copyright (c) 2021 Graphiant, Inc.
import unittest
import scapy.compat
from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from framework import VppTestCase, VppTestRunner
from vpp_papi import VppEnum
from vpp_policer import VppPolicer, PolicerAction, Dir
NUM_PKTS = 67
class TestPolicerInput(VppTestCase):
"""Policer on an interface"""
vpp_worker_count = 2
def setUp(self):
super(TestPolicerInput, self).setUp()
self.create_pg_interfaces(range(2))
for i in self.pg_interfaces:
i.admin_up()
i.config_ip4()
i.resolve_arp()
self.pkt = (
Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac)
/ IP(src=self.pg0.remote_ip4, dst=self.pg1.remote_ip4)
/ UDP(sport=1234, dport=1234)
/ Raw(b"\xa5" * 100)
)
def tearDown(self):
for i in self.pg_interfaces:
i.unconfig_ip4()
i.admin_down()
super(TestPolicerInput, self).tearDown()
def policer_interface_test(self, dir: Dir):
pkts = self.pkt * NUM_PKTS
action_tx = PolicerAction(
VppEnum.vl_api_sse2_qos_action_type_t.SSE2_QOS_ACTION_API_TRANSMIT, 0
)
policer = VppPolicer(
self,
"pol1",
80,
0,
1000,
0,
conform_action=action_tx,
exceed_action=action_tx,
violate_action=action_tx,
)
policer.add_vpp_config()
sw_if_index = self.pg0.sw_if_index if dir == Dir.RX else self.pg1.sw_if_index
# Start policing on pg0
policer.apply_vpp_config(sw_if_index, dir, True)
rx = self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
stats = policer.get_stats()
# Single rate, 2 colour policer - expect conform, violate but no exceed
self.assertGreater(stats["conform_packets"], 0)
self.assertEqual(stats["exceed_packets"], 0)
self.assertGreater(stats["violate_packets"], 0)
# Stop policing on pg0
policer.apply_vpp_config(sw_if_index, dir, False)
rx = self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
statsnew = policer.get_stats()
# No new packets counted
self.assertEqual(stats, statsnew)
policer.remove_vpp_config()
def test_policer_input(self):
"""Input Policing"""
self.policer_interface_test(Dir.RX)
def test_policer_output(self):
"""Output Policing"""
self.policer_interface_test(Dir.TX)
def test_policer_reset(self):
"""Policer reset bucket"""
pkts = self.pkt * NUM_PKTS
action_tx = PolicerAction(
VppEnum.vl_api_sse2_qos_action_type_t.SSE2_QOS_ACTION_API_TRANSMIT, 0
)
policer = VppPolicer(
self,
"pol1",
1,
0,
10000,
0,
conform_action=action_tx,
exceed_action=action_tx,
violate_action=action_tx,
)
policer.add_vpp_config()
# Start policing on pg0
policer.apply_vpp_config(self.pg0.sw_if_index, Dir.RX, True)
self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
details = policer.get_details()
self.assertGreater(details.current_limit, details.current_bucket)
self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
self.vapi.policer_reset(policer_index=policer.policer_index)
details = policer.get_details()
self.assertEqual(details.current_limit, details.current_bucket)
policer.apply_vpp_config(self.pg0.sw_if_index, Dir.RX, False)
policer.remove_vpp_config()
def test_policer_update(self):
"""Policer update"""
pkts = self.pkt * NUM_PKTS
action_tx = PolicerAction(
VppEnum.vl_api_sse2_qos_action_type_t.SSE2_QOS_ACTION_API_TRANSMIT, 0
)
policer = VppPolicer(
self,
"pol1",
1,
0,
10000,
0,
conform_action=action_tx,
exceed_action=action_tx,
violate_action=action_tx,
)
policer.add_vpp_config()
# Start policing on pg0
policer.apply_vpp_config(self.pg0.sw_if_index, Dir.RX, True)
self.send_and_expect(self.pg0, pkts, self.pg1, worker=0)
details_before = policer.get_details()
self.assertGreater(details_before.current_limit, details_before.current_bucket)
policer.cir = 8000
policer.commited_burst = 100000
policer.update()
details_after = policer.get_details()
self.assertGreater(details_after.cir, details_before.cir)
self.assertGreater(details_after.cb, details_before.cb)
policer.apply_vpp_config(self.pg0.sw_if_index, Dir.RX, False)
policer.remove_vpp_config()
def policer_handoff_test(self, dir: Dir):
pkts = self.pkt * NUM_PKTS
action_tx = PolicerAction(
VppEnum.vl_api_sse2_qos_action_type_t.SSE2_QOS_ACTION_API_TRANSMIT, 0
)
policer = VppPolicer(
self,
"pol2",
80,
0,
1000,
0,
conform_action=action_tx,
exceed_action=action_tx,
violate_action=action_tx,
)
policer.add_vpp_config()
sw_if_index = self.pg0.sw_if_index if dir == Dir.RX else self.pg1.sw_if_index
# Bind the policer to worker 1
policer.bind_vpp_config(1, True)
# Start policing on pg0
policer.apply_vpp_config(sw_if_index, dir, True)
for worker in [0, 1]:
self.send_and_expect(self.pg0, pkts, self.pg1, worker=worker)
self.logger.debug(self.vapi.cli("show trace max 100"))
stats = policer.get_stats()
stats0 = policer.get_stats(worker=0)
stats1 = policer.get_stats(worker=1)
# Worker 1, should have done all the policing
self.assertEqual(stats, stats1)
# Worker 0, should have handed everything off
self.assertEqual(stats0["conform_packets"], 0)
self.assertEqual(stats0["exceed_packets"], 0)
self.assertEqual(stats0["violate_packets"], 0)
# Unbind the policer from worker 1 and repeat
policer.bind_vpp_config(1, False)
for worker in [0, 1]:
self.send_and_expect(self.pg0, pkts, self.pg1, worker=worker)
self.logger.debug(self.vapi.cli("show trace max 100"))
# The policer should auto-bind to worker 0 when packets arrive
stats = policer.get_stats()
# The 2 workers should now have policed the same amount
stats = policer.get_stats()
stats0 = policer.get_stats(worker=0)
stats1 = policer.get_stats(worker=1)
self.assertGreater(stats0["conform_packets"], 0)
self.assertEqual(stats0["exceed_packets"], 0)
self.assertGreater(stats0["violate_packets"], 0)
self.assertGreater(stats1["conform_packets"], 0)
self.assertEqual(stats1["exceed_packets"], 0)
self.assertGreater(stats1["violate_packets"], 0)
self.assertEqual(
stats0["conform_packets"] + stats1["conform_packets"],
stats["conform_packets"],
)
self.assertEqual(
stats0["violate_packets"] + stats1["violate_packets"],
stats["violate_packets"],
)
# Stop policing on pg0
policer.apply_vpp_config(sw_if_index, dir, False)
policer.remove_vpp_config()
def test_policer_handoff_input(self):
"""Worker thread handoff policer input"""
self.policer_handoff_test(Dir.RX)
def test_policer_handoff_output(self):
"""Worker thread handoff policer output"""
self.policer_handoff_test(Dir.TX)
if __name__ == "__main__":
unittest.main(testRunner=VppTestRunner)

blob_id: 0673e6613e98c7339a095d9c85f20ac8d9ed3ce9 | directory_id: 4ccc4879839b3d2a71908ead145c884b0e525927 | content_id: 7af007aa84d698b961ce6f4e5a004f8754ea8001
repo_name: Azure/blobxfer | path: /tests/test_blobxfer_operations_azure.py | filename: test_blobxfer_operations_azure.py | extension: py
snapshot_id: 185c565aa51d2bad3fc177203c62cd1c8b4e1730 | revision_id: 0ac1212326a43dfd6cb2b8525ff95f1c4ae540af | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-28T22:23:38.915857 | revision_date: 2022-10-24T21:48:51 | committer_date: 2022-10-24T21:48:51
gha_event_created_at: 2023-08-27T19:08:47 | gha_created_at: 2016-07-25T16:03:06 | gha_language: Python
github_id: 64,148,806 | star_events_count: 155 | fork_events_count: 55
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 21,232
content:
# coding=utf-8
"""Tests for operations azure"""
# stdlib imports
import unittest.mock as mock
import pathlib
# non-stdlib imports
import azure.storage.blob
import azure.storage.file
import pytest
# local imports
import blobxfer.models.metadata as md
import blobxfer.models.options
# module under test
import blobxfer.models.azure as azmodels
import blobxfer.operations.azure as azops
def test_storage_credentials():
go = mock.MagicMock()
go.timeout.max_retries = None
creds = azops.StorageCredentials(go)
with pytest.raises(ValueError):
creds.add_storage_account('sa1', '', 'core.windows.net')
with pytest.raises(ValueError):
creds.add_storage_account(
'sa1', 'somekey1', 'https://blob.core.windows.net')
creds.add_storage_account('sa1', 'somekey1', 'core.windows.net')
a = creds.get_storage_account('sa1')
assert a.name == 'sa1'
assert a.key == 'somekey1'
assert a.endpoint == 'core.windows.net'
assert isinstance(
a.append_blob_client, azure.storage.blob.AppendBlobService)
assert isinstance(
a.block_blob_client, azure.storage.blob.BlockBlobService)
assert isinstance(
a.file_client, azure.storage.file.FileService)
assert isinstance(
a.page_blob_client, azure.storage.blob.PageBlobService)
with pytest.raises(KeyError):
a = creds.get_storage_account('sa2')
with pytest.raises(ValueError):
creds.add_storage_account('sa1', 'somekeyxx', 'core.windows.net')
creds.add_storage_account('sa2', 'somekey2', 'core.cloudapi.de')
a = creds.get_storage_account('sa1')
b = creds.get_storage_account('sa2')
assert a.name == 'sa1'
assert a.key == 'somekey1'
assert a.endpoint == 'core.windows.net'
assert b.name == 'sa2'
assert b.key == 'somekey2'
assert b.endpoint == 'core.cloudapi.de'
def test_key_is_sas():
to = mock.MagicMock()
to.max_retries = None
a = azops.StorageAccount(
'name', 'AAAAAA==', 'core.windows.net', 10, to, mock.MagicMock())
assert not a.is_sas
with pytest.raises(ValueError):
a = azops.StorageAccount(
'name', 'abcdef&blah', 'core.windows.net', 10, to, None)
a = azops.StorageAccount(
'name', '?abcdef', 'core.windows.net', 10, to, None)
assert a.is_sas
a = azops.StorageAccount(
'name', '?sv=0&sr=1&sig=2', 'core.windows.net', 10, to, None)
assert a.is_sas
a = azops.StorageAccount(
'name', 'sv=0&sr=1&sig=2', 'core.windows.net', 10, to, None)
assert a.is_sas
a = azops.StorageAccount(
'name', 'sig=0&sv=0&sr=1&se=2', 'core.windows.net', 10, to, None)
assert a.is_sas
def test_container_manipulation_allowed():
to = mock.MagicMock()
to.max_retries = None
a = azops.StorageAccount(
'name', 'AAAAAA==', 'core.windows.net', 10, to, None)
assert a._container_manipulation_allowed()
a = azops.StorageAccount(
'name', '?sv=0&sr=1&sig=2', 'core.windows.net', 10, to, None)
assert not a._container_manipulation_allowed()
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=ao&sig=2', 'core.windows.net', 10, to, None)
assert not a._container_manipulation_allowed()
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sig=2', 'core.windows.net', 10, to, None)
assert a._container_manipulation_allowed()
def test_ensure_object_manipulation_allowed():
to = mock.MagicMock()
to.max_retries = None
with pytest.raises(ValueError):
azops.StorageAccount(
'name', '?sv=0&sr=1&srt=c&sig=2', 'core.windows.net', 10, to, None)
a = azops.StorageAccount(
'name', 'AAAAAA==', 'core.windows.net', 10, to, None)
assert a._ensure_object_manipulation_allowed()
def test_credential_allows_container_list():
to = mock.MagicMock()
to.max_retries = None
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sp=l&sig=2', 'core.windows.net', 10, to,
None)
assert a._credential_allows_container_list()
assert a.can_list_container_objects
a = azops.StorageAccount(
'name', '?sv=0&sr=s&sp=l&sig=2', 'core.windows.net', 10, to,
None)
assert a._credential_allows_container_list()
assert a.can_list_container_objects
a = azops.StorageAccount(
'name', '?sv=0&sr=f&sp=rl&sig=2', 'core.windows.net', 10, to,
None)
assert not a._credential_allows_container_list()
assert not a.can_list_container_objects
a = azops.StorageAccount(
'name', '?sv=0&si=policy&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_container_list()
assert a.can_list_container_objects
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sp=r&sig=2', 'core.windows.net', 10, to,
None)
assert not a._credential_allows_container_list()
assert not a.can_list_container_objects
a = azops.StorageAccount(
'name', 'AAAAAA==', 'core.windows.net', 10, to, None)
assert a._credential_allows_container_list()
assert a.can_list_container_objects
def test_credential_allows_object_read():
to = mock.MagicMock()
to.max_retries = None
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sp=r&sig=2', 'core.windows.net', 10, to,
None)
assert a._credential_allows_object_read()
assert a.can_read_object
a = azops.StorageAccount(
'name', '?sp=r&sr=b&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_read()
assert a.can_read_object
a = azops.StorageAccount(
'name', 'sp=r&sr=b&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_read()
assert a.can_read_object
a = azops.StorageAccount(
'name', '?sr=b&sp=r&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_read()
assert a.can_read_object
a = azops.StorageAccount(
'name', 'sr=b&sp=r&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_read()
assert a.can_read_object
a = azops.StorageAccount(
'name', '?sv=0&si=policy&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_read()
assert a.can_read_object
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sp=w&sig=2', 'core.windows.net', 10, to,
None)
assert not a._credential_allows_object_read()
assert not a.can_read_object
a = azops.StorageAccount(
'name', 'AAAAAA==', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_read()
assert a.can_read_object
def test_credential_allows_object_write():
to = mock.MagicMock()
to.max_retries = None
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sp=w&sig=2', 'core.windows.net', 10, to,
None)
assert a._credential_allows_object_write()
assert a.can_write_object
a = azops.StorageAccount(
'name', '?sv=0&si=policy&sig=2', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_write()
assert a.can_write_object
a = azops.StorageAccount(
'name', '?sv=0&sr=1&srt=co&sp=r&sig=2', 'core.windows.net', 10, to,
None)
assert not a._credential_allows_object_write()
assert not a.can_write_object
a = azops.StorageAccount(
'name', 'AAAAAA==', 'core.windows.net', 10, to, None)
assert a._credential_allows_object_write()
assert a.can_write_object
@mock.patch('blobxfer.operations.azure.file.get_file_properties')
@mock.patch('blobxfer.operations.azure.blob.get_blob_properties')
def test_handle_vectored_io_stripe(patched_gbp, patched_gfp):
creds = mock.MagicMock()
options = mock.MagicMock()
options.mode = azmodels.StorageModes.Block
store_raw_metadata = False
sa = mock.MagicMock()
is_file = False
container = 'cont'
entity = mock.MagicMock()
p = '/cont/remote/path'
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
# test not first slice
with mock.patch(
'blobxfer.models.metadata.vectored_io_from_metadata',
side_effect=[md.VectoredStripe(
next='nextpr',
offset_start=0,
slice_id=1,
total_size=10,
total_slices=10,
)]):
for part in asp._handle_vectored_io_stripe(
creds, options, store_raw_metadata, sa, entity, is_file,
container, dir=None):
assert part is None
# blob test
with mock.patch(
'blobxfer.models.metadata.'
'vectored_io_from_metadata') as patched_vifm:
patched_vifm.side_effect = [
md.VectoredStripe(
next=md.VectoredNextEntry(
storage_account_name='sa0',
endpoint='core.windows.net',
container='cont',
name='path-bxslice-0',
),
offset_start=0,
slice_id=0,
total_size=2,
total_slices=2,
),
md.VectoredStripe(
next=None,
offset_start=1,
slice_id=1,
total_size=2,
total_slices=2,
),
]
options.mode = azmodels.StorageModes.Block
b0 = azure.storage.blob.models.Blob(name='path-bxslice-0')
b1 = azure.storage.blob.models.Blob(name='path-bxslice-1')
patched_gbp.side_effect = [b0, b1]
i = 0
for part in asp._handle_vectored_io_stripe(
creds, options, store_raw_metadata, sa, entity, is_file,
container, dir=None):
i += 1
assert i == 2
# file test
with mock.patch(
'blobxfer.models.metadata.'
'vectored_io_from_metadata') as patched_vifm:
patched_vifm.side_effect = [
md.VectoredStripe(
next=md.VectoredNextEntry(
storage_account_name='sa0',
endpoint='core.windows.net',
container='cont',
name='path-bxslice-0',
),
offset_start=0,
slice_id=0,
total_size=2,
total_slices=2,
),
md.VectoredStripe(
next=None,
offset_start=1,
slice_id=1,
total_size=2,
total_slices=2,
),
]
options.mode = azmodels.StorageModes.File
is_file = True
f0 = azure.storage.file.models.File(name='path-bxslice-0')
f1 = azure.storage.file.models.File(name='path-bxslice-1')
patched_gfp.side_effect = [f0, f1]
i = 0
for part in asp._handle_vectored_io_stripe(
creds, options, store_raw_metadata, sa, entity, is_file,
container, dir=None):
i += 1
assert i == 2
@mock.patch('requests.head')
def test_populate_from_arbitrary_url(patched_rh):
response = mock.MagicMock()
response.headers = {
'Content-Length': 10
}
patched_rh.return_value = response
asp = azops.SourcePath()
ase = asp._populate_from_arbitrary_url('https://host/remote/path')
assert ase.size == 10
assert ase.path == 'https://host/remote/path'
assert ase.is_arbitrary_url
def test_azuresourcepath():
p = '/cont/remote/path'
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
with pytest.raises(RuntimeError):
asp.add_path_with_storage_account('x', 'x')
assert 'sa' == asp.lookup_storage_account(p)
asp = azops.SourcePath()
asp.add_arbitrary_remote_url('https://host/remote/path')
assert 'https://host/remote/path' in asp._paths
@mock.patch('blobxfer.models.crypto.EncryptionMetadata')
@mock.patch('blobxfer.operations.azure.file.list_files')
@mock.patch('blobxfer.operations.azure.file.get_file_properties')
@mock.patch('blobxfer.operations.azure.file.check_if_single_file')
def test_azuresourcepath_files(
patched_cisf, patched_gfp, patched_lf, patched_em):
p = 'cont/name'
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
options = mock.MagicMock()
options.mode = azmodels.StorageModes.File
creds = mock.MagicMock()
sa = mock.MagicMock()
sa.file_client = mock.MagicMock()
creds.get_storage_account.return_value = sa
f = azure.storage.file.models.File(name='name')
patched_cisf.return_value = (False, None)
patched_lf.side_effect = [[f]]
patched_em.encryption_metadata_exists = mock.MagicMock()
patched_em.encryption_metadata_exists.return_value = False
# test no read access
sa.can_read_object = False
with pytest.raises(RuntimeError):
next(asp.files(creds, options, False))
sa.can_read_object = True
# test normal container path
i = 0
for file in asp.files(creds, options, False):
i += 1
assert pathlib.Path(file.name) == pathlib.Path('name')
assert file.encryption_metadata is None
assert i == 1
p = '/cont/remote/path'
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
options = mock.MagicMock()
options.mode = azmodels.StorageModes.File
creds = mock.MagicMock()
sa = mock.MagicMock()
sa.file_client = mock.MagicMock()
creds.get_storage_account.return_value = sa
f = azure.storage.file.models.File(name='remote/name')
patched_cisf.return_value = (False, None)
patched_lf.side_effect = [[f]]
patched_em.encryption_metadata_exists = mock.MagicMock()
patched_em.encryption_metadata_exists.return_value = False
# test normal subdir path
i = 0
for file in asp.files(creds, options, False):
i += 1
assert pathlib.Path(file.name) == pathlib.Path('remote/name')
assert file.encryption_metadata is None
assert i == 1
# test no container list perm
sa.can_list_container_objects = False
patched_gfp.side_effect = [f]
i = 0
for file in asp.files(creds, options, False):
i += 1
assert pathlib.Path(file.name) == pathlib.Path('remote/name')
assert file.encryption_metadata is None
assert i == 1
# test no container list perm, nonexistent
patched_gfp.side_effect = [None]
i = 0
for file in asp.files(creds, options, False):
i += 1
assert i == 0
# test no container list perm, filter dry run
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
asp.add_includes(['zzz'])
patched_cisf.return_value = (True, f)
patched_gfp.side_effect = [f]
assert len(list(asp.files(creds, options, True))) == 0
# test no container list perm, no vio return
with mock.patch(
'blobxfer.operations.azure.SourcePath.'
'_handle_vectored_io_stripe') as patched_hvios:
patched_hvios.side_effect = [[None]]
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
patched_gfp.side_effect = [f]
assert len(list(asp.files(creds, options, False))) == 0
sa.can_list_container_objects = True
# test filter
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
asp.add_includes(['zzz'])
patched_cisf.return_value = (True, f)
patched_lf.side_effect = [[f]]
assert len(list(asp.files(creds, options, True))) == 0
# test no vio return
with mock.patch(
'blobxfer.operations.azure.SourcePath.'
'_handle_vectored_io_stripe') as patched_hvios:
patched_hvios.side_effect = [[None]]
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
patched_lf.side_effect = [[f]]
assert len(list(asp.files(creds, options, False))) == 0
# test encrypted
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
fe = azure.storage.file.models.File(name='remote/name')
fe.metadata = {'encryptiondata': {'a': 'b'}}
patched_lf.side_effect = [[fe]]
patched_em.encryption_metadata_exists.return_value = True
patched_em.convert_from_json = mock.MagicMock()
i = 0
for file in asp.files(creds, options, True):
i += 1
assert pathlib.Path(file.name) == pathlib.Path('remote/name')
assert file.encryption_metadata is not None
assert i == 1
@mock.patch('blobxfer.models.crypto.EncryptionMetadata')
@mock.patch('blobxfer.operations.azure.blob.list_blobs')
@mock.patch('blobxfer.operations.azure.blob.get_blob_properties')
def test_azuresourcepath_blobs(patched_gbp, patched_lb, patched_em):
p = '/cont/remote/path'
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
options = mock.MagicMock()
options.mode = azmodels.StorageModes.Auto
creds = mock.MagicMock()
sa = mock.MagicMock()
sa.block_blob_client = mock.MagicMock()
creds.get_storage_account.return_value = sa
b = azure.storage.blob.models.Blob(name='name')
b.metadata = {}
patched_lb.side_effect = [[b]]
patched_em.encryption_metadata_exists = mock.MagicMock()
patched_em.encryption_metadata_exists.return_value = False
# test no read access
sa.can_read_object = False
with pytest.raises(RuntimeError):
next(asp.files(creds, options, False))
sa.can_read_object = True
# test normal path
i = 0
for file in asp.files(creds, options, False):
i += 1
assert file.name == 'name'
assert file.encryption_metadata is None
assert i == 1
# test normal path with metadata vdir sep
b.metadata[azops._METADATA_VIRTUAL_DIRECTORY] = 'true'
patched_lb.side_effect = [[b]]
i = 0
for file in asp.files(creds, options, False):
i += 1
assert i == 0
b.metadata = {}
# test no container list perm
sa.can_list_container_objects = False
patched_gbp.side_effect = [b]
i = 0
for file in asp.files(creds, options, False):
i += 1
assert file.name == 'name'
assert file.encryption_metadata is None
assert i == 1
# test no container list perm, nonexistent
patched_gbp.side_effect = [None]
i = 0
for file in asp.files(creds, options, False):
i += 1
assert i == 0
# test no container list perm, filter dry run
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
asp.add_includes(['zzz'])
patched_gbp.side_effect = [b]
assert len(list(asp.files(creds, options, True))) == 0
# test no container list perm, no vio return
with mock.patch(
'blobxfer.operations.azure.SourcePath.'
'_handle_vectored_io_stripe') as patched_hvios:
patched_hvios.side_effect = [[None]]
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
patched_gbp.side_effect = [b]
assert len(list(asp.files(creds, options, False))) == 0
sa.can_list_container_objects = True
# test filter
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
asp.add_includes(['zzz'])
patched_lb.side_effect = [[b]]
assert len(list(asp.files(creds, options, True))) == 0
# test no vio return
with mock.patch(
'blobxfer.operations.azure.SourcePath.'
'_handle_vectored_io_stripe') as patched_hvios:
patched_hvios.side_effect = [[None]]
asp = azops.SourcePath()
asp.add_path_with_storage_account(p, 'sa')
patched_lb.side_effect = [[b]]
assert len(list(asp.files(creds, options, False))) == 0
be = azure.storage.blob.models.Blob(name='name')
be.metadata = {'encryptiondata': {'a': 'b'}}
patched_lb.side_effect = [[be]]
patched_em.encryption_metadata_exists.return_value = True
patched_em.convert_from_json = mock.MagicMock()
i = 0
for file in asp.files(creds, options, False):
i += 1
assert file.name == 'name'
assert file.encryption_metadata is not None
assert i == 1
def test_azuresourcepath_url():
asp = azops.SourcePath()
asp.add_arbitrary_remote_url('https://host/remote/path')
asp._populate_from_arbitrary_url = mock.MagicMock()
sc = blobxfer.models.options.SyncCopy(
access_tier=None,
delete_extraneous_destination=None,
delete_only=None,
dest_mode=None,
mode=None,
overwrite=None,
recursive=None,
rename=None,
server_side_copy=True,
strip_components=0,
)
i = 0
for ase in asp._populate_from_list_blobs(mock.MagicMock(), sc, False):
i += 1
assert asp._populate_from_arbitrary_url.call_count == 1
assert i == 1
def test_destinationpath():
dp = azops.DestinationPath()
sa = mock.MagicMock()
dp.add_path_with_storage_account('/remote/path/', sa)
assert len(dp._paths) == 1
assert len(dp._path_map) == 1
with pytest.raises(RuntimeError):
dp.add_path_with_storage_account('/remote/path2/', sa)
assert dp.lookup_storage_account('/remote/path/') is not None

blob_id: d4f2fce9732580061de81674d0528818e6e25dd4 | directory_id: 3febe9bd6d3f0240754239bca7c02720a53dbe22 | content_id: 482e64babdf1b5d7b63ae729069ef86cc2f45111
repo_name: BlazingDB/blazingsql | path: /pyblazing/pyblazing/apiv2/__init__.py | filename: __init__.py | extension: py
snapshot_id: 9c7b1bdad1538a4478332de57375830090069e85 | revision_id: a35643d4c983334757eee96d5b9005b8b9fbd21b | branch_name: refs/heads/branch-21.08
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-17T16:10:36.051621 | revision_date: 2021-09-30T21:51:09 | committer_date: 2021-09-30T21:51:09
gha_event_created_at: 2022-09-16T23:58:36 | gha_created_at: 2018-09-24T18:25:45 | gha_language: C++
github_id: 150,149,024 | star_events_count: 854 | fork_events_count: 114
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 462
content:
from enum import IntEnum, unique
# NOTE Same values from ral (bsqlengine) DataType (DataType.h and cio.pdx)
@unique
class DataType(IntEnum):
UNDEFINED = (999,)
PARQUET = (0,)
ORC = (1,)
CSV = (2,)
JSON = (3,)
CUDF = (4,)
DASK_CUDF = 5
ARROW = 6
MYSQL = 7,
POSTGRESQL = 8,
SQLITE = 9
# NOTE Same values from io
@unique
class S3EncryptionType(IntEnum):
UNDEFINED = 0
NONE = 1
AES_256 = 2
AWS_KMS = 3

blob_id: 93681abec494d1ac25ddca0e8db5c8e95ae1327a | directory_id: 9c87c7ddaf5011cc475ec6f4343cc1c7ff290b33 | content_id: 0167fdb36d3afd00bc49e3581aa400ff87dfe667
repo_name: earthobservations/wetterdienst | path: /wetterdienst/boot.py | filename: boot.py | extension: py
snapshot_id: c74d80bbb4ce178f3e42936ca7364f9bee66d83b | revision_id: 448fbd56b67978cf8f4215dedc02a11b89f66b01 | branch_name: refs/heads/main
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-08-08T12:02:45.115090 | revision_date: 2023-07-30T16:13:41 | committer_date: 2023-07-30T16:37:09
gha_event_created_at: 2023-09-10T22:36:33 | gha_created_at: 2018-12-08T15:39:42 | gha_language: Python
github_id: 160,953,150 | star_events_count: 283 | fork_events_count: 42
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 1,313
content:
# """Wetterdienst - Open weather data for humans"""
# -*- coding: utf-8 -*-
# Copyright (C) 2018-2023, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
def monkeypatch():
from wetterdienst.monkeypatch import fsspec_monkeypatch
fsspec_monkeypatch.activate()
def get_version(appname):
from importlib.metadata import PackageNotFoundError, version # noqa
try:
return version(appname)
except PackageNotFoundError: # pragma: no cover
return "unknown"
def info() -> None:
"""Print basic information about the wetterdienst package"""
from wetterdienst import Settings, __version__
wd_info = {
"version": __version__,
"authors": "Benjamin Gutzmann <gutzemann@gmail.com>, Andreas Motl <andreas.motl@panodata.org>",
"documentation": "https://wetterdienst.readthedocs.io/",
"repository": "https://github.com/earthobservations/wetterdienst",
"cache_dir (default)": Settings().cache_dir,
}
text = get_title("Wetterdienst - Open weather data for humans")
for key, value in wd_info.items():
text += f"\n{key}:\t {value}"
print(text) # noqa: T201
return
def get_title(text: str) -> str:
line = "=" * len(text)
return f"{line}\n{text}\n{line}"

blob_id: 710698844db5bc53a757fdae5b41449a7b56c2c1 | directory_id: 4d191dd155a746e2d1fb876c2a615b662d3ca8b6 | content_id: 6a326f454f5e880d1c45cb7da2fa28eccd760ddf
repo_name: jeffshek/betterself | path: /events/utils/default_events_builder.py | filename: default_events_builder.py | extension: py
snapshot_id: cf82fda5e9c62d22c882c0fe13409069a0f6273e | revision_id: 51468253fc31373eb96e0e82189b9413f3d76ff5 | branch_name: refs/heads/master
detected_licenses: ["MIT"] | license_type: permissive | gha_license_id: MIT
visit_date: 2023-09-01T02:13:17.917643 | revision_date: 2020-08-10T18:50:54 | committer_date: 2020-08-10T18:50:54
gha_event_created_at: 2023-09-12T19:22:22 | gha_created_at: 2016-04-21T01:57:40 | gha_language: Python
github_id: 56,734,453 | star_events_count: 107 | fork_events_count: 14
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 2,596
content:
from events.models import UserActivity
from supplements.models import Ingredient, Measurement, IngredientComposition, Supplement, UserSupplementStack, \
UserSupplementStackComposition
DEFAULT_ACTIVITIES = [
'Meditated',
'Went to Gym'
]
SPECIAL_ACTIVITIES = [
'Started New Job',
]
class DefaultEventsBuilder(object):
def __init__(self, user):
self.user = user
def build_defaults(self):
self.build_default_supplements()
self.build_default_activities()
def build_default_supplements(self):
caffeine_ingredient, _ = Ingredient.objects.get_or_create(
user=self.user, name='Caffeine')
mg_measurement = Measurement.objects.get(short_name='mg')
# Make 50, 100 and 200mg IngredientCompositions
caffeine_50mg_composition, _ = IngredientComposition.objects.get_or_create(
user=self.user,
measurement=mg_measurement,
quantity=50,
ingredient=caffeine_ingredient
)
caffeine_100mg_composition, _ = IngredientComposition.objects.get_or_create(
user=self.user,
measurement=mg_measurement,
quantity=100,
ingredient=caffeine_ingredient
)
caffeine_200mg_composition, _ = IngredientComposition.objects.get_or_create(
user=self.user,
measurement=mg_measurement,
quantity=200,
ingredient=caffeine_ingredient
)
# Now create Supplements like Coffee / Black Tea
# that all have differing amounts of Caffeine
coffee, _ = Supplement.objects.get_or_create(
user=self.user,
name='Coffee'
)
coffee.ingredient_compositions.add(caffeine_200mg_composition)
black_tea, _ = Supplement.objects.get_or_create(
user=self.user,
name='Black Tea'
)
black_tea.ingredient_compositions.add(caffeine_100mg_composition)
stack, _ = UserSupplementStack.objects.get_or_create(
name='Energy', user=self.user)
for supplement in [black_tea, coffee]:
UserSupplementStackComposition.objects.get_or_create(
user=self.user, stack=stack, supplement=supplement)
def build_default_activities(self):
for activity_name in DEFAULT_ACTIVITIES:
UserActivity.objects.get_or_create(user=self.user, name=activity_name)
for activity_name in SPECIAL_ACTIVITIES:
UserActivity.objects.get_or_create(user=self.user, name=activity_name, is_significant_activity=True)

blob_id: 8ea64b2384f220346d992b58c896bab1bc61da82 | directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2 | content_id: 329f3b2dda5ff66d453594fc01090dbd1936a5ae
repo_name: cms-sw/cmssw | path: /Alignment/SurveyAnalysis/test/run-converter_cfg.py | filename: run-converter_cfg.py | extension: py
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58 | revision_id: 19c178740257eb48367778593da55dcad08b7a4f | branch_name: refs/heads/master
detected_licenses: ["Apache-2.0"] | license_type: permissive | gha_license_id: Apache-2.0
visit_date: 2023-08-23T21:57:42.491143 | revision_date: 2023-08-22T20:22:40 | committer_date: 2023-08-22T20:22:40
gha_event_created_at: 2023-09-14T19:14:28 | gha_created_at: 2013-06-26T14:09:07 | gha_language: C++
github_id: 10,969,551 | star_events_count: 1,006 | fork_events_count: 3,696
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 2,930
content:
import FWCore.ParameterSet.Config as cms
process = cms.Process("DATACONVERTER")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'auto:phase1_2022_realistic'
process.load("Configuration.Geometry.GeometryDB_cff")
process.load("Alignment.SurveyAnalysis.SurveyInfoScenario_cff")
process.MessageLogger = cms.Service("MessageLogger",
cerr = cms.untracked.PSet(
enable = cms.untracked.bool(False)
),
cout = cms.untracked.PSet(
enable = cms.untracked.bool(True),
enableStatistics = cms.untracked.bool(True),
noLineBreaks = cms.untracked.bool(True),
threshold = cms.untracked.string('DEBUG')
),
files = cms.untracked.PSet(
test = cms.untracked.PSet(
enableStatistics = cms.untracked.bool(True),
noLineBreaks = cms.untracked.bool(True),
threshold = cms.untracked.string('DEBUG')
)
)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
numberEventsInRun = cms.untracked.uint32(1),
firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.CondDB.connect = cms.string('sqlite_file:TibTidTecAllSurvey.db')
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDB,
toPut = cms.VPSet(cms.PSet(record = cms.string('TrackerAlignmentRcd'),
tag = cms.string('TibTidTecAllSurvey_v2')
),
cms.PSet(record = cms.string('TrackerAlignmentErrorExtendedRcd'),
tag = cms.string('TibTidTecAllSurveyAPE_v2')
),
)
)
process.mydataconverter = cms.EDAnalyzer("SurveyDataConverter",
applyFineInfo = cms.bool(True),
MisalignmentScenario = cms.PSet(
process.SurveyInfoScenario
),
applyErrors = cms.bool(True),
textFileNames = cms.PSet(
forTID = cms.untracked.string('./TIDSurvey.dat'),
forTIB = cms.untracked.string('./TIBSurvey.dat')
),
applyCoarseInfo = cms.bool(True),
TOBerrors = cms.vdouble(0.014, 0.05, 0.02, 0.003),
TECerrors = cms.vdouble(0.06, 0.015, 0.007, 0.002),
TIDerrors = cms.vdouble(0.045, 0.035, 0.0185, 0.0054),
TIBerrors = cms.vdouble(0.075, 0.045, 0.018)
)
# process.print = cms.OutputModule("AsciiOutputModule")
process.p = cms.Path(process.mydataconverter)
# process.ep = cms.EndPath(process.print)

blob_id: e92437c5ed6feb1a913b6721bcac3f81c3aba01d | directory_id: 99fa07ff170c4b5f880013a58f20a6412bd88dbf | content_id: f58af9f832dac30d0f89396d9f7bc9679fca506c
repo_name: joblib/loky | path: /tests/_test_process_executor.py | filename: _test_process_executor.py | extension: py
snapshot_id: 72df8afddfc55a6d2575a13730a1973bd71a49bb | revision_id: 05da9a84b6bae8dd4370f553ffcd06df99b54f86 | branch_name: refs/heads/master
detected_licenses: ["BSD-3-Clause"] | license_type: permissive | gha_license_id: BSD-3-Clause
visit_date: 2023-08-23T00:35:06.989283 | revision_date: 2023-06-29T13:07:28 | committer_date: 2023-06-29T13:07:28
gha_event_created_at: 2023-06-29T13:43:09 | gha_created_at: 2015-12-25T11:16:10 | gha_language: Python
github_id: 48,578,152 | star_events_count: 244 | fork_events_count: 32
language: Python | src_encoding: UTF-8 | is_vendor: false | is_generated: false | length_bytes: 42,704
content:
from loky import process_executor
import os
import gc
import sys
import time
import shutil
import platform
import pytest
import weakref
import tempfile
import traceback
import threading
import faulthandler
import warnings
from math import sqrt
from pickle import PicklingError
from threading import Thread
from collections import defaultdict
from concurrent import futures
from concurrent.futures._base import (
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED,
)
import loky
from loky.process_executor import (
LokyRecursionError,
ShutdownExecutorError,
TerminatedWorkerError,
)
from loky._base import Future
from . import _executor_mixin
from .utils import id_sleep, check_subprocess_call, filter_match
from .test_reusable_executor import ErrorAtPickle, ExitAtPickle, c_exit
IS_PYPY = hasattr(sys, "pypy_version_info")
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
def mul(x, y):
return x * y
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def sleep_and_return(delay, x):
time.sleep(delay)
return x
def sleep_and_write(t, filename, msg):
time.sleep(t)
with open(filename, "w") as f:
f.write(str(msg))
class MyObject:
def __init__(self, value=0):
self.value = value
def __repr__(self):
return f"MyObject({self.value})"
def my_method(self):
pass
def _assert_no_error(stderr):
if sys.platform == "darwin":
# On macOS, ignore UserWarning related to their broken semaphore
# implementation.
stderr = "\n".join(
line
for line in stderr.splitlines()
if "increase its maximal value" not in line
)
assert len(stderr) == 0, stderr
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
with pytest.raises(RuntimeError):
self.executor.submit(pow, 2, 5)
def test_shutdown_with_pickle_error(self):
self.executor.shutdown()
with self.executor_type(max_workers=4) as e:
e.submit(id, ErrorAtPickle())
def test_shutdown_with_sys_exit_at_pickle(self):
self.executor.shutdown()
with self.executor_type(max_workers=4) as e:
e.submit(id, ExitAtPickle())
def test_interpreter_shutdown(self):
# Free resources to avoid random timeout in CI
self.executor.shutdown(wait=True, kill_workers=True)
executor_type = self.executor_type.__name__
start_method = self.context.get_start_method()
tempdir = tempfile.mkdtemp(prefix="loky_").replace("\\", "/")
try:
n_jobs = 4
code = f"""if True:
from loky.process_executor import {executor_type}
from loky.backend import get_context
from tests._test_process_executor import sleep_and_write
context = get_context("{start_method}")
e = {executor_type}({n_jobs}, context=context)
e.submit(id, 42).result()
task_ids = list(range(2 * {n_jobs}))
filenames = [f'{tempdir}/task_{{i:02}}.log'
for i in task_ids]
e.map(sleep_and_write, [0.1] * 2 * {n_jobs},
filenames, task_ids)
# Do not wait for the results: garbage collect executor and
# shutdown main Python interpreter while letting the worker
# processes finish in the background.
"""
_, stderr = check_subprocess_call(
[sys.executable, "-c", code], timeout=55
)
_assert_no_error(stderr)
# The workers should have completed their work before the main
# process exits:
expected_filenames = [
f"task_{i:02d}.log" for i in range(2 * n_jobs)
]
# Apparently files can take some time to appear under windows
# on AppVeyor
for _ in range(20):
filenames = sorted(os.listdir(tempdir))
if len(filenames) != len(expected_filenames):
time.sleep(1)
else:
break
assert filenames == expected_filenames
for i, filename in enumerate(filenames):
with open(os.path.join(tempdir, filename), "rb") as f:
assert int(f.read().strip()) == i
finally:
shutil.rmtree(tempdir)
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.01) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
assert len(self.executor._processes) == self.worker_count
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_processes_terminate_on_executor_gc(self):
results = self.executor.map(sleep_and_return, [0.1] * 10, range(10))
assert len(self.executor._processes) == self.worker_count
processes = self.executor._processes
executor_flags = self.executor._flags
# The following should trigger GC and therefore shutdown of workers.
# However the shutdown wait for all the pending jobs to complete
# first.
executor_reference = weakref.ref(self.executor)
self.executor = None
# Make sure that there is no other reference to the executor object.
# We have to be patient as _thread_management_worker might have a
# reference when we deleted self.executor.
t_deadline = time.time() + 1
while executor_reference() is not None and time.time() < t_deadline:
if IS_PYPY:
# PyPy can delay __del__ calls and GC compared to CPython.
# To ensure that this test pass without waiting too long we
# need an explicit GC.
gc.collect()
time.sleep(0.001)
assert executor_reference() is None
# The remaining jobs should still be processed in the background
for result, expected in zip(results, range(10)):
assert result == expected
# Once all pending jobs have completed the executor and threads should
# terminate automatically. Note that the effective time for a Python
# process to completely shutdown can vary a lot especially on loaded CI
# machines and with the atexit callbacks that write test coverage data
# to disk. Let's be patient.
self.check_no_running_workers(patience=5)
assert executor_flags.shutdown, processes
assert not executor_flags.broken, processes
@classmethod
def _wait_and_crash(cls):
_executor_mixin._test_event.wait()
faulthandler._sigsegv()
def test_processes_crash_handling_after_executor_gc(self):
# Start 5 easy jobs on 5 workers
results = self.executor.map(sleep_and_return, [0.01] * 5, range(5))
# Enqueue a job that will trigger a crash of one of the workers.
# Make sure this crash does not happen before the non-failing jobs
# have returned their results by using a multiprocessing Event
# instance
crash_result = self.executor.submit(self._wait_and_crash)
assert len(self.executor._processes) == self.worker_count
processes = self.executor._processes
executor_flags = self.executor._flags
# The following should trigger the GC and therefore shutdown of
# workers. However the shutdown wait for all the pending jobs to
# complete first.
executor_reference = weakref.ref(self.executor)
self.executor = None
if IS_PYPY:
# Object deletion and garbage collection can be delayed under PyPy.
time.sleep(1.0)
gc.collect()
# Make sure that there is no other reference to the executor object.
assert executor_reference() is None
# The remaining jobs should still be processed in the background
for result, expected in zip(results, range(5)):
assert result == expected
# Let the crash job know that it can crash now
_executor_mixin._test_event.set()
# The crashing job should be executed after the non-failing jobs
# have completed. The crash should be detected.
match = filter_match("SIGSEGV")
with pytest.raises(TerminatedWorkerError, match=match):
crash_result.result()
_executor_mixin._test_event.clear()
# The executor flag should have been set at this point.
assert executor_flags.broken, processes
# Since the executor is broken, all workers should be SIGKILLed on
        # POSIX or terminated on Windows. Usually this should be fast but
# let's be patient just in case the CI is overloaded.
self.check_no_running_workers(patience=5)
def test_context_manager_shutdown(self):
with self.executor_type(max_workers=5, context=self.context) as e:
processes = e._processes
assert list(e.map(abs, range(-5, 5))) == [
5,
4,
3,
2,
1,
0,
1,
2,
3,
4,
]
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = self.executor_type(max_workers=5, context=self.context)
list(executor.map(abs, range(-5, 5)))
executor_manager_thread = executor._executor_manager_thread
processes = executor._processes
del executor
if IS_PYPY:
# Object deletion and garbage collection can be delayed under PyPy.
time.sleep(1.0)
gc.collect()
executor_manager_thread.join()
for p in processes.values():
p.join()
@classmethod
def _wait_and_return(cls, x):
# This _test_event is passed globally through an initializer to
# the executor.
_executor_mixin._test_event.wait()
return x
def test_shutdown_no_wait(self):
# Ensure that the executor cleans up the processes when calling
# shutdown with wait=False
# Stores executor internals to be able to check that the executor
# shutdown correctly
processes = self.executor._processes
call_queue = self.executor._call_queue
executor_manager_thread = self.executor._executor_manager_thread
# submit tasks that will finish after the shutdown and make sure they
# were started
res = [
self.executor.submit(self._wait_and_return, x)
for x in range(-5, 5)
]
self.executor.shutdown(wait=False)
with pytest.raises(ShutdownExecutorError):
# It's no longer possible to submit any new tasks to this
# executor after shutdown.
self.executor.submit(lambda x: x, 42)
# Check that even after shutdown, all futures are still running
assert all(f._state in (PENDING, RUNNING) for f in res)
# Let the futures finish and make sure that all the executor resources
# were properly cleaned by the shutdown process
_executor_mixin._test_event.set()
executor_manager_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
# Make sure the results were all computed before the executor
# resources were freed.
assert [f.result() for f in res] == list(range(-5, 5))
def test_shutdown_deadlock_pickle(self):
# Test that the pool calling shutdown with wait=False does not cause
# a deadlock if a task fails at pickle after the shutdown call.
# Reported in bpo-39104.
self.executor.shutdown(wait=True)
with self.executor_type(
max_workers=2, context=self.context
) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
# Start the executor and get the executor_manager_thread to collect
# the threads and avoid dangling thread that should be cleaned up
# asynchronously.
executor.submit(id, 42).result()
executor_manager = executor._executor_manager_thread
# Submit a task that fails at pickle and shutdown the executor
# without waiting
f = executor.submit(id, ErrorAtPickle())
executor.shutdown(wait=False)
with pytest.raises(PicklingError):
f.result()
# Make sure the executor is eventually shutdown and do not leave
# dangling threads
executor_manager.join()
def test_hang_issue39205(self):
"""shutdown(wait=False) doesn't hang at exit with running futures.
See https://bugs.python.org/issue39205.
"""
executor_type = self.executor_type.__name__
start_method = self.context.get_start_method()
code = f"""if True:
from loky.process_executor import {executor_type}
from loky.backend import get_context
from tests._test_process_executor import sleep_and_print
context = get_context("{start_method}")
e = {executor_type}(3, context=context)
e.submit(sleep_and_print, 1.0, "apple")
e.shutdown(wait=False)
"""
stdout, stderr = check_subprocess_call(
[sys.executable, "-c", code], timeout=55
)
_assert_no_error(stderr)
assert stdout.strip() == "apple"
@classmethod
def _test_shutdown_and_kill_workers(cls, depth):
executor = cls.executor_type(
max_workers=2,
context=cls.context,
initializer=_executor_mixin.initializer_event,
initargs=(_executor_mixin._test_event,),
)
assert executor.submit(sleep_and_return, 0, 42).result() == 42
if depth >= 2:
_executor_mixin._test_event.set()
executor.submit(sleep_and_return, 30, 42).result()
executor.shutdown()
else:
f = executor.submit(cls._test_shutdown_and_kill_workers, depth + 1)
f.result()
def test_shutdown_and_kill_workers(self):
f = self.executor.submit(self._test_shutdown_and_kill_workers, 1)
# Wait for the nested executors to be started
_executor_mixin._test_event.wait()
# Forcefully shutdown the executor and kill the workers
t_start = time.time()
self.executor.shutdown(wait=True, kill_workers=True)
msg = "Failed to quickly kill nested executor"
t_shutdown = time.time() - t_start
assert t_shutdown < 5, msg
with pytest.raises(ShutdownExecutorError):
f.result()
_executor_mixin._check_subprocesses_number(self.executor, 0)
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED,
)
assert {future1} == done
assert {CANCELLED_FUTURE, future2} == not_done
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED,
)
assert {CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE} == finished
assert {future1} == pending
@classmethod
def wait_and_raise(cls, t):
_executor_mixin._test_event.wait(t)
raise Exception("this is an exception")
@classmethod
def wait_and_return(cls, t):
_executor_mixin._test_event.wait()
return True
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(self.wait_and_raise, 1.5)
future3 = self.executor.submit(time.sleep, 3)
def cb_done(f):
_executor_mixin._test_event.set()
future1.add_done_callback(cb_done)
finished, pending = futures.wait(
[future1, future2, future3], return_when=futures.FIRST_EXCEPTION
)
assert _executor_mixin._test_event.is_set()
assert {future1, future2} == finished
assert {future3} == pending
_executor_mixin._test_event.clear()
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[
SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1,
future2,
],
return_when=futures.FIRST_EXCEPTION,
)
assert {
SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1,
} == finished
assert {CANCELLED_FUTURE, future2} == pending
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1], return_when=futures.FIRST_EXCEPTION
)
assert {EXCEPTION_FUTURE} == finished
assert {future1} == pending
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[
SUCCESSFUL_FUTURE,
EXCEPTION_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1,
future2,
],
return_when=futures.ALL_COMPLETED,
)
assert {
SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2,
} == finished
assert not pending
def test_timeout(self):
# Make sure the executor has already started to avoid timeout happening
# before future1 returns
assert self.executor.submit(id_sleep, 42).result() == 42
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(self.wait_and_return, 5)
assert future1.result() == 42
finished, pending = futures.wait(
[
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1,
future2,
],
timeout=0.1,
return_when=futures.ALL_COMPLETED,
)
assert {
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1,
} == finished
assert {future2} == pending
_executor_mixin._test_event.set()
assert future2.result(timeout=10)
_executor_mixin._test_event.clear()
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(
futures.as_completed(
[
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1,
future2,
]
)
)
assert {
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1,
future2,
} == completed
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
with pytest.raises(futures.TimeoutError):
for future in futures.as_completed(
[
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1,
],
timeout=0,
):
completed_futures.add(future)
assert {
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
} == completed_futures
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
future1 = self.executor.submit(time.sleep, 0.1)
completed = list(futures.as_completed([future1, future1]))
assert len(completed) == 1
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
assert 256 == future.result()
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
assert 16 == future.result()
def test_map(self):
assert list(self.executor.map(pow, range(10), range(10))) == list(
map(pow, range(10), range(10))
)
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
        assert next(i) == (0, 1)
        assert next(i) == (0, 1)
with pytest.raises(ZeroDivisionError):
next(i)
def test_map_timeout(self):
results = []
with pytest.raises(futures.TimeoutError):
for i in self.executor.map(time.sleep, [0, 0, 5], timeout=1):
results.append(i)
assert [None, None] == results
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@pytest.mark.skipif(
platform.python_implementation() != "CPython"
or (sys.version_info >= (3, 8, 0) and sys.version_info < (3, 8, 2)),
reason="Underlying bug fixed upstream starting Python 3.8.2",
)
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
# This test has to be skipped on early Python 3.8 versions because of a
# low-level reference cycle inside the pickle module for early versions
# of Python 3.8 preventing stale references from being collected. See
# cloudpipe/cloudpickle#327 as well as
# https://bugs.python.org/issue39492
my_object = MyObject()
collect = threading.Event()
_ref = weakref.ref(my_object, lambda obj: collect.set()) # noqa
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = False
for _ in range(5):
if IS_PYPY:
gc.collect()
collected = collect.wait(timeout=1.0)
if collected:
return
assert collected, "Stale reference not collected within timeout."
def test_max_workers_negative(self):
for number in (0, -1):
with pytest.raises(ValueError) as infos:
self.executor_type(max_workers=number)
assert infos.value.args[0] == "max_workers must be greater than 0"
@pytest.mark.broken_pool
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
future = self.executor.submit(time.sleep, 30)
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
match = filter_match("SIGTERM")
with pytest.raises(TerminatedWorkerError, match=match):
future.result()
# Submitting other jobs fails as well.
with pytest.raises(TerminatedWorkerError, match=match):
self.executor.submit(pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
assert (
list(self.executor.map(pow, range(40), range(40), chunksize=6))
== ref
)
assert (
list(self.executor.map(pow, range(40), range(40), chunksize=50))
== ref
)
assert (
list(self.executor.map(pow, range(40), range(40), chunksize=40))
== ref
)
with pytest.raises(ValueError):
bad_map()
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with pytest.raises(Exception) as cm:
future.result()
exc = cm.value
assert type(exc) is RuntimeError
assert exc.args == (123,)
cause = exc.__cause__
assert type(cause) is process_executor._RemoteTraceback
assert "raise RuntimeError(123) # some comment" in cause.tb
#
# The following tests are new additions to the test suite originally
# backported from the Python 3 concurrent.futures package.
#
def _test_thread_safety(self, thread_idx, results, timeout=30.0):
try:
# submit a mix of very simple tasks with map and submit,
# cancel some of them and check the results
map_future_1 = self.executor.map(sqrt, range(40), timeout=timeout)
if thread_idx % 2 == 0:
# Make it more likely for scheduling threads to overtake one
# another
time.sleep(0.001)
submit_futures = [
self.executor.submit(time.sleep, 0.0001) for _ in range(20)
]
for i, f in enumerate(submit_futures):
if i % 2 == 0:
f.cancel()
map_future_2 = self.executor.map(sqrt, range(40), timeout=timeout)
assert list(map_future_1) == [sqrt(x) for x in range(40)]
assert list(map_future_2) == [sqrt(i) for i in range(40)]
for i, f in enumerate(submit_futures):
if i % 2 == 1 or not f.cancelled():
assert f.result(timeout=timeout) is None
results[thread_idx] = "ok"
except Exception as e:
# Ensure that py.test can report the content of the exception
# by raising it in the main test thread
results[thread_idx] = e
def test_thread_safety(self):
# Check that our process-pool executor can be shared to schedule work
# by concurrent threads
results = [None] * 10
threads = [
Thread(target=self._test_thread_safety, args=(i, results))
for i in range(len(results))
]
for t in threads:
t.start()
for t in threads:
t.join()
for result in results:
if isinstance(result, Exception):
raise result
assert result == "ok"
@classmethod
def return_inputs(cls, *args):
return args
def test_submit_from_callback(self):
collected = defaultdict(list)
executor = self.executor
def _collect_and_submit_next(future):
name, count = future.result()
collected[name].append(count)
if count > 0:
future = executor.submit(self.return_inputs, name, count - 1)
future.add_done_callback(_collect_and_submit_next)
# Start 3 concurrent callbacks chains
fa = executor.submit(self.return_inputs, "chain a", 100)
fa.add_done_callback(_collect_and_submit_next)
fb = executor.submit(self.return_inputs, "chain b", 50)
fb.add_done_callback(_collect_and_submit_next)
fc = executor.submit(self.return_inputs, "chain c", 60)
fc.add_done_callback(_collect_and_submit_next)
assert fa.result() == ("chain a", 100)
assert fb.result() == ("chain b", 50)
assert fc.result() == ("chain c", 60)
# Wait a maximum of 5s for the asynchronous callback chains to complete
patience = 500
while True:
if (
collected["chain a"] == list(range(100, -1, -1))
and collected["chain b"] == list(range(50, -1, -1))
and collected["chain c"] == list(range(60, -1, -1))
):
# the recursive callback chains have completed successfully
break
elif patience < 0:
raise AssertionError(
f"callback submit chains stalled at: {collected!r}"
)
else:
patience -= 1
time.sleep(0.01)
@pytest.mark.timeout(60)
def test_worker_timeout(self):
self.executor.shutdown(wait=True)
self.check_no_running_workers(patience=5)
timeout = getattr(self, "min_worker_timeout", 0.01)
try:
self.executor = self.executor_type(
max_workers=4, context=self.context, timeout=timeout
)
except NotImplementedError as e:
self.skipTest(str(e))
for _ in range(5):
with warnings.catch_warnings():
# It's ok to get a warning about the worker interrupted by the
# short timeout while tasks are pending in the queue on
# overloaded CI hosts.
warnings.simplefilter("ignore", category=UserWarning)
# Trigger worker spawn for lazy executor implementations
for _ in self.executor.map(id, range(8)):
pass
# Check that all workers shutdown (via timeout) when waiting a bit:
# note that the effective time for a Python process to completely
            # shut down can vary a lot, especially on loaded CI machines with
            # atexit callbacks that write test coverage data to disk.
# Let's be patient.
self.check_no_running_workers(patience=5)
@classmethod
def reducer_in(cls, obj):
return MyObject, (obj.value + 5,)
@classmethod
def reducer_out(cls, obj):
return MyObject, (7 * obj.value,)
def test_serialization(self):
"""Test custom serialization for process_executor"""
self.executor.shutdown(wait=True)
        # Use a non-commutative operation to check the correct order
job_reducers = {}
job_reducers[MyObject] = self.reducer_in
result_reducers = {}
result_reducers[MyObject] = self.reducer_out
# Create a new executor to ensure that we did not mess with the
# existing module level serialization
executor = self.executor_type(
max_workers=2,
context=self.context,
job_reducers=job_reducers,
result_reducers=result_reducers,
)
self.executor = self.executor_type(max_workers=2, context=self.context)
obj = MyObject(1)
try:
ret_obj_custom = executor.submit(self.return_inputs, obj).result()[
0
]
ret_obj = self.executor.submit(self.return_inputs, obj).result()[0]
assert ret_obj.value == 1
assert ret_obj_custom.value == 42
finally:
executor.shutdown(wait=True)
@classmethod
def _test_max_depth(cls, max_depth=10, kill_workers=False, ctx=None):
if max_depth == 0:
return 42
executor = cls.executor_type(1, context=ctx)
        f = executor.submit(cls._test_max_depth, max_depth - 1, kill_workers, ctx)
try:
return f.result()
finally:
executor.shutdown(wait=True, kill_workers=kill_workers)
@pytest.mark.parametrize("kill_workers", [True, False])
def test_max_depth(self, kill_workers):
from loky.process_executor import MAX_DEPTH
if self.context.get_start_method() == "fork":
            # For 'fork', we do not allow nested processes as the threads end
            # up in messy states.
with pytest.raises(LokyRecursionError):
self._test_max_depth(max_depth=2, ctx=self.context)
return
assert (
self._test_max_depth(
max_depth=MAX_DEPTH,
kill_workers=kill_workers,
ctx=self.context,
)
== 42
)
with pytest.raises(LokyRecursionError):
self._test_max_depth(
max_depth=MAX_DEPTH + 1,
kill_workers=kill_workers,
ctx=self.context,
)
@pytest.mark.high_memory
@pytest.mark.skipif(
sys.maxsize < 2**32,
reason="Test requires a 64 bit version of Python",
)
@pytest.mark.skipif(
sys.version_info < (3, 8),
reason="Python version does not support pickling objects of size > 2 ** 31GB",
)
def test_no_failure_on_large_data_send(self):
data = b"\x00" * int(2.2e9)
self.executor.submit(id, data).result()
@pytest.mark.high_memory
@pytest.mark.skipif(
sys.maxsize < 2**32,
reason="Test requires a 64 bit version of Python",
)
@pytest.mark.skipif(
sys.version_info >= (3, 8),
reason="Python version supports pickling objects of size > 2 ** 31GB",
)
def test_expected_failure_on_large_data_send(self):
data = b"\x00" * int(2.2e9)
with pytest.raises(RuntimeError):
self.executor.submit(id, data).result()
def test_memory_leak_protection(self):
pytest.importorskip("psutil") # cannot work without psutil
self.executor.shutdown(wait=True)
executor = self.executor_type(1, context=self.context)
def _leak_some_memory(size=int(3e6), delay=0.001):
"""function that leaks some memory"""
from loky import process_executor
process_executor._MEMORY_LEAK_CHECK_DELAY = 0.1
if getattr(os, "_loky_leak", None) is None:
os._loky_leak = []
os._loky_leak.append(b"\x00" * size)
            # Leave enough time for the memory leak detector to kick in:
# by default the process does not check its memory usage
# more than once per second.
time.sleep(delay)
leaked_size = sum(len(buffer) for buffer in os._loky_leak)
return os.getpid(), leaked_size
with pytest.warns(UserWarning, match="memory leak"):
# Total run time should be 3s which is way over the 1s cooldown
# period between two consecutive memory checks in the worker.
futures = [executor.submit(_leak_some_memory) for _ in range(300)]
executor.shutdown(wait=True)
results = [f.result() for f in futures]
# The pid of the worker has changed when restarting the worker
first_pid, last_pid = results[0][0], results[-1][0]
assert first_pid != last_pid
# The restart happened after 100 MB of leak over the
# default process size + what has leaked since the last
# memory check.
for _, leak_size in results:
assert leak_size / 1e6 < 650
def test_reference_cycle_collection(self):
# make the parallel call create a reference cycle and make
# a weak reference to be able to track the garbage collected objects
self.executor.shutdown(wait=True)
executor = self.executor_type(1, context=self.context)
def _create_cyclic_reference(delay=0.001):
"""function that creates a cyclic reference"""
from loky import process_executor
process_executor._USE_PSUTIL = False
process_executor._MEMORY_LEAK_CHECK_DELAY = 0.1
class A:
def __init__(self, size=int(1e6)):
self.data = b"\x00" * size
self.a = self
if getattr(os, "_loky_cyclic_weakrefs", None) is None:
os._loky_cyclic_weakrefs = []
a = A()
time.sleep(delay)
os._loky_cyclic_weakrefs.append(weakref.ref(a))
return sum(1 for r in os._loky_cyclic_weakrefs if r() is not None)
# Total run time should be 3s which is way over the 1s cooldown
# period between two consecutive memory checks in the worker.
futures = [
executor.submit(_create_cyclic_reference) for _ in range(300)
]
executor.shutdown(wait=True)
max_active_refs_count = max(f.result() for f in futures)
assert max_active_refs_count < 150
assert max_active_refs_count != 1
@pytest.mark.broken_pool
def test_exited_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
print(self.context.get_start_method())
match = filter_match(r"EXIT\(42\)")
future = self.executor.submit(c_exit, 42)
with pytest.raises(TerminatedWorkerError, match=match):
future.result()
# Submitting other jobs fails as well.
with pytest.raises(TerminatedWorkerError, match=match):
self.executor.submit(pow, 2, 8)
@staticmethod
def _test_child_env(var_name):
return os.environ.get(var_name, "unset")
def test_child_env_executor(self):
        # Test that for the loky context, setting the env argument correctly
        # overwrites the environment of the child process.
if self.context.get_start_method() != "loky":
pytest.skip("Only work with loky context")
var_name = "loky_child_env_executor"
var_value = "variable set"
executor = self.executor_type(1, env={var_name: var_value})
var_child = executor.submit(self._test_child_env, var_name).result()
assert var_child == var_value
executor.shutdown(wait=True)
def test_viztracer_profiler(self):
        # Check that the viztracer profiler is initialized in workers when
# installed.
viztracer = pytest.importorskip("viztracer")
def check_viztracer_active():
tracer = viztracer.get_tracer()
if tracer is None:
return False
return tracer.enable
active_in_main_process = check_viztracer_active()
with self.executor_type(1, context=self.context) as e:
active_in_child_process = e.submit(check_viztracer_active).result()
assert active_in_main_process == active_in_child_process
if not active_in_main_process:
tracer = viztracer.VizTracer()
try:
tracer.start()
with self.executor_type(1, context=self.context) as e:
assert e.submit(check_viztracer_active).result()
finally:
tracer.stop()
        # Once the tracer has been stopped, there should be no side effect on
        # workers started in new executors.
with self.executor_type(1, context=self.context) as e:
assert not e.submit(check_viztracer_active).result()
def test_viztracer_profiler_with_custom_init(self):
        # Check that the viztracer profiler is initialized in workers when
# installed.
viztracer = pytest.importorskip("viztracer")
        # Make sure the automatic viztracer initialization works even when
        # the caller passes their own initializer.
def check_viztracer_active_and_custom_init():
assert loky._custom_global_var == 42
tracer = viztracer.get_tracer()
if tracer is None:
return False
return tracer.enable
existing_tracer = viztracer.get_tracer()
if existing_tracer is not None and existing_tracer.enable:
pytest.skip("Cannot run this test if viztracer is active")
tracer = viztracer.VizTracer()
try:
tracer.start()
with self.executor_type(
1, context=self.context, initializer=_custom_initializer
) as e:
assert e.submit(
check_viztracer_active_and_custom_init
).result()
finally:
tracer.stop()
def test_exception_cause_with_tblib(self):
"Ensure tampering with exception pickling do not break __cause__ propagation"
tblib_pickling_support = pytest.importorskip("tblib.pickling_support")
error_message = "This is the error message"
def raise_value_error():
tblib_pickling_support.install()
raise ValueError(error_message)
executor = self.executor_type(max_workers=2)
f = executor.submit(raise_value_error)
try:
f.result()
except ValueError as e:
assert e.__cause__ is not None
assert error_message in str(e.__cause__)
executor.shutdown(wait=True)
def _custom_initializer():
"""_custom_initializer is module function to be picklable
This is necessary for executor implementations that do not
use cloudpickle to pickle the initializer.
"""
loky._custom_global_var = 42
|
540843dbe6db7f4b225d6211108f9cba963d7fcf
|
f1c2e4b3147af77e23306f841610aafd6db1c6b0
|
/submarine-sdk/pysubmarine/example/tracking.py
|
678e5986ff2e980f4c4a3103ff15ec426ae07a34
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"MIT",
"CDDL-1.1",
"Classpath-exception-2.0"
] |
permissive
|
apache/submarine
|
a2927f5f4f7f5faff4701139f2f0f88a98195e7f
|
0c10613f39b707d5e446c515c12fa28295c8052e
|
refs/heads/master
| 2023-08-30T14:35:43.145942
| 2023-08-20T00:19:54
| 2023-08-24T23:50:49
| 209,459,144
| 663
| 269
|
Apache-2.0
| 2023-09-03T09:05:06
| 2019-09-19T04:00:17
|
Java
|
UTF-8
|
Python
| false
| false
| 1,201
|
py
|
tracking.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from sklearn.linear_model import LogisticRegression
import submarine
if __name__ == "__main__":
X = np.array([-2, -1, 0, 1, 2, 1]).reshape(-1, 1)
y = np.array([0, 0, 1, 1, 1, 0])
lr = LogisticRegression(solver="liblinear", max_iter=100)
submarine.log_param("max_iter", 100)
lr.fit(X, y)
score = lr.score(X, y)
print(f"Score: {score}")
submarine.log_metric("score", score)
|
d5f77dce1b071872e8c442d8b43bcb07adff3b23
|
c26483bc1399e7879471a9e53d0288cb2c756088
|
/onnxmltools/convert/coreml/operator_converters/neural_network/Softmax.py
|
8c9987f64ad82066786bab16c1b3a2d9a6b41ad5
|
[
"Apache-2.0"
] |
permissive
|
onnx/onnxmltools
|
6782d9e1d2c75be7618b1378405d31198a310027
|
024a62f6915e6c3b9e040befaf058c7e60c271de
|
refs/heads/main
| 2023-09-04T04:57:10.943548
| 2023-08-28T16:43:37
| 2023-08-28T16:43:37
| 121,798,175
| 827
| 189
|
Apache-2.0
| 2023-09-13T16:07:20
| 2018-02-16T20:37:33
|
Python
|
UTF-8
|
Python
| false
| false
| 452
|
py
|
Softmax.py
|
# SPDX-License-Identifier: Apache-2.0
from ....common._registration import register_converter
def convert_softmax(scope, operator, container):
op_type = "Softmax"
inputs = [variable.full_name for variable in operator.inputs]
outputs = [variable.full_name for variable in operator.outputs]
attrs = {"name": operator.full_name}
container.add_node(op_type, inputs, outputs, **attrs)
register_converter("softmax", convert_softmax)
|
b4618d4b432984edd069141ce566132ba419af1e
|
da8471ad2f90a3efa31acb0c986020357cdb5e4c
|
/confidant/scripts/migrate.py
|
3d60d0ad415fbbce26e77b19099404b05493e559
|
[
"Apache-2.0"
] |
permissive
|
lyft/confidant
|
af18cc7085303ee5bab873c78567e14ae48630ab
|
8033824e0b3c156ee5588e5b31f8dff8e421a01e
|
refs/heads/master
| 2023-09-01T20:46:07.051295
| 2023-08-21T17:01:49
| 2023-08-21T17:01:49
| 42,324,225
| 1,918
| 146
|
Apache-2.0
| 2023-09-06T21:20:59
| 2015-09-11T18:02:12
|
Python
|
UTF-8
|
Python
| false
| false
| 3,822
|
py
|
migrate.py
|
import sys
import logging
from flask_script import Command
from confidant.models.blind_credential import BlindCredential
from confidant.models.service import Service
import json
import six
from pynamodb.attributes import Attribute, UnicodeAttribute
from pynamodb.constants import STRING_SET
from pynamodb.models import Model
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
def is_old_unicode_set(values):
if not values:
return False
return sum([x.startswith('"') for x in values]) > 0
class SetMixin(object):
"""
Adds (de)serialization methods for sets
"""
def serialize(self, value):
"""
Serializes a set
Because dynamodb doesn't store empty attributes,
empty sets return None
"""
if value is not None:
try:
iter(value)
except TypeError:
value = [value]
if len(value):
return [json.dumps(val) for val in sorted(value)]
return None
def deserialize(self, value):
"""
Deserializes a set
"""
if value and len(value):
return set([json.loads(val) for val in value])
class NewUnicodeSetAttribute(SetMixin, Attribute):
"""
A unicode set
"""
attr_type = STRING_SET
null = True
def element_serialize(self, value):
"""
This serializes unicode / strings out as unicode strings.
It does not touch the value if it is already a unicode str
:param value:
:return:
"""
if isinstance(value, six.text_type):
return value
return six.u(str(value))
def element_deserialize(self, value):
return value
def serialize(self, value):
if value is not None:
try:
iter(value)
except TypeError:
value = [value]
if len(value):
return [self.element_serialize(val) for val in sorted(value)]
return None
def deserialize(self, value):
if value and len(value):
return set([self.element_deserialize(val) for val in value])
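# Illustrative sketch (not part of the original migration script): the old
# SetMixin-based attribute JSON-encodes every element, so stored values come
# back wrapped in quotes, which is exactly what is_old_unicode_set() detects;
# the new attribute stores plain strings. Uses only the classes defined above.
def _example_set_attribute_round_trip():
    old_values = SetMixin().serialize({"foo", "bar"})
    # JSON-encoded elements, e.g. ['"bar"', '"foo"'], detected as old format.
    assert is_old_unicode_set(old_values)
    new_attr = NewUnicodeSetAttribute()
    new_values = new_attr.serialize({"foo", "bar"})
    # Plain strings, e.g. ['bar', 'foo'], not detected as old format.
    assert not is_old_unicode_set(new_values)
    assert new_attr.deserialize(new_values) == {"foo", "bar"}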
class GeneralCredentialModel(Model):
class Meta(BlindCredential.Meta):
pass
id = UnicodeAttribute(hash_key=True)
credential_keys = NewUnicodeSetAttribute(default=set([]), null=True)
class GeneralServiceModel(Model):
class Meta(Service.Meta):
pass
id = UnicodeAttribute(hash_key=True)
credentials = NewUnicodeSetAttribute(default=set(), null=True)
blind_credentials = NewUnicodeSetAttribute(default=set(), null=True)
class MigrateBlindCredentialSetAttribute(Command):
def run(self):
total = 0
fail = 0
logger.info('Migrating UnicodeSetAttribute in BlindCredential')
for cred in BlindCredential.data_type_date_index.query(
'blind-credential'):
cred.save()
new_cred = GeneralCredentialModel.get(cred.id)
if is_old_unicode_set(new_cred.credential_keys):
fail += 1
total += 1
logger.info("Fail: {}, Total: {}".format(fail, total))
class MigrateServiceSetAttribute(Command):
def run(self):
total = 0
fail = 0
logger.info('Migrating UnicodeSetAttribute in Service')
for service in Service.data_type_date_index.query(
'service'):
service.save()
new_service = GeneralServiceModel.get(service.id)
if (is_old_unicode_set(new_service.credentials) or
is_old_unicode_set(new_service.blind_credentials)):
fail += 1
total += 1
logger.info("Fail: {}, Total: {}".format(fail, total))
|
7a94e8766a6dc91e72f3a449ed837196dde7baa8
|
c1eb1cf7ab9b416ecf455c2f75b46ab21ee8f8b4
|
/library/api/tFlask.py
|
29ed643edb0b5d2619e3733510e84fe708b3c9fe
|
[
"Apache-2.0"
] |
permissive
|
JunManYuanLong/TcloudServer
|
1b9b45765cb9bfb9bcbde505b80378be657b61a6
|
6b26d7d2d31c0fa8d3954370f8714157f0f610d0
|
refs/heads/master
| 2022-07-30T19:35:07.776461
| 2022-01-26T02:34:16
| 2022-01-26T02:34:16
| 284,940,707
| 359
| 86
|
NOASSERTION
| 2021-04-01T06:10:45
| 2020-08-04T09:54:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,548
|
py
|
tFlask.py
|
from flask import Flask, jsonify
from library.api.db import db
# from library.api.db import cache
from library.api.parse import format_response
from library.api.tMiddleware import t_middleware
from library.tlogger import logger_create
try:
from public_config import SERVER_ENV
except ImportError:
SERVER_ENV = 'product'
class TFlask(Flask):
def make_response(self, rv):
if isinstance(rv, dict):
rv = jsonify(format_response(rv))
        # # 308 redirect for a trailing / at the end of the URL
# elif getattr(rv, 'code') == 308:
# new_rv = rv
# else:
# raise DataTypeErrorException
return super().make_response(rv)
def run(self, host='0.0.0.0', port=5000, debug=True, workers=None, load_dotenv=True, server_env=None, **options):
if server_env == 'dev' or SERVER_ENV == 'dev':
super().run(host=host, port=port, debug=debug)
else:
import multiprocessing
from gunicorn.app.base import BaseApplication
class Application(BaseApplication):
def __init__(self, app, local_options=None):
self.options = local_options or {}
self.application = app
super(Application, self).__init__()
def load_config(self):
config = dict([(key, value) for key, value in self.options.items()
if key in self.cfg.settings and value is not None])
for key, value in config.items():
self.cfg.set(key.lower(), value)
def load(self):
return self.application
def init(self, parser, opts, args):
super(Application, self).init(parser, opts, args)
current_options = {
'bind': f'{host}:{port}',
'workers': workers or (multiprocessing.cpu_count() * 2) + 1,
'worker_class': 'gevent',
'timeout': '1800',
}
Application(self, current_options).run()
def register_logger(app, config):
logger_create(config.SERVICE_NAME, app)
def register_extensions(app):
db.init_app(app)
# cache.init_app(app)
def tflask(config):
app = TFlask(config.SERVICE_NAME)
    # Only used to inject the MySQL connection; values come from the config object rather than current_app.config
app.config.from_object(config)
t_middleware(app)
register_logger(app, config)
register_extensions(app)
return app
|
b512b3a21e9eaafc571ed79a9860f36cc5b41dfc
|
5da5473ff3026165a47f98744bac82903cf008e0
|
/packages/google-cloud-securitycenter/google/cloud/securitycenter_v1/types/cloud_dlp_inspection.py
|
e3e863f45d9436f67be889f5c82e32ecfebb05ff
|
[
"Apache-2.0"
] |
permissive
|
googleapis/google-cloud-python
|
ed61a5f03a476ab6053870f4da7bc5534e25558b
|
93c4e63408c65129422f65217325f4e7d41f7edf
|
refs/heads/main
| 2023-09-04T09:09:07.852632
| 2023-08-31T22:49:26
| 2023-08-31T22:49:26
| 16,316,451
| 2,792
| 917
|
Apache-2.0
| 2023-09-14T21:45:18
| 2014-01-28T15:51:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
cloud_dlp_inspection.py
|
# -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import MutableMapping, MutableSequence
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.securitycenter.v1",
manifest={
"CloudDlpInspection",
},
)
class CloudDlpInspection(proto.Message):
r"""Details about the Cloud Data Loss Prevention (Cloud DLP) `inspection
job <https://cloud.google.com/dlp/docs/concepts-job-triggers>`__
that produced the finding.
Attributes:
inspect_job (str):
Name of the inspection job, for example,
``projects/123/locations/europe/dlpJobs/i-8383929``.
info_type (str):
The type of information (or
`infoType <https://cloud.google.com/dlp/docs/infotypes-reference>`__)
found, for example, ``EMAIL_ADDRESS`` or ``STREET_ADDRESS``.
info_type_count (int):
The number of times Cloud DLP found this
infoType within this job and resource.
full_scan (bool):
Whether Cloud DLP scanned the complete
resource or a sampled subset.
"""
inspect_job: str = proto.Field(
proto.STRING,
number=1,
)
info_type: str = proto.Field(
proto.STRING,
number=2,
)
info_type_count: int = proto.Field(
proto.INT64,
number=3,
)
full_scan: bool = proto.Field(
proto.BOOL,
number=4,
)
__all__ = tuple(sorted(__protobuf__.manifest))
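# Illustrative sketch (not part of the generated client code): constructing
# the message with keyword arguments, using the field names documented above.
def _example_cloud_dlp_inspection():
    return CloudDlpInspection(
        inspect_job="projects/123/locations/europe/dlpJobs/i-8383929",
        info_type="EMAIL_ADDRESS",
        info_type_count=7,
        full_scan=False,
    )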
|
4d2bf795958f5e1299fa83439a0aa8e5c5274dd2
|
568fa58296378fa129ab3349adf010daa44ed45b
|
/tests/common/test_op/ascend/elemwise_sum.py
|
b7d142874576925e47df7becae7a3a020b57fc0a
|
[
"Apache-2.0",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LLVM-exception",
"BSD-2-Clause"
] |
permissive
|
mindspore-ai/akg
|
37f471badc66de6a831f1f45ad84344f34d23ef2
|
99f33858d6972741748cbfc9ab0bf9600428fef7
|
refs/heads/master
| 2023-07-25T23:03:17.672665
| 2023-07-11T07:33:57
| 2023-07-11T07:33:57
| 274,077,856
| 319
| 36
|
Apache-2.0
| 2021-12-30T13:43:08
| 2020-06-22T08:09:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,695
|
py
|
elemwise_sum.py
|
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg
import akg.lang.ascend
import akg.tvm
import akg.utils as utils
from akg.utils import custom_tiling as ct_util
from akg.utils.kernel_exec import debug_mode
elemwise_sum_ad_set_dim_map = {
str(([3, 3], "float16")): ([(1, 0)]),
str(([3, 3], "float32")): ([(1, 0)]),
}
def elemwise_sum_ad_set_dim_func(a, b):
"""setdim function"""
key = []
key.append(tuple(a.shape))
key.append(a.dtype)
hash_key = str(tuple(key))
if hash_key in elemwise_sum_ad_set_dim_map.keys():
return ct_util.set_dims(elemwise_sum_ad_set_dim_map[hash_key]), hash_key
else:
return "", hash_key
@utils.check_input_type(akg.tvm.tensor.Tensor, akg.tvm.tensor.Tensor, (str, type(None)))
def elemwise_sum(a, b, target=utils.CCE):
"""
Element-wise sum data.
Args:
a (tvm.tensor.Tensor): Input `a` of type float16 or float32.
b (tvm.tensor.Tensor): Input `b` of type float16 or float32.
Returns:
tvm.tensor.Tensor, has the same shape and type as inputs.
"""
utils.check_shape(a)
utils.check_shape(b)
dim_info, _ = elemwise_sum_ad_set_dim_func(a, b)
attrs = {"dim": dim_info}
shape = a.shape
c = akg.tvm.compute(shape, lambda *indices: a(*indices) + b(*indices), name="b")
return c, attrs
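# Illustrative sketch (not part of the original test op): building the
# element-wise sum compute from two placeholders, mirroring the placeholder
# usage in elemwise_sum_manual_schedule below.
def _example_elemwise_sum():
    a = akg.tvm.placeholder((3, 3), dtype="float16", name="lhs")
    b = akg.tvm.placeholder((3, 3), dtype="float16", name="rhs")
    # elemwise_sum returns the output tensor and the tiling attrs dict.
    c, attrs = elemwise_sum(a, b)
    return c, attrs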
def elemwise_sum_manual_schedule(input_shape, polyhedral=False, attrs=None):
"""manually schedule"""
b = akg.tvm.placeholder(input_shape, dtype='float16', name="b")
c = akg.tvm.placeholder(input_shape, dtype='float16', name="c")
a = akg.tvm.compute(input_shape, lambda *indices: b(*indices) + c(*indices))
ss = akg.tvm.create_schedule([a.op])
ss.cache_read(b, "local.UB", [a])
ss.cache_read(c, "local.UB", [a])
ss.cache_write(a, "local.UB")
ss[a].set_scope("local.UB")
with akg.build_config(add_lower_pass=debug_mode(0), dump_pass_ir=True):
mod = akg.build(ss,
[b, c, a],
"cce",
name="test_manual_schedule",
attrs=attrs,
polyhedral=polyhedral)
return mod
|
d6451816c6a3c153cecf6b044599c81ef0875a42
|
3081774fbe534fc6d9d40de68f368eda09bad4fa
|
/ServerInstall.py
|
de3da9b5dee1562c8481fff3a5c1531bb35fe5b3
|
[] |
no_license
|
AngelSecurityTeam/Recreator-Phishing
|
c916d4039fbda5c5b45ad7b6ec7e14f72d7391e4
|
0ef1e7c5c9551247399ba24a27022fb881647700
|
refs/heads/master
| 2023-04-03T13:23:04.283354
| 2021-04-17T22:38:06
| 2021-04-17T22:38:06
| 208,916,591
| 287
| 70
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,484
|
py
|
ServerInstall.py
|
from subprocess import check_output
import os
from platform import system as systemos, architecture
from wget import download
# NGROK
def Ngrok():
if True:
if 'Android' in str(check_output(('uname', '-a'))) or 'arm' in str(check_output(('uname', '-a'))):
filename = 'ngrok-stable-linux-arm.zip'
else:
ostype = systemos().lower()
if architecture()[0] == '64bit':
filename = 'ngrok-stable-{0}-amd64.zip'.format(ostype)
else:
filename = 'ngrok-stable-{0}-386.zip'.format(ostype)
url = 'https://bin.equinox.io/c/4VmDzA7iaHb/' + filename
download(url)
os.system('unzip ' + filename)
os.system('rm -Rf ' + filename)
os.system('clear')
#LOCALXPOSE
def Localxpose():
if True:
if 'Android' in str(check_output(('uname', '-a'))) or 'arm' in str(check_output(('uname', '-a'))):
filename = 'loclx-linux-arm.zip'
else:
ostype = systemos().lower()
if architecture()[0] == '64bit':
filename = 'loclx-linux-amd64.zip'.format(ostype)
else:
filename = 'loclx-linux-386.zip'.format(ostype)
url = 'https://lxpdownloads.sgp1.digitaloceanspaces.com/cli/'+filename
download(url)
os.system('unzip loclx*.zip')
os.system('rm loclx*.zip')
os.system("mv loclx* loclx")
os.system('clear')
Ngrok()
Localxpose()
|
c480c4767f25be96f724cf45cb42eb5fe4ac977f
|
2853845c003d03db22f67c3303fa1ec333180ae7
|
/test/trainer/test_step_trainer.py
|
77771364615be07cb74cfacbf8bd32e96efe3711
|
[
"LicenseRef-scancode-proprietary-license",
"Apache-2.0"
] |
permissive
|
bytedance/fedlearner
|
fc1dd2ba2ec88092e83a32732eccea52451ce552
|
436e4959952c970917ee8f47b920f0a76cd4dd05
|
refs/heads/master
| 2023-08-14T23:01:02.875453
| 2023-05-23T03:44:03
| 2023-05-23T03:44:03
| 235,348,659
| 893
| 243
|
Apache-2.0
| 2023-06-08T07:37:18
| 2020-01-21T13:26:35
|
Python
|
UTF-8
|
Python
| false
| false
| 13,110
|
py
|
test_step_trainer.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import os
import socket
import threading
import shutil
import unittest
import numpy as np
import tensorflow as tf
import fedlearner.trainer as flt
run_step = 2
batch_size = 256
def union_shuffle(a, b):
    assert len(a) == len(b)
p = np.random.permutation(len(a))
return a[p], b[p]
(x, y), _ = tf.keras.datasets.mnist.load_data()
x, y = union_shuffle(x, y)
total_features = x.reshape(x.shape[0], -1).astype(np.float32) / 255.0
total_labels = y.astype(np.int64)
drop_count = total_features.shape[0] % (run_step * batch_size)
if drop_count > 0:
total_features = total_features[:-drop_count]
total_labels = total_labels[:-drop_count]
total_leader_features = total_features[:, :total_features.shape[1]//2]
total_follower_features = total_features[:, total_features.shape[1]//2:]
step_leader_features = np.split(total_leader_features, run_step)
step_follower_features = np.split(total_follower_features, run_step)
step_labels = np.split(total_labels, run_step)
class HookContext():
def __init__(self):
self._saved_value = dict()
@property
def saved_value(self):
return self._saved_value
def create_logging_and_save_tensor_hook(self,
tensors,
every_n_iter=None,
every_n_secs=None,
at_end=False,
formatter=None):
return LoggingAndSaveTensorHook(tensors,
every_n_iter,
every_n_secs,
at_end,
formatter,
self._saved_value)
class LoggingAndSaveTensorHook(tf.train.LoggingTensorHook):
def __init__(self,
tensors,
every_n_iter=None,
every_n_secs=None,
at_end=False,
formatter=None,
saved_value=None):
super(LoggingAndSaveTensorHook, self).__init__(tensors,
every_n_iter,
every_n_secs,
at_end,
formatter)
self._saved_value = saved_value if saved_value is not None else dict()
def _log_tensors(self, tensor_values):
for tag in self._tag_order:
if tag not in self._saved_value:
self._saved_value[tag] = []
self._saved_value[tag].append(tensor_values[tag])
super(LoggingAndSaveTensorHook, self)._log_tensors(tensor_values)
def create_input_fn(features, labels):
def input_fn(bridge, master):
def mapfunc(x, y):
feature = {
"x": x,
"example_id": tf.constant(0)
}
return feature, y
return tf.data.Dataset.from_tensor_slices((features, labels)) \
.map(mapfunc) \
.batch(batch_size, drop_remainder=True)
return input_fn
def create_leader_model_fn(hook_context):
def leader_model_fn(model, features, labels, mode):
x, y = features["x"], labels
w1 = tf.get_variable("w1",
shape=[x.shape[1], 32],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(seed=0))
b1 = tf.get_variable("b1",
shape=[32],
dtype=tf.float32,
initializer=tf.zeros_initializer())
w2 = tf.get_variable("w2",
shape=[32 * 2, 10],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(seed=0))
b2 = tf.get_variable("b2",
shape=[10],
dtype=tf.float32,
initializer=tf.zeros_initializer())
act1_l = tf.nn.relu(tf.nn.bias_add(tf.matmul(x, w1), b1))
if mode == tf.estimator.ModeKeys.TRAIN:
act1_f = model.recv("act1_f", tf.float32, require_grad=True)
else:
act1_f = model.recv("act1_f", tf.float32, require_grad=False)
act1 = tf.concat([act1_l, act1_f], axis=1)
logits = tf.nn.bias_add(tf.matmul(act1, w2), b2)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
logits=logits)
loss = tf.math.reduce_mean(loss)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer()
train_op = model.minimize(
optimizer, loss, global_step=tf.train.get_or_create_global_step())
correct = tf.nn.in_top_k(predictions=logits, targets=y, k=1)
acc = tf.reduce_mean(input_tensor=tf.cast(correct, tf.float32))
logging_hook = hook_context.create_logging_and_save_tensor_hook(
{"loss" : loss, "acc" : acc}, every_n_iter=1)
return model.make_spec(
mode=mode, loss=loss, train_op=train_op,
training_hooks=[logging_hook])
else:
classes = tf.argmax(logits, axis=1)
acc_pair = tf.metrics.accuracy(y, classes)
return model.make_spec(
mode=mode, loss=loss, eval_metric_ops={'accuracy': acc_pair})
return leader_model_fn
def follower_model_fn(model, features, labels, mode):
x, _ = features["x"], labels
w1 = tf.get_variable("w1",
shape=[x.shape[1], 32],
dtype=tf.float32,
initializer=tf.random_uniform_initializer(seed=0))
b1 = tf.get_variable("b1",
shape=[32],
dtype=tf.float32,
initializer=tf.zeros_initializer())
act1_f = tf.nn.relu(tf.nn.bias_add(tf.matmul(x, w1), b1))
gact1_f = model.send("act1_f", act1_f, require_grad=True)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer()
train_op = model.minimize(
optimizer,
act1_f,
grad_loss=gact1_f,
global_step=tf.train.get_or_create_global_step())
return model.make_spec(mode,
loss=tf.math.reduce_mean(act1_f),
train_op=train_op)
else:
model.send("act1_f", act1_f, require_grad=False)
fake_loss = tf.reduce_mean(act1_f)
return model.make_spec(mode=mode, loss=fake_loss)
class _CreateParamaterServer():
def __init__(self):
self._server = tf.train.Server.create_local_server()
@property
def address(self):
return self._server.target[7:]
def stop(self):
del(self._server)
class TestStepTrain(unittest.TestCase):
def setUp(self):
self._current_dir = os.path.dirname(__file__)
self._model_dir = os.path.join(self._current_dir, 'tmp_model')
shutil.rmtree(self._model_dir, ignore_errors=True)
self._leader_output = os.path.join(self._model_dir, 'leader')
self._follower_output = os.path.join(self._model_dir, 'follower')
self._leader_bridge_addr = _get_free_tcp_address()
self._follower_bridge_addr = _get_free_tcp_address()
def tearDown(self):
shutil.rmtree(self._model_dir, ignore_errors=True)
def _run_train(self,
hook_context,
leader_features,
follower_features,
labels,
leader_checkpoint_path=None,
follower_checkpoint_path=None,
save_checkpoint_step=100):
""" train without checkpoint"""
parser = flt.trainer_worker.create_argument_parser()
# leader
leader_ps1 = _CreateParamaterServer()
leader_raw_args = (
"--local-addr", self._leader_bridge_addr,
"--peer-addr", self._follower_bridge_addr,
"--data-path", self._current_dir, # noused
"--ps-addrs", ",".join([leader_ps1.address]),
"--loglevel", "debug",
)
if leader_checkpoint_path:
leader_raw_args += \
("--checkpoint-path", leader_checkpoint_path)
leader_raw_args += \
("--save-checkpoint-steps", str(save_checkpoint_step))
leader_trainer = threading.Thread(
target=flt.trainer_worker.train,
args=("leader",
parser.parse_args(leader_raw_args),
create_input_fn(leader_features, labels),
create_leader_model_fn(hook_context),
None)
)
# follower
follower_ps1 = _CreateParamaterServer()
follower_raw_args = (
"--local-addr", self._follower_bridge_addr,
"--peer-addr", self._leader_bridge_addr,
"--data-path", self._current_dir, # noused
"--ps-addrs", ",".join([follower_ps1.address]),
"--loglevel", "debug",
)
if follower_checkpoint_path:
follower_raw_args += \
("--checkpoint-path",follower_checkpoint_path)
follower_raw_args += \
("--save-checkpoint-steps", str(save_checkpoint_step))
follower_trainer = threading.Thread(
target=flt.trainer_worker.train,
args=("follower",
parser.parse_args(follower_raw_args),
create_input_fn(follower_features, labels),
follower_model_fn,
None)
)
leader_trainer.start()
follower_trainer.start()
leader_trainer.join()
follower_trainer.join()
leader_ps1.stop()
follower_ps1.stop()
def test_train(self):
# run all in one step
total_hook_context = HookContext()
self._run_train(total_hook_context,
total_leader_features,
total_follower_features,
total_labels)
print(total_hook_context.saved_value)
# run step by step
step_hook_context = HookContext()
leader_checkpoint_path = \
os.path.join(self._leader_output, "checkpoints")
follower_checkpoint_path = \
os.path.join(self._follower_output, "checkpoints")
for i in range(run_step):
leader_features = step_leader_features[i]
follower_features = step_follower_features[i]
labels = step_labels[i]
self._run_train(step_hook_context,
leader_features,
follower_features,
labels,
leader_checkpoint_path=leader_checkpoint_path,
follower_checkpoint_path=follower_checkpoint_path,
save_checkpoint_step=200)
print(step_hook_context.saved_value)
# check
assert len(total_hook_context.saved_value) == \
len(step_hook_context.saved_value)
for tag in total_hook_context.saved_value:
assert tag in step_hook_context.saved_value
assert len(total_hook_context.saved_value[tag]) == \
len(step_hook_context.saved_value[tag])
print("%stag: %s%s"%("*"*32, tag, "*"*32))
print("%15s%20s%20s%20s"%("index(step)", "total", "part", "diff"))
step = 0
count = len(step_hook_context.saved_value[tag]) // run_step
for i, v1 in enumerate(total_hook_context.saved_value[tag]):
if i % count == 0:
step += 1
v2 = step_hook_context.saved_value[tag][i]
print("%15s%20f%20f%20f"% \
(str(i)+"("+str(step)+")", v1, v2, v1-v2))
assert v1 == v2
print("%s"%("*"*75))
print()
def _get_free_tcp_address():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', 0))
host, port = s.getsockname()
s.close()
return "%s:%d"%(host, port)
if __name__ == "__main__":
unittest.main()
|
fac5944ed45d470dfc9b4fcfb8be9dbe98e14416
|
b8441dc1987be9e64fa3081d456b2a3060ec44d1
|
/mars/tensor/reshape/tests/test_reshape.py
|
bf0a25944210d2e0faa223c83b723afbc942f0a0
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"CC0-1.0",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mars-project/mars
|
f99fefbce999d58a9249bc72046787a9731c9c73
|
c36c53fa22e10ef9477d9c454401a2f281375f31
|
refs/heads/master
| 2023-07-23T00:23:55.133015
| 2023-07-03T11:44:54
| 2023-07-03T11:44:54
| 160,543,708
| 2,704
| 362
|
Apache-2.0
| 2023-09-11T07:57:35
| 2018-12-05T16:04:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
test_reshape.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from ....core import tile
from ....core.operand import OperandStage
from ...datasource import ones
from ..reshape import TensorReshape
def test_reshape():
a = ones((10, 20, 30), chunk_size=5)
b = a.reshape(10, 600)
b = tile(b)
assert tuple(sum(s) for s in b.nsplits) == (10, 600)
a = ones((10, 600), chunk_size=5)
b = a.reshape(10, 30, 20)
b = tile(b)
assert tuple(sum(s) for s in b.nsplits) == (10, 30, 20)
a = ones((10, 600), chunk_size=5)
a.shape = [10, 30, 20]
a = tile(a)
assert tuple(sum(s) for s in a.nsplits) == (10, 30, 20)
# test reshape unknown shape
c = a[a > 0]
d = c.reshape(10, 600)
assert d.shape == (10, 600)
d = c.reshape(-1, 10)
assert len(d.shape) == 2
assert np.isnan(d.shape[0])
    assert d.shape[1] == 10
with pytest.raises(TypeError):
a.reshape((10, 30, 20), other_argument=True)
def test_shuffle_reshape():
a = ones((31, 27), chunk_size=10)
b = a.reshape(27, 31)
b.op.extra_params["_reshape_with_shuffle"] = True
b = tile(b)
assert tuple(sum(s) for s in b.nsplits) == (27, 31)
assert isinstance(b.chunks[0].op, TensorReshape)
assert b.chunks[0].op.stage == OperandStage.reduce
shuffle_map_sample = b.chunks[0].inputs[0].inputs[0]
assert isinstance(shuffle_map_sample.op, TensorReshape)
assert shuffle_map_sample.op.stage == OperandStage.map
|
94845278f783a644a243ed829affc042abf5500b
|
32e910f5440c10b384bb26b5555ac7adb77540ee
|
/tools/infrastructure/install_hooks.py
|
1e1f63de43e76ba57ebf35ef3fec882b653719e5
|
[] |
permissive
|
smartdevicelink/sdl_core
|
76658282fd85b16ed6d91d8d4087d8cd1353db76
|
7343fc72c12edc8ac42a62556c9e4b29c9408bc3
|
refs/heads/master
| 2022-11-04T12:17:58.725371
| 2022-10-26T15:34:13
| 2022-10-26T15:34:13
| 24,724,170
| 269
| 306
|
BSD-3-Clause
| 2022-10-26T15:34:15
| 2014-10-02T15:16:26
|
C++
|
UTF-8
|
Python
| false
| false
| 978
|
py
|
install_hooks.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Install (copy) git hooks
"""
import os
import glob
import shutil
from utils import setup_working_dir
def uninstall_hooks(hooks_dir):
print('Deleting existing pre-commit hooks from {}'.format(hooks_dir))
files = glob.glob(os.path.join(hooks_dir, 'pre-commit*'))
for item in files:
os.remove(item)
def install_hooks(src_dir, dst_dir):
print('Installing pre-commit hooks')
src_files = glob.glob(os.path.join(src_dir, 'pre-commit*'))
for item in src_files:
shutil.copy(item, dst_dir)
def main():
''' Main logic '''
setup_working_dir()
print('Current working dir is {}'.format(os.getcwd()))
hooks_src_dir = os.path.join(
os.getcwd(), 'tools', 'infrastructure', 'git-hooks')
hooks_dst_dir = os.path.join(os.getcwd(), '.git', 'hooks')
uninstall_hooks(hooks_dst_dir)
install_hooks(hooks_src_dir, hooks_dst_dir)
if __name__ == '__main__':
main()
|
60fbecdd159e278f0459573b947c7f5bab885f8b
|
e24f602d08fa2bdffde52abc5c33dce3480cb403
|
/src/harvesters/_private/core/subject.py
|
af8edc7dd01421ed66459d1be92eb91033662638
|
[
"CC0-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
genicam/harvesters
|
0a7a1b61ea275e6705ba19453d2a3f13d3b890da
|
b97fec3c0972007b1a15a2b6a8b7e5582d6d72d7
|
refs/heads/master
| 2023-08-14T15:19:54.533436
| 2023-04-10T09:17:50
| 2023-04-10T09:17:50
| 133,908,095
| 462
| 88
|
Apache-2.0
| 2023-08-02T04:25:30
| 2018-05-18T05:40:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,354
|
py
|
subject.py
|
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
# Related third party imports
# Local application/library specific imports
class Subject:
def __init__(self):
#
super().__init__()
#
self._observers = []
def add_observer(self, observer):
if observer not in self._observers:
self._observers.append(observer)
def remove_observer(self, observer):
if observer in self._observers:
self._observers.remove(observer)
def update_observers(self):
# Update its observers.
for o in self._observers:
o.update()
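# Illustrative usage (added for clarity, not part of the original module).
# `_PrintObserver` is a hypothetical observer that provides the update() method
# Subject expects; any object with that method works the same way.
if __name__ == '__main__':
    class _PrintObserver:
        def update(self):
            print('notified')
    subject = Subject()
    subject.add_observer(_PrintObserver())
    subject.update_observers()  # prints 'notified'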
|
2c3e15131d0fddb7764d02213f2e87be4c270b7b
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/14_并查集/经典题/2076. 处理含限制条件的好友请求.py
|
f14cbbdba22a00487f308241e38d44035d2497a2
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,693
|
py
|
2076. 处理含限制条件的好友请求.py
|
from collections import defaultdict
from typing import DefaultDict, List
from 可撤销并查集 import RevocableUnionFindArray
# restrictions[i] = [xi, yi] means that user xi and user yi cannot become friends
# If the j-th friend request succeeds, then result[j] is true
# 2 <= n <= 1000
# 0 <= restrictions.length <= 1000
# 1 <= requests.length <= 1000
class UnionFind:
__slots__ = ("n", "part", "parent", "rank")
def __init__(self, n: int):
self.n = n
self.part = n
self.parent = list(range(n))
self.rank = [1] * n
def find(self, x: int) -> int:
while x != self.parent[x]:
self.parent[x] = self.parent[self.parent[x]]
x = self.parent[x]
return x
def union(self, x: int, y: int) -> bool:
rootX = self.find(x)
rootY = self.find(y)
if rootX == rootY:
return False
if self.rank[rootX] > self.rank[rootY]:
rootX, rootY = rootY, rootX
self.parent[rootX] = rootY
self.rank[rootY] += self.rank[rootX]
self.part -= 1
return True
def isConnected(self, x: int, y: int) -> bool:
return self.find(x) == self.find(y)
def getGroups(self) -> DefaultDict[int, List[int]]:
groups = defaultdict(list)
for key in range(self.n):
root = self.find(key)
groups[root].append(key)
return groups
def getRoots(self) -> List[int]:
return list(set(self.find(key) for key in self.parent))
def __repr__(self) -> str:
return "\n".join(f"{root}: {member}" for root, member in self.getGroups().items())
def __len__(self) -> int:
return self.part
class Solution:
def friendRequests(
self, n: int, restrictions: List[List[int]], requests: List[List[int]]
) -> List[bool]:
"""并查集,连接之前遍历限制找到对应组的边,如果边重合,那么就不能连这条边 O(n^2)"""
res = []
uf = UnionFind(n)
for user1, user2 in requests:
root1, root2 = uf.find(user1), uf.find(user2)
if root1 == root2:
res.append(True)
else:
for user3, user4 in restrictions:
root3, root4 = uf.find(user3), uf.find(user4)
if (root1 == root3 and root2 == root4) or (root1 == root4 and root2 == root3):
res.append(False)
break
else:
uf.union(user1, user2)
res.append(True)
return res
def friendRequests2(
self, n: int, restrictions: List[List[int]], requests: List[List[int]]
) -> List[bool]:
"""可撤销并查集"""
res = []
uf = RevocableUnionFindArray(n)
for user1, user2 in requests:
uf.union(user1, user2)
ok = True
for user3, user4 in restrictions:
if uf.isConnected(user3, user4):
uf.revocate()
ok = False
break
res.append(ok)
return res
print(Solution().friendRequests(n=3, restrictions=[[0, 1]], requests=[[0, 2], [2, 1]]))
print(Solution().friendRequests2(n=3, restrictions=[[0, 1]], requests=[[0, 2], [2, 1]]))
# Output: [true, false]
# Explanation:
# Request 0: user 0 and user 2 can become friends, so they become direct friends.
# Request 1: user 2 and user 1 cannot become friends, because that would make user 0 and user 1 indirect friends (1--2--0).
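# Added for clarity (illustrative check, not part of the original file): the calls above
# should reproduce the expected output described in the comments.
assert Solution().friendRequests(n=3, restrictions=[[0, 1]], requests=[[0, 2], [2, 1]]) == [True, False]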
|
743032888e6de6f1b996f5d47c5b96c9b6c5edf2
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/torch/package/_package_pickler.py
|
cabc6a82164fb3aaf767f14cf60bca58535fcf61
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 4,622
|
py
|
_package_pickler.py
|
"""isort:skip_file"""
from pickle import ( # type: ignore[attr-defined]
_compat_pickle,
_extension_registry,
_getattribute,
_Pickler,
EXT1,
EXT2,
EXT4,
GLOBAL,
Pickler,
PicklingError,
STACK_GLOBAL,
)
from struct import pack
from types import FunctionType
from .importer import Importer, ObjMismatchError, ObjNotFoundError, sys_importer
class PackagePickler(_Pickler):
"""Package-aware pickler.
This behaves the same as a normal pickler, except it uses an `Importer`
to find objects and modules to save.
"""
def __init__(self, importer: Importer, *args, **kwargs):
self.importer = importer
super().__init__(*args, **kwargs)
# Make sure the dispatch table copied from _Pickler is up-to-date.
# Previous issues have been encountered where a library (e.g. dill)
# mutate _Pickler.dispatch, PackagePickler makes a copy when this lib
# is imported, then the offending library removes its dispatch entries,
# leaving PackagePickler with a stale dispatch table that may cause
# unwanted behavior.
self.dispatch = _Pickler.dispatch.copy() # type: ignore[misc]
self.dispatch[FunctionType] = PackagePickler.save_global # type: ignore[assignment]
def save_global(self, obj, name=None):
# unfortunately the pickler code is factored in a way that
# forces us to copy/paste this function. The only change is marked
# CHANGED below.
write = self.write # type: ignore[attr-defined]
memo = self.memo # type: ignore[attr-defined]
# CHANGED: import module from module environment instead of __import__
try:
module_name, name = self.importer.get_name(obj, name)
except (ObjNotFoundError, ObjMismatchError) as err:
raise PicklingError(f"Can't pickle {obj}: {str(err)}") from None
module = self.importer.import_module(module_name)
_, parent = _getattribute(module, name)
# END CHANGED
if self.proto >= 2: # type: ignore[attr-defined]
code = _extension_registry.get((module_name, name))
if code:
assert code > 0
if code <= 0xFF:
write(EXT1 + pack("<B", code))
elif code <= 0xFFFF:
write(EXT2 + pack("<H", code))
else:
write(EXT4 + pack("<i", code))
return
lastname = name.rpartition(".")[2]
if parent is module:
name = lastname
# Non-ASCII identifiers are supported only with protocols >= 3.
if self.proto >= 4: # type: ignore[attr-defined]
self.save(module_name) # type: ignore[attr-defined]
self.save(name) # type: ignore[attr-defined]
write(STACK_GLOBAL)
elif parent is not module:
self.save_reduce(getattr, (parent, lastname)) # type: ignore[attr-defined]
elif self.proto >= 3: # type: ignore[attr-defined]
write(
GLOBAL
+ bytes(module_name, "utf-8")
+ b"\n"
+ bytes(name, "utf-8")
+ b"\n"
)
else:
if self.fix_imports: # type: ignore[attr-defined]
r_name_mapping = _compat_pickle.REVERSE_NAME_MAPPING
r_import_mapping = _compat_pickle.REVERSE_IMPORT_MAPPING
if (module_name, name) in r_name_mapping:
module_name, name = r_name_mapping[(module_name, name)]
elif module_name in r_import_mapping:
module_name = r_import_mapping[module_name]
try:
write(
GLOBAL
+ bytes(module_name, "ascii")
+ b"\n"
+ bytes(name, "ascii")
+ b"\n"
)
except UnicodeEncodeError:
raise PicklingError(
"can't pickle global identifier '%s.%s' using "
"pickle protocol %i" % (module, name, self.proto) # type: ignore[attr-defined]
) from None
self.memoize(obj) # type: ignore[attr-defined]
def create_pickler(data_buf, importer, protocol=4):
if importer is sys_importer:
# if we are using the normal import library system, then
# we can use the C implementation of pickle which is faster
return Pickler(data_buf, protocol=protocol)
else:
return PackagePickler(importer, data_buf, protocol=protocol)
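# Illustrative round trip (added for clarity, not part of the original module).
# With the default `sys_importer`, create_pickler falls back to the stock C pickler,
# so a plain object pickles and unpickles as usual.
if __name__ == "__main__":
    import io
    import pickle
    buf = io.BytesIO()
    pickler = create_pickler(buf, sys_importer)
    pickler.dump({"answer": 42})
    assert pickle.loads(buf.getvalue()) == {"answer": 42}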
|
4b1c27472b9d78bcac874203cbc1c52a34fc0eab
|
7f0ed84404abb57c3bc062cd986b67c6a254d3f3
|
/check.py
|
40c6aa979b88d37df35bdcc5b9453e708d28cfa3
|
[
"BSD-3-Clause"
] |
permissive
|
abhinavsingh/proxy.py
|
ad8eff50476815c4654cade3b6fe628e1ecea2eb
|
30574fd0414005dfa8792a6e797023e862bdcf43
|
refs/heads/develop
| 2023-09-01T03:40:13.473734
| 2023-04-17T04:12:18
| 2023-04-17T04:12:18
| 12,228,178
| 2,691
| 657
|
BSD-3-Clause
| 2023-09-08T11:56:39
| 2013-08-19T21:33:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,934
|
py
|
check.py
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import sys
import subprocess
from pathlib import Path
from proxy.common.version import __version__ as lib_version
# This script ensures our versions never run out of sync.
#
# 1. TODO: The version is hardcoded in the homebrew stable package
#    installer file, but it only needs to match the lib
#    version when the current git branch is master
PY_FILE_PREFIX = b'# -*- coding: utf-8 -*-\n' + \
b'"""\n' + \
b' proxy.py\n' + \
b' ~~~~~~~~\n' + \
b' \xe2\x9a\xa1\xe2\x9a\xa1\xe2\x9a\xa1 Fast, Lightweight, Pluggable, TLS interception capable' + \
b' proxy server focused on\n' + \
b' Network monitoring, controls & Application development, testing, debugging.\n' + \
b'\n' + \
b' :copyright: (c) 2013-present by Abhinav Singh and contributors.\n' + \
b' :license: BSD, see LICENSE for more details.\n'
REPO_ROOT = Path(__file__).parent
ALL_PY_FILES = (
list(REPO_ROOT.glob('*.py')) +
list((REPO_ROOT / 'proxy').rglob('*.py')) +
list((REPO_ROOT / 'examples').rglob('*.py')) +
list((REPO_ROOT / 'skeleton').rglob('*.py')) +
list((REPO_ROOT / 'benchmark').rglob('*.py')) +
list((REPO_ROOT / 'tests').rglob('*.py'))
)
# Ensure all python files start with licensing information
for py_file in ALL_PY_FILES:
if py_file.is_file() and py_file.name != '_scm_version.py':
with open(py_file, 'rb') as f:
code = f.read(len(PY_FILE_PREFIX))
if code != PY_FILE_PREFIX:
print(
'Expected license not found in {0}'.format(
str(py_file),
),
)
sys.exit(1)
# Update README.md flags section to match current library --help output
lib_help = subprocess.check_output(
['python', '-m', 'proxy', '-h'],
)
with open('README.md', 'rb+') as f:
c = f.read()
pre_flags, post_flags = c.split(b'# Flags')
f.seek(0)
f.write(
pre_flags +
b'# Flags\n\n```console\n\xe2\x9d\xaf proxy -h\n' +
lib_help +
b'```\n',
)
# Version is also hardcoded in README.md flags section
readme_version_cmd = 'cat README.md | grep "proxy.py v" | tail -2 | head -1 | cut -d " " -f 2 | cut -c2-'
readme_version_output = subprocess.check_output(
['bash', '-c', readme_version_cmd],
)
# Doesn't contain "v" prefix
readme_version = readme_version_output.decode().strip()
if readme_version != lib_version:
print(
'Version mismatch found. {0} (readme) vs {1} (lib).'.format(
readme_version, lib_version,
),
)
sys.exit(1)
|
6cd0af7afb580dc7e428bf195f777e845da35086
|
8d585fa3b2419d9b993be2f2652e448cfeedc8b2
|
/checks/wmi_check.py
|
2ae2fdd911f5c74b15a7331b4b70a110e20a81ca
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
DataDog/dd-agent
|
bd4ef0edb234293b51d30894a529ce94b37060f8
|
16fa4ec9ae11ca0adfffbd260c5b4899dc73509f
|
refs/heads/master
| 2023-08-16T09:52:21.816487
| 2023-07-11T15:37:34
| 2023-07-11T15:37:34
| 1,210,071
| 1,227
| 991
|
NOASSERTION
| 2023-06-28T12:20:19
| 2010-12-31T03:02:47
|
Python
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
wmi_check.py
|
# provided for backward compatibility. wmi_check.py renamed to winwmi_check.py
# to prevent collision with the actual wmi check; provide the redirect for
# any agent check that uses the base library
#
# this file will be deprecated in Agent6
from checks.winwmi_check import ( # noqa: F401
WMIMetric,
InvalidWMIQuery,
MissingTagBy,
TagQueryUniquenessFailure,
WinWMICheck,
to_time,
from_time,
) # noqa: F401
|
a50d617b1350bc45c9ad022b26f38042dceb2ba2
|
ce1c91c33d9b612e97361527e5a974996208c90d
|
/glue/core/tests/test_pandas.py
|
282597ddbb3e720ef6a488c3a32898735ee5282c
|
[
"BSD-3-Clause"
] |
permissive
|
glue-viz/glue
|
5f52faaf91e1ca4822d3983b6a4b9b60e8807f38
|
1a5c7676c025a1a025068b806f6f90ed53bba543
|
refs/heads/main
| 2023-09-04T09:24:00.519833
| 2023-08-17T09:40:04
| 2023-08-17T09:40:04
| 1,768,238
| 609
| 149
|
NOASSERTION
| 2023-09-13T20:56:14
| 2011-05-18T20:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,204
|
py
|
test_pandas.py
|
import numpy as np
import pandas as pd
from unittest.mock import MagicMock
from pandas.testing import (assert_series_equal,
assert_frame_equal)
from ..component import Component, DerivedComponent, CategoricalComponent
from ..data import Data
class TestPandasConversion(object):
def test_Component_conversion(self):
comp = Component(np.arange(5))
series = pd.Series(np.arange(5))
assert_series_equal(series, comp.to_series())
def test_DerivedComponent_conversion(self):
data = MagicMock()
link = MagicMock()
link.compute.return_value = np.arange(5)
comp = DerivedComponent(data, link)
series = pd.Series(np.arange(5))
assert_series_equal(series, comp.to_series())
def test_CategoricalComponent_conversion(self):
comp = CategoricalComponent(np.array(['a', 'b', 'c', 'd']))
series = pd.Series(['a', 'b', 'c', 'd'])
assert_series_equal(series, comp.to_series())
def test_CoordinateComponent_conversion(self):
d = Data(x=[1, 2, 3])
series = pd.Series(np.array([0, 1, 2]))
comp = d.get_component(d.pixel_component_ids[0])
assert_series_equal(series, comp.to_series())
def test_Data_conversion(self):
d = Data(n=np.array([4, 5, 6, 7]))
cat_comp = CategoricalComponent(np.array(['a', 'b', 'c', 'd']))
d.add_component(cat_comp, 'c')
link = MagicMock()
link.compute.return_value = np.arange(4)
deriv_comp = DerivedComponent(d, link)
d.add_component(deriv_comp, 'd')
order = [comp.label for comp in d.components]
frame = pd.DataFrame()
frame['Pixel Axis 0 [x]'] = np.ogrid[0:4]
frame['n'] = np.array([4, 5, 6, 7])
frame['c'] = ['a', 'b', 'c', 'd']
frame['d'] = np.arange(4)
out_frame = d.to_dataframe()
assert_frame_equal(out_frame, frame)
assert list(out_frame.columns) == order
def test_multi_dimensional(self):
a = np.array([[2, 3], [5, 4], [6, 7]])
comp = Component(a)
series = pd.Series(a.ravel())
assert_series_equal(series, comp.to_series())
|
a9292ccfcdfe3a0b92ba8d9dfe25f139ae3005f6
|
b49e949f8e6dff23c82be162c6fe5562c7a0afdd
|
/Projection/DataUtils/eval.py
|
b7acf41667ceefd06d707bb26c0c106c01674bc6
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
scofield7419/XSRL-ACL
|
8bb2e5bb445611b02a2377b75dbbaa73d01c18d9
|
ea918bfecb360dfcf741a9a7dc59cc6d7eae6628
|
refs/heads/master
| 2023-06-24T09:36:44.696791
| 2023-06-15T09:15:25
| 2023-06-15T09:15:25
| 254,301,176
| 281
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,166
|
py
|
eval.py
|
class Eval:
def __init__(self):
self.predict_num = 0
self.correct_num = 0
self.gold_num = 0
self.precision = 0
self.recall = 0
self.fscore = 0
self.acc = 0
def clear_PRF(self):
self.predict_num = 0
self.correct_num = 0
self.gold_num = 0
self.precision = 0
self.recall = 0
self.fscore = 0
def getFscore(self, y_pred, y_true, all_sentence_length):
for i in range(len(y_true)):
sentence_length = all_sentence_length[i]
for p_lable, g_lable in zip(y_pred[i][:sentence_length], y_true[i][:sentence_length]):
if (p_lable == g_lable) and (p_lable!= '_'):
self.correct_num += 1
true_labels = [item for item in y_true[i][:sentence_length] if item != '_']
pred_labels = [item for item in y_pred[i][:sentence_length] if item != '_']
self.predict_num += len(pred_labels)
self.gold_num += len(true_labels)
if self.predict_num == 0:
self.precision = 0
else:
self.precision = (self.correct_num / self.predict_num) * 100
if self.gold_num == 0:
self.recall = 0
else:
self.recall = (self.correct_num / self.gold_num) * 100
if self.precision + self.recall == 0:
self.fscore = 0
else:
self.fscore = (2 * (self.precision * self.recall)) / (self.precision + self.recall)
self.accuracy(y_pred, y_true, all_sentence_length)
return self.precision, self.recall, self.fscore, self.acc
def accuracy(self, predict_labels, gold_labels, all_sentence_length):
cor = 0
totol_leng = sum([len(predict_label) for predict_label in predict_labels])
for p_lable, g_lable, sentence_length in zip(predict_labels, gold_labels, all_sentence_length):
for p_lable_, g_lable_ in zip(p_lable[:sentence_length], g_lable[:sentence_length]):
if p_lable_ == g_lable_:
cor += 1
self.acc = cor / totol_leng * 100
return self.acc
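# Illustrative usage (added for clarity, not part of the original module): '_' marks an
# empty label, so only non-'_' predictions and gold labels count toward precision/recall,
# while accuracy is computed over every position.
if __name__ == "__main__":
    evaluator = Eval()
    p, r, f, acc = evaluator.getFscore(
        y_pred=[["A", "_", "B"]], y_true=[["A", "_", "_"]], all_sentence_length=[3]
    )
    print(p, r, f, acc)  # 50.0 100.0 66.66... 66.66...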
|
7ddae8e13746428ed53cbc9de57c4b4b271b03df
|
3537abc0aac3a8e4184572a290039bdde8e2d3f6
|
/src/tests/older_versions/v100/test_list_union_remove_and_increment.py
|
204d15107920a71a36e25d120823001ae17b41ad
|
[
"Apache-2.0"
] |
permissive
|
octabytes/FireO
|
859ba0baecbd339c7cdc0cdf8b145f3b2b81ce3a
|
cd019ef140fa8f9f32316ab4dce99ea0cd230284
|
refs/heads/master
| 2023-09-03T07:06:35.567277
| 2023-07-18T06:24:14
| 2023-07-18T06:24:14
| 211,646,730
| 274
| 43
|
Apache-2.0
| 2023-09-05T07:55:25
| 2019-09-29T10:37:00
|
Python
|
UTF-8
|
Python
| false
| false
| 910
|
py
|
test_list_union_remove_and_increment.py
|
import fireo
from fireo.fields import ListField, NumberField
from fireo.models import Model
class SpecCity(Model):
states = ListField()
population = NumberField()
def test_list_union():
city = SpecCity.collection.create(states=['LA', 'DC'], population=100)
city.states = fireo.ListUnion(['AB'])
city.update()
city = SpecCity.collection.get(city.key)
assert city.states == ['LA', 'DC', 'AB']
def test_list_remove():
city = SpecCity.collection.create(states=['LA', 'DC'], population=100)
city.states = fireo.ListRemove(['DC'])
city.update()
city = SpecCity.collection.get(city.key)
assert city.states == ['LA']
def test_number_increment():
city = SpecCity.collection.create(states=['LA', 'DC'], population=100)
city.population = fireo.Increment(50)
city.update()
city = SpecCity.collection.get(city.key)
assert city.population == 150
|
811a0f04247b76feff55588b487c469368258173
|
cb387354b5c160f31e16baec5eda433538b2bd4f
|
/setup.py
|
4458676e9412fab84a5e074d22c735bfd5f3808b
|
[
"BSD-2-Clause-Views"
] |
permissive
|
fzlee/alipay
|
88a93077d0f64b0d64f9d9fc57f6c74814e789b9
|
79cc3067e3accbbfd31e54e95281060421fc3c57
|
refs/heads/master
| 2023-08-07T16:18:54.903696
| 2023-06-10T06:11:34
| 2023-06-10T06:11:34
| 74,120,580
| 1,416
| 442
|
NOASSERTION
| 2023-01-10T10:38:03
| 2016-11-18T10:37:16
|
Python
|
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
setup.py
|
#!/usr/bin/env python
# coding: utf-8
"""
setup.py
~~~~~~~~~~
"""
from setuptools import setup
import unittest
def alipay_test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('tests')
return test_suite
setup(
name="python-alipay-sdk",
version="3.3.0",
author="fzlee",
author_email="hi@ifconfiger.com",
description="Python SDK for AliPay, RSA is the only sign method we support",
license="BSD",
keywords="python sdk alipay",
url="https://github.com/fzlee/alipay",
packages=['alipay'],
classifiers=[
"Development Status :: 3 - Alpha",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
],
install_requires=["pycryptodomex>=3.15.0", "pyOpenSSL>=22.0.0"],
test_suite="setup.alipay_test_suite"
)
|
187aa50d0a7ef3b1280c27814c5763a85995b931
|
8b41d9bcc3367c5532193f3e3015987cfb378713
|
/hparams.py
|
ad0b0d11df2466d21cf480fea09e8d3a8485d47e
|
[
"MIT"
] |
permissive
|
BogiHsu/Tacotron2-PyTorch
|
063ef7da8a8943395bac38aec98ab34b7df3c330
|
b1761fd7660e56adf39f3c8d02852fbaec1da2c5
|
refs/heads/master
| 2022-05-10T05:13:57.226182
| 2022-04-12T10:40:56
| 2022-04-12T10:40:56
| 189,844,891
| 155
| 38
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,868
|
py
|
hparams.py
|
from text import symbols
class hparams:
seed = 0
################################
# Data Parameters #
################################
text_cleaners=['english_cleaners']
################################
# Audio #
################################
num_mels = 80
num_freq = 513
sample_rate = 22050
frame_shift = 256
frame_length = 1024
fmin = 0
fmax = 8000
power = 1.5
gl_iters = 30
################################
# Train #
################################
is_cuda = True
pin_mem = True
n_workers = 4
prep = True
pth = 'lj-22k.pkl'
lr = 2e-3
betas = (0.9, 0.999)
eps = 1e-6
sch = True
sch_step = 4000
max_iter = 200e3
batch_size = 16
iters_per_log = 10
iters_per_sample = 500
iters_per_ckpt = 10000
weight_decay = 1e-6
grad_clip_thresh = 1.0
eg_text = 'OMAK is a thinking process which considers things always positively.'
################################
# Model Parameters #
################################
n_symbols = len(symbols)
symbols_embedding_dim = 512
# Encoder parameters
encoder_kernel_size = 5
encoder_n_convolutions = 3
encoder_embedding_dim = 512
# Decoder parameters
n_frames_per_step = 3
decoder_rnn_dim = 1024
prenet_dim = 256
max_decoder_ratio = 10
gate_threshold = 0.5
p_attention_dropout = 0.1
p_decoder_dropout = 0.1
# Attention parameters
attention_rnn_dim = 1024
attention_dim = 128
# Location Layer parameters
attention_location_n_filters = 32
attention_location_kernel_size = 31
# Mel-post processing network parameters
postnet_embedding_dim = 512
postnet_kernel_size = 5
postnet_n_convolutions = 5
|
bf40a9c75e0b5c2d963c99c1c4296f16337f0660
|
c085f61a0f9da8ccd2f56ab9142799a4dcfd1052
|
/bindings/pydeck-carto/examples/scripts/carto_layer_geo_query.py
|
feb90e8c8aeaa12959225e33a6ff2b224e636a7b
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
visgl/deck.gl
|
148ec752c02cf6d0a35d7e11dbb44b5e341553c2
|
1c4f9a99596b3f913426f7d5df3d7e831b4e99c0
|
refs/heads/master
| 2023-09-01T00:08:12.457341
| 2023-08-29T22:19:23
| 2023-08-29T22:19:23
| 48,030,204
| 3,929
| 1,012
|
MIT
| 2023-09-14T17:42:01
| 2015-12-15T08:38:29
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 744
|
py
|
carto_layer_geo_query.py
|
"""
CartoLayer
==========
Render cloud data from a query.
"""
import pydeck as pdk
import pydeck_carto as pdkc
from carto_auth import CartoAuth
carto_auth = CartoAuth.from_oauth()
pdkc.register_carto_layer()
layer = pdk.Layer(
"CartoLayer",
data="SELECT geom, name FROM carto-demo-data.demo_tables.airports",
type_=pdkc.MapType.QUERY,
connection=pdkc.CartoConnection.CARTO_DW,
credentials=pdkc.get_layer_credentials(carto_auth),
get_fill_color=[238, 77, 90],
point_radius_min_pixels=2.5,
pickable=True,
)
view_state = pdk.ViewState(latitude=0, longitude=0, zoom=1)
r = pdk.Deck(layer, map_style=pdk.map_styles.ROAD, initial_view_state=view_state)
r.to_html("carto_layer_geo_query.html", open_browser=True)
|
be47b21795c50737baef34bcdca0c08138ceebf6
|
ec718413230ea285ce6d4fbcc5241483e7a604da
|
/torch/remove_classes.py
|
4d5f2f47cb343dbd2af811c27ee1760a40fd4422
|
[
"MIT"
] |
permissive
|
ctongfei/nexus
|
04ab9b24d6b822d59f6ff961e0b74198ac3cfa45
|
a9998d346036ec521f494f447001105cae21cdbc
|
refs/heads/master
| 2023-03-23T15:43:24.420404
| 2019-08-18T04:09:37
| 2019-08-18T04:09:37
| 92,866,653
| 260
| 20
|
MIT
| 2019-05-11T15:25:09
| 2017-05-30T19:03:48
|
Scala
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
remove_classes.py
|
#! /usr/bin/env python3
import sys
in_class = False
for l in sys.stdin:
if l.startswith("class"):
in_class = True
if in_class:
if l.startswith("};"):
in_class = False
continue
else:
print(l, end='')
|
437375c657f950dec5ad43a059a473c4dd392817
|
80456bf94b56da863d6a62a4a7acf48b6aa2384f
|
/fabric/set_hosts.py
|
87d750981b8fb6947d5e316b401cace722e1b326
|
[
"Apache-2.0"
] |
permissive
|
k8sp/sextant
|
464d490908ebc42efbd275b58320f8dc60e23050
|
0f4fec9ae68aa5eba689aeb1b7584977033ab907
|
refs/heads/develop
| 2020-04-04T21:42:14.275150
| 2017-10-10T03:38:11
| 2017-10-10T03:38:11
| 63,209,168
| 106
| 34
| null | 2017-11-13T12:04:14
| 2016-07-13T02:54:51
|
Shell
|
UTF-8
|
Python
| false
| false
| 2,111
|
py
|
set_hosts.py
|
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.console import confirm
import fabric.operations as op
import yaml
import sys
import re
mac_ip={}
host_ip={}
set_type=""
def modify_mac_hosts(path, ips):
import copy
local = copy.deepcopy(ips)
#hostname->ip
hosts = []
with open(path, "r") as fp:
for line in fp.read().split('\n'):
if len(re.sub('\s*', '', line)) and not line.startswith('#'):
parts = re.split('\s+', line)
ip = parts[0]
host_name = " ".join(parts[1:])
hosts.append([host_name, ip])
fp.close()
for n in hosts:
if n[0] in local:
n[1] = local[n[0]]
local[n[0]]= ""
with open(path, "w") as fw:
for n in hosts:
fw.write("%s %s\n" % (n[1], n[0]) )
for n in local:
if len(local[n]) > 0:
fw.write("%s %s\n" % (local[n], n) )
fw.close()
def set_mac_hosts():
src_path = "/etc/hosts"
dst_path = env.host_string + "/hosts"
get(src_path)
if set_type == "mac" or set_type == "all":
modify_mac_hosts(dst_path, mac_ip)
if set_type == "host" or set_type == "all":
modify_mac_hosts(dst_path, host_ip)
put(dst_path, src_path)
def display():
print host_ip
print mac_ip
with open("hosts.yaml", 'r') as stream:
try:
y = yaml.load(stream)
env.hosts = y["hosts"]
env.user = y["user"]
env.password = y["password"]
set_type = y["set_type"]
except yaml.YAMLError as exc:
print(exc)
abort("load yaml error")
for h in env.hosts:
dst_path = h + "/mac_ip_host"
with open(dst_path, "r") as fp:
for line in fp.read().split('\n'):
if len(re.sub('\s*', '', line)) and not line.startswith('#'):
parts = re.split('\s+', line)
mac = parts[0].replace(":", "-")
ip = parts[1]
host_name = parts[2]
mac_ip[mac] = ip
host_ip[host_name] = ip
|
ccff9601fb6f6e1384b008c3e0c216014a7eae0d
|
f27e3fdc97290b1db6d3fa7039ad59e4f8b5a760
|
/keras/comet-keras-lstm-example.py
|
cd5b899fbff97f893efa2f3a33a71c8efc917c2c
|
[] |
no_license
|
comet-ml/comet-examples
|
9c7bcea8b97986fb7987cbe0f4533f619e2a0939
|
9da5d4f296e633bb7e63b47dc2d3f7a0780c0a4e
|
refs/heads/master
| 2023-08-19T03:32:51.864273
| 2023-08-09T09:30:34
| 2023-08-09T09:30:34
| 158,587,515
| 134
| 55
| null | 2023-09-13T16:58:41
| 2018-11-21T18:00:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,640
|
py
|
comet-keras-lstm-example.py
|
# coding: utf-8
'''Trains an LSTM model on the IMDB sentiment classification task.
Example adapted from https://github.com/keras-team/keras/tree/master/examples
'''
# import comet_ml at the top of your file (before all other machine learning libs)
from comet_ml import Experiment
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.datasets import imdb
from keras.callbacks import EarlyStopping, ModelCheckpoint
import os
# Setting the API key (saved as environment variable)
exp = Experiment(
#api_key="YOUR API KEY",
# or
api_key=os.environ.get("COMET_API_KEY"),
project_name='comet-examples')
params = {"num_nodes": 128,
"model_type": "LSTM",
"dropout": 0.4,
"dropout_recurrent": 0.4,
"num_words": 20000,
"maxlen": 90, # cut texts after this number of words
"skip_top": 10,
"batch_size": 32,
"epochs": 15
}
exp.log_parameters(params)
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=params["num_words"],
skip_top=params["skip_top"],
seed=42
)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=params["maxlen"])
x_test = sequence.pad_sequences(x_test, maxlen=params["maxlen"])
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
model.add(Embedding(params["num_words"], 128))
model.add(LSTM(params["num_nodes"],
dropout=params['dropout'],
recurrent_dropout=params['dropout_recurrent']))
model.add(Dense(1, activation='sigmoid'))
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
print('Training...')
model.fit(x_train, y_train,
batch_size=params["batch_size"],
epochs=params["epochs"],
validation_data=(x_test, y_test),
callbacks=[EarlyStopping(monitor='loss', min_delta=1e-3, patience=2, verbose=1, mode='auto')])
score, acc = model.evaluate(x_test, y_test,
batch_size=params["batch_size"])
model.save('imdb_lstm_final.h5')
print('Test score:', score)
print('Test accuracy:', acc)
|
016b8fb7c146d4c178a909e02d3deceb1c4859a8
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/request/AlipayWeiboPucChargeRequest.py
|
992935c4e86e28a6a09cc8a9105b73f6d63f1ab4
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 9,708
|
py
|
AlipayWeiboPucChargeRequest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayWeiboPucChargeRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._apd_id = None
self._cell_id = None
self._device_info_token = None
self._exparam = None
self._imei = None
self._ip = None
self._lac_id = None
self._login_from = None
self._mac = None
self._partner_user_id = None
self._tid = None
self._token = None
self._umid = None
self._wireless_mac = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def apd_id(self):
return self._apd_id
@apd_id.setter
def apd_id(self, value):
self._apd_id = value
@property
def cell_id(self):
return self._cell_id
@cell_id.setter
def cell_id(self, value):
self._cell_id = value
@property
def device_info_token(self):
return self._device_info_token
@device_info_token.setter
def device_info_token(self, value):
self._device_info_token = value
@property
def exparam(self):
return self._exparam
@exparam.setter
def exparam(self, value):
self._exparam = value
@property
def imei(self):
return self._imei
@imei.setter
def imei(self, value):
self._imei = value
@property
def ip(self):
return self._ip
@ip.setter
def ip(self, value):
self._ip = value
@property
def lac_id(self):
return self._lac_id
@lac_id.setter
def lac_id(self, value):
self._lac_id = value
@property
def login_from(self):
return self._login_from
@login_from.setter
def login_from(self, value):
self._login_from = value
@property
def mac(self):
return self._mac
@mac.setter
def mac(self, value):
self._mac = value
@property
def partner_user_id(self):
return self._partner_user_id
@partner_user_id.setter
def partner_user_id(self, value):
self._partner_user_id = value
@property
def tid(self):
return self._tid
@tid.setter
def tid(self, value):
self._tid = value
@property
def token(self):
return self._token
@token.setter
def token(self, value):
self._token = value
@property
def umid(self):
return self._umid
@umid.setter
def umid(self, value):
self._umid = value
@property
def wireless_mac(self):
return self._wireless_mac
@wireless_mac.setter
def wireless_mac(self, value):
self._wireless_mac = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.weibo.puc.charge'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.apd_id:
if hasattr(self.apd_id, 'to_alipay_dict'):
params['apd_id'] = json.dumps(obj=self.apd_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['apd_id'] = self.apd_id
if self.cell_id:
if hasattr(self.cell_id, 'to_alipay_dict'):
params['cell_id'] = json.dumps(obj=self.cell_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['cell_id'] = self.cell_id
if self.device_info_token:
if hasattr(self.device_info_token, 'to_alipay_dict'):
params['device_info_token'] = json.dumps(obj=self.device_info_token.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['device_info_token'] = self.device_info_token
if self.exparam:
if hasattr(self.exparam, 'to_alipay_dict'):
params['exparam'] = json.dumps(obj=self.exparam.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['exparam'] = self.exparam
if self.imei:
if hasattr(self.imei, 'to_alipay_dict'):
params['imei'] = json.dumps(obj=self.imei.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['imei'] = self.imei
if self.ip:
if hasattr(self.ip, 'to_alipay_dict'):
params['ip'] = json.dumps(obj=self.ip.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['ip'] = self.ip
if self.lac_id:
if hasattr(self.lac_id, 'to_alipay_dict'):
params['lac_id'] = json.dumps(obj=self.lac_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['lac_id'] = self.lac_id
if self.login_from:
if hasattr(self.login_from, 'to_alipay_dict'):
params['login_from'] = json.dumps(obj=self.login_from.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['login_from'] = self.login_from
if self.mac:
if hasattr(self.mac, 'to_alipay_dict'):
params['mac'] = json.dumps(obj=self.mac.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['mac'] = self.mac
if self.partner_user_id:
if hasattr(self.partner_user_id, 'to_alipay_dict'):
params['partner_user_id'] = json.dumps(obj=self.partner_user_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['partner_user_id'] = self.partner_user_id
if self.tid:
if hasattr(self.tid, 'to_alipay_dict'):
params['tid'] = json.dumps(obj=self.tid.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['tid'] = self.tid
if self.token:
if hasattr(self.token, 'to_alipay_dict'):
params['token'] = json.dumps(obj=self.token.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['token'] = self.token
if self.umid:
if hasattr(self.umid, 'to_alipay_dict'):
params['umid'] = json.dumps(obj=self.umid.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['umid'] = self.umid
if self.wireless_mac:
if hasattr(self.wireless_mac, 'to_alipay_dict'):
params['wireless_mac'] = json.dumps(obj=self.wireless_mac.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['wireless_mac'] = self.wireless_mac
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
|
228efb733d4b73df5c55264075cbe4d385dd359f
|
67ca269e39935d0c439329c3a63df859e40168bb
|
/autoPyTorch/constants.py
|
3d77f77bc56911c3b8a2df628a6d0d1803845cd9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
automl/Auto-PyTorch
|
2e67ffb44f40d9993470ded9b63f10a5164b41df
|
56a2ac1d69c7c61a847c678879a67f5d3672b3e8
|
refs/heads/master
| 2023-07-14T22:55:57.826602
| 2022-08-23T16:43:15
| 2022-08-23T16:43:15
| 159,791,040
| 2,214
| 280
|
Apache-2.0
| 2023-04-04T14:41:15
| 2018-11-30T08:18:34
|
Python
|
UTF-8
|
Python
| false
| false
| 3,406
|
py
|
constants.py
|
TABULAR_CLASSIFICATION = 1
IMAGE_CLASSIFICATION = 2
TABULAR_REGRESSION = 3
IMAGE_REGRESSION = 4
TIMESERIES_FORECASTING = 5
REGRESSION_TASKS = [TABULAR_REGRESSION, IMAGE_REGRESSION]
CLASSIFICATION_TASKS = [TABULAR_CLASSIFICATION, IMAGE_CLASSIFICATION]
FORECASTING_TASKS = [TIMESERIES_FORECASTING] # TODO extend FORECASTING TASKS to Classification and regression tasks
TABULAR_TASKS = [TABULAR_CLASSIFICATION, TABULAR_REGRESSION]
IMAGE_TASKS = [IMAGE_CLASSIFICATION, IMAGE_REGRESSION]
TIMESERIES_TASKS = [TIMESERIES_FORECASTING]
TASK_TYPES = REGRESSION_TASKS + CLASSIFICATION_TASKS + FORECASTING_TASKS
TASK_TYPES_TO_STRING = \
{TABULAR_CLASSIFICATION: 'tabular_classification',
IMAGE_CLASSIFICATION: 'image_classification',
TABULAR_REGRESSION: 'tabular_regression',
IMAGE_REGRESSION: 'image_regression',
TIMESERIES_FORECASTING: 'time_series_forecasting'}
STRING_TO_TASK_TYPES = \
{'tabular_classification': TABULAR_CLASSIFICATION,
'image_classification': IMAGE_CLASSIFICATION,
'tabular_regression': TABULAR_REGRESSION,
'image_regression': IMAGE_REGRESSION,
'time_series_forecasting': TIMESERIES_FORECASTING}
# Output types have been defined as in scikit-learn type_of_target
# (https://scikit-learn.org/stable/modules/generated/sklearn.utils.multiclass.type_of_target.html)
BINARY = 10
CONTINUOUSMULTIOUTPUT = 11
MULTICLASS = 12
CONTINUOUS = 13
MULTICLASSMULTIOUTPUT = 14
OUTPUT_TYPES = [BINARY, CONTINUOUSMULTIOUTPUT, MULTICLASS, CONTINUOUS]
OUTPUT_TYPES_TO_STRING = \
{BINARY: 'binary',
CONTINUOUSMULTIOUTPUT: 'continuous-multioutput',
MULTICLASS: 'multiclass',
CONTINUOUS: 'continuous',
MULTICLASSMULTIOUTPUT: 'multiclass-multioutput'}
STRING_TO_OUTPUT_TYPES = \
{'binary': BINARY,
'continuous-multioutput': CONTINUOUSMULTIOUTPUT,
'multiclass': MULTICLASS,
'continuous': CONTINUOUS,
'multiclass-multioutput': MULTICLASSMULTIOUTPUT}
CLASSIFICATION_OUTPUTS = [BINARY, MULTICLASS, MULTICLASSMULTIOUTPUT]
REGRESSION_OUTPUTS = [CONTINUOUS, CONTINUOUSMULTIOUTPUT]
ForecastingDependenciesNotInstalledMSG = "Additional dependencies must be installed to work with time series " \
"forecasting tasks! Please run \n pip install autoPyTorch[forecasting] \n to "\
"install the corresponding dependencies!"
# This value is applied to ensure numerical stability: sometimes we want to rescale a value as value / scale.
# We set the scale to 1 if it is smaller than this value, so that the rescaled value does not result in
# overflow
VERY_SMALL_VALUE = 1e-12
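# Illustrative helper (added for clarity, not part of the original module): the clamp
# described in the comment above, with hypothetical `value`/`scale` arguments.
def _illustrative_rescale(value: float, scale: float) -> float:
    safe_scale = scale if abs(scale) > VERY_SMALL_VALUE else 1.0
    return value / safe_scale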
# The constant values for time series forecasting comes from
# https://github.com/rakshitha123/TSForecasting/blob/master/experiments/deep_learning_experiments.py
# seasonality map, maps a frequency value to a number
FORECASTING_BUDGET_TYPE = ('resolution', 'num_seq', 'num_sample_per_seq')
SEASONALITY_MAP = {
"1min": [1440, 10080, 525960],
"10min": [144, 1008, 52596],
"30min": [48, 336, 17532],
"1H": [24, 168, 8766],
"1D": 7,
"1W": 365.25 / 7,
"1M": 12,
"1Q": 4,
"1Y": 1
}
# To avoid that we get a sequence that is too long to be fed to a network
MAX_WINDOW_SIZE_BASE = 500
# AutoPyTorch optionally allows network inference or metrics calculation for the following datasets
OPTIONAL_INFERENCE_CHOICES = ('test',)
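# Added for clarity (illustrative only, not part of the original module): how the
# string/constant tables above are typically used to classify a task and its target type.
if __name__ == "__main__":
    example_task = STRING_TO_TASK_TYPES["tabular_regression"]  # -> TABULAR_REGRESSION
    example_output = STRING_TO_OUTPUT_TYPES["continuous"]  # -> CONTINUOUS
    assert example_task in REGRESSION_TASKS and example_output in REGRESSION_OUTPUTS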
|
696749582efedfc6b62db7c67c117f2661c07b60
|
f7381953d4b6b5ed7efd181b21a07e73f6950f04
|
/src/salt/base/ext/_utils/can_obd_conn.py
|
6ea2873ea55e8b1b0cf5b21e05a478e9ebf1994a
|
[
"Apache-2.0"
] |
permissive
|
autopi-io/autopi-core
|
75a50c03bee9d575eb1f25832644fe701f18d1d3
|
ab42459602787e9a557c3a00df40b20a52879fc7
|
refs/heads/master
| 2023-06-01T06:32:00.772505
| 2023-04-27T14:32:47
| 2023-04-27T14:32:47
| 136,908,152
| 141
| 33
|
Apache-2.0
| 2020-01-10T09:09:58
| 2018-06-11T10:02:02
|
Python
|
UTF-8
|
Python
| false
| false
| 30,191
|
py
|
can_obd_conn.py
|
import binascii
import can
import collections
import datetime
import logging
import sys
from obd import OBD, OBDStatus, commands, OBDCommand, ECU, decoders
from obd.utils import bytes_to_int
from obd.protocols import UnknownProtocol, CANProtocol
from obd.interfaces.stn11xx import STN11XX, STN11XXError
from six import string_types
from obd_conn import OBDConn
from can_conn import CANConn
log = logging.getLogger(__name__)
DEBUG = log.isEnabledFor(logging.DEBUG)
# ELM327: CAN
class ISO_15765_4_11bit_500k(CANProtocol):
NAME = "ISO 15765-4 (CAN 11/500)"
ID = "6"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 500000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class ISO_15765_4_29bit_500k(CANProtocol):
NAME = "ISO 15765-4 (CAN 29/500)"
ID = "7"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 500000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class ISO_15765_4_11bit_250k(CANProtocol):
NAME = "ISO 15765-4 (CAN 11/250)"
ID = "8"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 250000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class ISO_15765_4_29bit_250k(CANProtocol):
NAME = "ISO 15765-4 (CAN 29/250)"
ID = "9"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 250000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
# STN11XX: High Speed CAN
class HSC_ISO_11898_11bit_500k(CANProtocol):
NAME = "HS CAN (ISO 11898, 11bit, 500kbps, var DLC)"
ID = "31"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 500000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class HSC_ISO_11898_29bit_500k(CANProtocol):
NAME = "HS CAN (ISO 11898, 29bit, 500kbps, var DLC)"
ID = "32"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 500000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class HSC_ISO_15765_11bit_500k(CANProtocol):
NAME = "HS CAN (ISO 15765, 11bit, 500kbps, DLC=8)"
ID = "33"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 500000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class HSC_ISO_15765_29bit_500k(CANProtocol):
NAME = "HS CAN (ISO 15765, 29bit, 500kbps, DLC=8)"
ID = "34"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 500000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class HSC_ISO_15765_11bit_250k(CANProtocol):
NAME = "HS CAN (ISO 15765, 11bit, 250kbps, DLC=8)"
ID = "35"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 250000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class HSC_ISO_15765_29bit_250k(CANProtocol):
NAME = "HS CAN (ISO 15765, 29bit, 250kbps, DLC=8)"
ID = "36"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 250000
INTERFACE = "can0"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
# STN11XX: J1939
class HSC_J1939_11bit_250k(UnknownProtocol):
NAME = "J1939 (11bit, 250kbps)"
ID = "41"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 250000
INTERFACE = "can0"
def __init__(self, lines_0100):
UnknownProtocol.__init__(self, lines_0100)
class HSC_J1939_29bit_250k(UnknownProtocol):
NAME = "J1939 (29bit, 250kbps)"
ID = "42"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 250000
INTERFACE = "can0"
def __init__(self, lines_0100):
UnknownProtocol.__init__(self, lines_0100)
# STN11XX: Medium Speed CAN
class MSC_ISO_11898_11bit_125k(CANProtocol):
NAME = "MS CAN (ISO 11898, 11bit, 125kbps, var DLC)"
ID = "51"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 125000
INTERFACE = "can1"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class MSC_ISO_11898_29bit_125k(CANProtocol):
NAME = "MS CAN (ISO 11898, 29bit, 125kbps, var DLC)"
ID = "52"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 125000
INTERFACE = "can1"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class MSC_ISO_15765_11bit_125k(CANProtocol):
NAME = "MS CAN (ISO 15765, 11bit, 125kbps, DLC=8)"
ID = "53"
HEADER_BITS = 11
DEFAULT_BAUDRATE = 125000
INTERFACE = "can1"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class MSC_ISO_15765_29bit_125k(CANProtocol):
NAME = "MS CAN (ISO 15765, 29bit, 125kbps, DLC=8)"
ID = "54"
HEADER_BITS = 29
DEFAULT_BAUDRATE = 125000
INTERFACE = "can1"
def __init__(self, lines_0100):
CANProtocol.__init__(self, lines_0100)
class SocketCANError(STN11XXError):
def __init__(self, *args, **kwargs):
super(SocketCANError, self).__init__(*args, **kwargs)
class SocketCANInterface(STN11XX):
CAN_SUPPORTED_PROTOCOLS = collections.OrderedDict({
# ELM327: CAN
ISO_15765_4_11bit_500k.ID: ISO_15765_4_11bit_500k,
ISO_15765_4_29bit_500k.ID: ISO_15765_4_29bit_500k,
ISO_15765_4_11bit_250k.ID: ISO_15765_4_11bit_250k,
ISO_15765_4_29bit_250k.ID: ISO_15765_4_29bit_250k,
# STN11XX: High Speed CAN
HSC_ISO_11898_11bit_500k.ID: HSC_ISO_11898_11bit_500k, # HS CAN (ISO 11898, 11bit, 500kbps, var DLC)
HSC_ISO_11898_29bit_500k.ID: HSC_ISO_11898_29bit_500k, # HS CAN (ISO 11898, 29bit, 500kbps, var DLC)
HSC_ISO_15765_11bit_500k.ID: HSC_ISO_15765_11bit_500k, # HS CAN (ISO 15765, 11bit, 500kbps, DLC=8)
HSC_ISO_15765_29bit_500k.ID: HSC_ISO_15765_29bit_500k, # HS CAN (ISO 15765, 29bit, 500kbps, DLC=8)
HSC_ISO_15765_11bit_250k.ID: HSC_ISO_15765_11bit_250k, # HS CAN (ISO 15765, 11bit, 250kbps, DLC=8)
HSC_ISO_15765_29bit_250k.ID: HSC_ISO_15765_29bit_250k, # HS CAN (ISO 15765, 29bit, 250kbps, DLC=8)
#STN11XX: J1939
HSC_J1939_11bit_250k.ID: HSC_J1939_11bit_250k, # J1939 (11bit, 250kbps)
HSC_J1939_29bit_250k.ID: HSC_J1939_29bit_250k, # J1939 (29bit, 250kbps)
# STN11XX: Medium Speed CAN
MSC_ISO_11898_11bit_125k.ID: MSC_ISO_11898_11bit_125k, # MS CAN (ISO 11898, 11bit, 125kbps, var DLC)
MSC_ISO_11898_29bit_125k.ID: MSC_ISO_11898_29bit_125k, # MS CAN (ISO 11898, 29bit, 125kbps, var DLC)
MSC_ISO_15765_11bit_125k.ID: MSC_ISO_15765_11bit_125k, # MS CAN (ISO 15765, 11bit, 125kbps, DLC=8)
MSC_ISO_15765_29bit_125k.ID: MSC_ISO_15765_29bit_125k, # MS CAN (ISO 15765, 29bit, 125kbps, DLC=8)
})
CAN_TRY_PROTOCOLS = [
# ELM327: CAN
ISO_15765_4_11bit_500k,
ISO_15765_4_29bit_500k,
ISO_15765_4_11bit_250k,
ISO_15765_4_29bit_250k,
]
def __init__(self, status_callback=None):
self._status = OBDStatus.NOT_CONNECTED
self._status_callback = status_callback
self._protocol = UnknownProtocol([])
self._echo_off = True
# Cached settings that have been changed runtime
self._runtime_settings = {}
self._port = CANConn(__salt__)
def open(self, channel=None, protocol=None, echo_off=True, print_headers=True):
log.info("Opening SocketCAN interface connection: Channel={:}, Protocol={:}".format(
channel,
"auto" if protocol is None else protocol
))
protocol_dict = protocol if isinstance(protocol, dict) else {"id": protocol}
protocol_cls = self.supported_protocols().get(protocol_dict.get("id", None), None)
self._port.setup(
channel=channel or getattr(protocol_cls, "INTERFACE", "can0"),
bitrate=protocol_dict.get("baudrate", None) or getattr(protocol_cls, "DEFAULT_BAUDRATE", 500000))
# Open connection
try:
self._port.open()
except:
log.exception("Failed to open SocketCAN connection")
# Remember to report back status
self._trigger_status_callback()
raise
        # By now, we've successfully communicated with the SocketCAN interface, but not the car
self._status = OBDStatus.ITF_CONNECTED
# Remember to report back status
self._trigger_status_callback()
# Try to communicate with the car, and load the correct protocol parser
try:
self.set_protocol(protocol_dict.pop("id", None), **protocol_dict)
except Exception as ex:
log.warning(str(ex))
return
log.info("Connected successfully to vehicle: Channel={:}, Protocol={:}".format(
channel,
self._protocol.ID
))
#def close(self):
def reopen(self):
self.close()
self.open(channel=self._port or getattr(protocol_cls, "INTERFACE", "can0"),
protocol={
"id": getattr(self._protocol, "ID", None),
"baudrate": getattr(self._protocol, "baudrate", None)
} if not getattr(self._protocol, "autodetected", True) else None
)
#def restore_defaults(self):
#def warm_reset(self):
#def reset(self):
def connection(self):
raise NotImplementedError("Not supported by SocketCAN interface")
#def status(self):
#def runtime_settings(self):
@classmethod
def supported_protocols(cls):
return cls.CAN_SUPPORTED_PROTOCOLS
#def protocol(self, verify=True):
#def ecus(self):
def set_protocol(self, ident, **kwargs):
ret = super(STN11XX, self).set_protocol(ident, **kwargs) # NOTE: Calls ELM327's method (skipping STN11XX's)
        # Automatic filtering mode is on by default
self._runtime_settings["auto_filtering"] = True
self._ensure_auto_filtering()
# No custom filters are set
self._runtime_settings.pop("can_pass_filters", None)
self._runtime_settings.pop("can_block_filters", None)
self._runtime_settings.pop("can_flow_control_filters", None)
self._runtime_settings.pop("j1939_pgn_filters", None)
return ret
def set_baudrate(self, baudrate):
raise NotImplementedError("Not supported by SocketCAN interface")
#def set_expect_responses(self, value):
#def set_response_timeout(self, value):
#def set_adaptive_timing(self, value):
#def set_header(self, value):
def reset_header(self):
self._runtime_settings.pop("header", None)
self._runtime_settings.pop("can_priority", None)
#def set_can_auto_format(self, value):
#def set_can_extended_address(self, value):
#def set_can_priority(self, value):
#def set_print_spaces(self, value):
#def query(self, cmd, header=None, parse=True, read_timeout=None):
def relay(self, cmd, raw_response=False):
raise NotImplementedError("Not supported by SocketCAN interface")
def send(self, cmd, delay=None, read_timeout=None, interrupt_delay=None, raw_response=False):
ret = []
# Respond OK on all AT/ST commands
if cmd[:2].upper() in ["AT", "ST"]:
if cmd.upper() == "ATRV":
res = __salt__["spm.query"]("volt_readout")
ret.append("{:.2f}V".format(res["value"]))
else:
ret.append(self.OK)
log.info("Returning {:} to AT/ST command '{:}'".format(ret, cmd))
return ret
if self._runtime_settings.get("expect_responses", True):
# Determine how many reply messages, if specified
replies = None
if cmd[-2] == " ":
replies = int(cmd[-1], 16)
cmd = cmd[:-2]
# Response timing
timeout = 0.2 # 200ms
if self._runtime_settings.get("adaptive_timing", 1) == 0: # Adaptive timing off (fixed timeout)
timeout = self._runtime_settings.get("response_timeout", 50) * 4 / 1000
# Configure formatter
msg_formatter = lambda msg : can_message_formatter (
msg,
include_spaces=self._runtime_settings.get("print_spaces", True),
include_hashtag=False
)
kwargs = {}
# CAN extended address (flow control)
extended_address = self._runtime_settings.get("can_extended_address", None)
if extended_address != None:
kwargs["extended_address"] = int(str(extended_address), 16)
res = self._port.query(self._build_can_msg(cmd),
replies=replies,
timeout=timeout,
flow_control=[self._port.FLOW_CONTROL_CUSTOM, self._port.FLOW_CONTROL_OBD],
zero_padding=(8 if self._runtime_settings.get("can_auto_format", True) else 0),
strict=False,
**kwargs)
if res:
ret = [msg_formatter(r) for r in res]
elif not raw_response:
raise SocketCANError(self.ERRORS["NO DATA"], code="NO DATA") # Same behaviour as old
else:
self._port.send(self._build_can_msg(cmd))
return ret
#def set_can_monitor_mode(self, value):
def monitor(self, mode=0, auto_format=False, filtering=False, raw_response=False, formatter=None, **kwargs):
ret = []
if auto_format:
ValueError("CAN auto formatting is currently not supported by SocketCan OBD connection")
# Ensure CAN automatic formatting
self.set_can_auto_format(auto_format)
# Ensure CAN monitoring mode
self.set_can_monitor_mode(mode)
if not filtering:
self.clear_filters()
format_response = kwargs.pop("format_response", False)
if not formatter:
formatter = lambda msg : can_message_formatter(msg, include_spaces=self._runtime_settings.get("print_spaces", True), include_hashtag=format_response)
# Setup mode
current_mode = self._runtime_settings.get("can_monitor_mode", 0)
if current_mode == 0:
skip_error_frames = True
elif current_mode == 2:
skip_error_frames = False
else:
raise ValueError("Monitor mode {:} is currently not supported by SocketCan OBD connection".format(current_mode))
# Monitor for specified duration
self._port.monitor_until(lambda msg: ret.append(formatter(msg)), skip_error_frames=skip_error_frames, **kwargs)
if not ret and not raw_response:
raise SocketCANError(self.ERRORS["NO DATA"], code="NO DATA") # Same behaviour as old
return ret
def monitor_continuously(self, wait=False, enrich=None, **kwargs):
if wait:
raise ValueError("Set duration to enforce wait when using SocketCAN interface")
return self.monitor(
receive_timeout=kwargs.pop("receive_timeout", 1),
keep_listening=kwargs.pop("keep_listening", True),
buffer_size=kwargs.pop("buffer_size", 5000),
formatter=enrich,
**kwargs)
def auto_filtering(self, enable=None):
ret = super(SocketCANInterface, self).auto_filtering(enable=enable)
if ret:
self._ensure_auto_filtering()
return ret
#def list_filters(self, type="ALL"):
#def list_filters_by(self, type):
#def add_filter(self, type, value):
def clear_filters(self, type="ALL"):
if type.upper() == self.FILTER_TYPE_ALL:
self._port.clear_filters()
super(SocketCANInterface, self).clear_filters(type=type)
def can_pass_filters(self, clear=False, add=None):
if clear:
self._port.clear_filters()
if add:
filter = {}
if isinstance(add, dict):
filter = add
else:
val = str(add)
if "," in val:
id, mask = val.split(",")
filter["id"] = id
filter["mask"] = mask
else:
filter["id"] = val
# Ensure hex strings are converted to integers
if "id" in filter and isinstance(filter["id"], string_types):
filter["id"] = int(filter["id"], 16)
if "mask" in filter and isinstance(filter["mask"], string_types):
filter["mask"] = int(filter["mask"], 16)
filter.setdefault("is_ext_id", self._protocol.HEADER_BITS > 11)
# Clear before adding if automatic filtering is enabled
if self._runtime_settings.get("auto_filtering", True):
self._port.clear_filters()
self._port.ensure_filter(**filter)
super(SocketCANInterface, self).can_pass_filters(clear=clear, add=add)
def can_block_filters(self, clear=False, add=None):
if add:
raise NotImplementedError("Not supported by SocketCAN interface - only pass filters are supported")
def can_flow_control_filters(self, clear=False, add=None):
"""
        From the limited research done for this task, it looks like the CAN flow control filters are just
        standard pass filters. can_flow_control_id_pairs is what actually does the matching and communication on
        the CAN bus; the filters merely allow those headers to show up.
        For now, this method simply passes the `clear` and `add` arguments down to can_pass_filters.
"""
self.can_pass_filters(clear=clear, add=add)
def j1939_pgn_filters(self, clear=False, add=None):
"""
NOTE NV: So far, it looks like J1939 only cares about the PGN itself being
the same in order to match the header. For example, we've seen that RPM comes
from header 0x0CF00400, but also from 0x1CF004FC - i.e. the identifier is 0xF004.
This is also the reasoning for the mask added below -> 0x00FFFF00, we care only for the PGN.
This might need to be changed depending on what other behaviour we see from J1939 enabled vehicles.
NOTE NV: After investigating J1939 further, it looks like the mask might need to be 0x03FFFF00, because
of the extended data page and data page bits that are located in those bit locations. Will leave the code
        below as is for now, but we might need to change it later
"""
if isinstance(add, string_types):
            # Only format the 'add' parameter if it is passed as a string
if log.isEnabledFor(logging.DEBUG):
log.debug("Adding J1939 filter mask to filter {}".format(add))
add = "{:x},00FFFF00".format(int(add, 16) << 8)
self.can_pass_filters(clear=clear, add=add)
def can_flow_control_id_pairs(self, clear=False, add=None):
if clear:
self._port.flow_control_id_mappings.clear()
if add:
tx_id, rx_id = str(add).replace(" ", "").split(",")
self._port.flow_control_id_mappings[int(rx_id, 16)] = int(tx_id, 16)
super(SocketCANInterface, self).can_flow_control_id_pairs(clear=clear, add=add)
def _verify_protocol(self, ident, test=False):
if isinstance(ident, string_types):
protocol_cls = self.supported_protocols()[ident]
else:
protocol_cls = ident
res_0100 = self._port.obd_query(0x01, 0x00, is_ext_id=protocol_cls.HEADER_BITS > 11, strict=False, skip_error_frames=True)
if not res_0100:
msg = "No data received when trying to verify connectivity of protocol '{:}'".format(ident)
if test:
log.warning(msg)
return []
else:
raise SocketCANError(msg)
return [can_message_formatter(r) for r in res_0100]
def _manual_protocol(self, ident, verify=False, baudrate=None):
protocol_cls = self.supported_protocols()[ident]
baudrate = baudrate or protocol_cls.DEFAULT_BAUDRATE
self._port.setup(channel=protocol_cls.INTERFACE, bitrate=baudrate)
if verify:
# Verify protocol connectivity
res_0100 = self._verify_protocol(protocol_cls, test=not verify)
# Initialize protocol parser
protocol = protocol_cls(res_0100)
else:
protocol = protocol_cls([])
# Remember to set the used baudrate
protocol.baudrate = baudrate
return protocol
def _auto_protocol(self, verify=True, **kwargs):
log.info("Initiates autodetection of OBD protocol")
if not verify:
ValueError("SocketCAN interface cannot autodetect OBD protocol without verify")
res_0100 = []
for protocol_cls in self.CAN_TRY_PROTOCOLS:
log.info("Trying with protocol '{:}' on SocketCAN interface '{:}'".format(protocol_cls.ID, protocol_cls.INTERFACE))
self._port.setup(channel=protocol_cls.INTERFACE, bitrate=protocol_cls.DEFAULT_BAUDRATE)
# Verify protocol connectivity
try:
res_0100 = self._verify_protocol(protocol_cls, test=not verify)
if res_0100:
log.info("Got reply using protocol '{:}' on SocketCAN interface '{:}'".format(protocol_cls.ID, protocol_cls.INTERFACE))
# Instantiate the corresponding protocol parser
protocol = protocol_cls(res_0100)
protocol.baudrate = protocol_cls.DEFAULT_BAUDRATE
return protocol
except Exception as ex:
log.info("Unable to verify OBD protocol '{:}' on SocketCAN interface '{:}': {:}".format(protocol_cls.ID, protocol_cls.INTERFACE, ex))
raise SocketCANError("Unable to autodetect OBD protocol")
def _interrupt(self, *args, **kwargs):
raise NotImplementedError("Not supported by SocketCAN interface")
def _write(self, *args, **kwargs):
raise NotImplementedError("Not supported by SocketCAN interface")
def _read(self, *args, **kwargs):
raise NotImplementedError("Not supported by SocketCAN interface")
def _read_line(self, *args, **kwargs):
raise NotImplementedError("Not supported by SocketCAN interface")
def _build_can_msg(self, cmd):
header = self._runtime_settings.get("header", None)
if header is None:
header = "18DB33F1" if self._protocol.HEADER_BITS > 11 else "7DF"
else:
header = header.strip()
# CAN priority
prio = self._runtime_settings.get("can_priority", None)
if prio is not None:
header = str(prio) + str(header)
data = bytearray.fromhex(cmd)
# CAN automatic formatting
can_auto_format = self._runtime_settings.get("can_auto_format", True)
if can_auto_format:
data = bytearray([len(data)]) + data
# CAN extended address
extended_address = self._runtime_settings.get("can_extended_address", None)
if extended_address is not None:
data = bytearray([int(str(extended_address), 16)]) + data
is_extended_id = len(header) > 3
return can.Message(arbitration_id=int(header, 16),
data=data.ljust(8, b"\x00") if can_auto_format else data,  # pad with null bytes (bytes literal keeps this valid on Python 3)
is_extended_id=is_extended_id)
def _ensure_auto_filtering(self):
if self._protocol.HEADER_BITS > 11:
self._port.ensure_filter(id=0x18DAF100, is_ext_id=True, mask=0x1FFFFF00, clear=True)
else:
self._port.ensure_filter(id=0x7E8, is_ext_id=False, mask=0x7F8, clear=True)
def can_message_formatter(msg, include_hashtag=False, include_spaces=False):
"""
Formats a raw python-can Message object to a string.
"""
# This is how obd_conn would handle the formatting. Remove the following 2 lines to allow both spaces and hashtags
if include_hashtag:
include_spaces = False
# Data
data_hex = binascii.hexlify(msg.data).decode("ascii")  # hexlify returns bytes on Python 3
if include_spaces:
data_string = " ".join(data_hex[i:i+2] for i in range(0, len(data_hex), 2))
else:
data_string = data_hex
# Separator
separator_string = "#" if include_hashtag else ""
if include_spaces:
separator_string = " " + separator_string + (" " if include_hashtag else "")
# Header
header_string = ("{:08x}" if msg.is_extended_id else "{:02x}").format(msg.arbitration_id)
# Return value
return header_string + separator_string + data_string
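# Hedged usage sketch for can_message_formatter (the python-can Message values are illustrative only):
#   msg = can.Message(arbitration_id=0x7E8, data=b"\x03\x41\x0d\x00", is_extended_id=False)
#   can_message_formatter(msg)                        -> "7e803410d00"
#   can_message_formatter(msg, include_hashtag=True)  -> "7e8#03410d00"
#   can_message_formatter(msg, include_spaces=True)   -> "7e8 03 41 0d 00"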
def vin_decoder(messages):
return messages[0].data[3:].decode("ascii")
def odometer_decoder(messages):
return bytes_to_int(messages[0].data[2:6])/10
def add_commands_if_not_there(mode_number, pids):
mode_list = commands.modes[mode_number]
existing_names = [p.name for p in mode_list]
for pid in pids:
if pid.name not in existing_names:
mode_list.append(pid)
if pid.name not in commands.__dict__:
commands.__dict__[pid.name] = pid
class SocketCAN_OBD(OBD):
def __init__(self, channel=None, protocol=None, load_commands=True, status_callback=None, reset_callback=None):
# name description cmd bytes decoder ECU fast
__mode1__ = [
OBDCommand("ODOMETER" , "Current odometer value" , b"01A6", 8, odometer_decoder, ECU.ENGINE, True),
]
__mode9__ = [
OBDCommand("PIDS_9A" , "Supported PIDs [01-20]" , b"0900", 4, decoders.pid, ECU.ENGINE, True),
OBDCommand("VIN_MESSAGE_COUNT" , "VIN Message Count" , b"0901", 1, decoders.uas(0x01), ECU.ENGINE, True),
OBDCommand("VIN" , "Get Vehicle Identification Number" , b"0902", 20, vin_decoder, ECU.ENGINE, True),
]
add_commands_if_not_there(1, __mode1__)
add_commands_if_not_there(9, __mode9__)
# Add the 0900 to default supported commands, so it can be queried during discovery
supported_commands = commands.base_commands()
supported_commands.append(__mode9__[0])
self.interface = SocketCANInterface(status_callback=status_callback)
self.supported_commands = set(supported_commands)
self.reset_callback = reset_callback
self.fast = False
self._last_command = b""
self._frame_counts = {}
self.interface.open(channel, protocol=protocol)
if load_commands:
try:
self._load_commands()
except:
log.exception("Unable to load OBD commands")
class SocketCAN_OBDConn(OBDConn):
def __init__(self, __salt__):
globals()["__salt__"] = __salt__
super(SocketCAN_OBDConn, self).__init__()
@property
def protocol_autodetect_interface(self):
return "can0"
def setup(self, **settings):
channel = settings.pop("channel", "can0")
super(SocketCAN_OBDConn, self).setup(device=channel, baudrate=9600, **settings) # NOTE: Reused device parameter for channel selection
def open(self, force=False):
if self.is_permanently_closed and not force:
raise Warning("SocketCAN OBD connection is no longer available as it has been permanently closed")
# Reset flag
self.is_permanently_closed = False
if DEBUG:
log.debug("Opening SocketCAN OBD connection")
try:
self._obd = SocketCAN_OBD(
channel=self._device, # NOTE: Reused device parameter for channel selection
protocol={
"id": self._protocol_id if self._protocol_id != "AUTO" else None, # None value will result in autodetection
"baudrate": self._protocol_baudrate,
"verify": self._protocol_verify
},
load_commands=self._protocol_verify, # Only load supported commands when protocol is verified
status_callback=self._status_callback,
reset_callback=self._reset_callback
)
if self._advanced_initial:
self.ensure_advanced_settings.undecorated(self, self._advanced_initial) # No need to call the 'ensure_open' decorator
return self
except Exception:
log.exception("Failed to open SocketCAN OBD connection")
raise
def ensure_protocol(self, ident, baudrate=None, verify=True):
use_default_verify = True
# We do not want the monitor handler to verify as default on every invocation
try:
# NOTE: Not pretty, but should at least be fast
use_default_verify = not sys._getframe(1).f_code.co_name.startswith("monitor_")
except:
log.exception("Failed to identify the caller function")
super(SocketCAN_OBDConn, self).ensure_protocol(ident, baudrate=baudrate, verify=(self._protocol_verify or verify) if use_default_verify else verify)
def supported_protocols(self):
return SocketCANInterface.supported_protocols()
def _enrich_monitor_entry(self, msg):
return {
"_stamp": datetime.datetime.fromtimestamp(msg.timestamp).isoformat(),
"value": can_message_formatter(msg)
}
# @Decorators.ensure_open
def query(self, *args, **kwargs):
try:
return super(SocketCAN_OBDConn, self).query(*args, **kwargs)
finally:
self._obd.interface.reset_header()
# @Decorators.ensure_open
def send(self, *args, **kwargs):
try:
return super(SocketCAN_OBDConn, self).send(*args, **kwargs)
finally:
self._obd.interface.reset_header()
# @Decorators.ensure_open
def monitor(self, **kwargs):
return self._obd.interface.monitor(**kwargs)
|
063be4358ed0bad55bc53a2bca303caef0559980
|
23c0a6071860971616326ffeeac0b56135c5c6ee
|
/xmlschema/helpers.py
|
4165188f5dd4e797040d4ceb38da28008a4e55ae
|
[
"MIT"
] |
permissive
|
sissaschool/xmlschema
|
36d74acb2a36459512855ea0264cc4d1ebbef8f5
|
6bf6d8e6d19cfc0ba151effb25cc57c3789d16fd
|
refs/heads/master
| 2023-08-31T11:07:35.750326
| 2023-08-07T09:47:09
| 2023-08-07T09:47:09
| 70,905,710
| 272
| 55
|
MIT
| 2023-08-25T20:03:20
| 2016-10-14T11:52:54
|
Python
|
UTF-8
|
Python
| false
| false
| 11,595
|
py
|
helpers.py
|
#
# Copyright (c), 2016-2021, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <brunato@sissa.it>
#
import re
from collections import Counter
from decimal import Decimal
from typing import Any, Callable, Iterator, List, MutableMapping, \
Optional, Tuple, Union
from xml.etree.ElementTree import ParseError
from .exceptions import XMLSchemaValueError, XMLSchemaTypeError
from .names import XSI_SCHEMA_LOCATION, XSI_NONS_SCHEMA_LOCATION
from .aliases import ElementType, NamespacesType, AtomicValueType, NumericValueType
###
# Helper functions for QNames
NAMESPACE_PATTERN = re.compile(r'{([^}]*)}')
def get_namespace(qname: str, namespaces: Optional[NamespacesType] = None) -> str:
"""
Returns the namespace URI associated with a QName. If a namespace map is
provided tries to resolve a prefixed QName and then to extract the namespace.
:param qname: an extended QName or a local name or a prefixed QName.
:param namespaces: optional mapping from prefixes to namespace URIs.
"""
if not qname:
return ''
elif qname[0] != '{':
if namespaces is None:
return ''
qname = get_extended_qname(qname, namespaces)
try:
return NAMESPACE_PATTERN.match(qname).group(1) # type: ignore[union-attr]
except (AttributeError, TypeError):
return ''
def get_qname(uri: Optional[str], name: str) -> str:
"""
Returns an expanded QName from URI and local part. If any argument has boolean value
`False` or if the name is already an expanded QName, returns the *name* argument.
:param uri: namespace URI
:param name: local or qualified name
:return: string or the name argument
"""
if not uri or not name or name[0] in '{./[':
return name
else:
return f'{{{uri}}}{name}'
def local_name(qname: str) -> str:
"""
Return the local part of an expanded QName or a prefixed name. If the name
is `None` or empty returns the *name* argument.
:param qname: an expanded QName or a prefixed name or a local name.
"""
try:
if qname[0] == '{':
_namespace, qname = qname.split('}')
elif ':' in qname:
_prefix, qname = qname.split(':')
except IndexError:
return ''
except ValueError:
raise XMLSchemaValueError("the argument 'qname' has an invalid value %r" % qname)
except TypeError:
raise XMLSchemaTypeError("the argument 'qname' must be a string-like object")
else:
return qname
def get_prefixed_qname(qname: str,
namespaces: Optional[MutableMapping[str, str]],
use_empty: bool = True) -> str:
"""
Get the prefixed form of a QName, using a namespace map.
:param qname: an extended QName or a local name or a prefixed QName.
:param namespaces: an optional mapping from prefixes to namespace URIs.
:param use_empty: if `True` use the empty prefix for mapping.
"""
if not namespaces or not qname or qname[0] != '{':
return qname
namespace = get_namespace(qname)
prefixes = [x for x in namespaces if namespaces[x] == namespace]
if not prefixes:
return qname
elif prefixes[0]:
return f"{prefixes[0]}:{qname.split('}', 1)[1]}"
elif len(prefixes) > 1:
return f"{prefixes[1]}:{qname.split('}', 1)[1]}"
elif use_empty:
return qname.split('}', 1)[1]
else:
return qname
def get_extended_qname(qname: str, namespaces: Optional[MutableMapping[str, str]]) -> str:
"""
Get the extended form of a QName, using a namespace map.
Local names are mapped to the default namespace.
:param qname: a prefixed QName or a local name or an extended QName.
:param namespaces: an optional mapping from prefixes to namespace URIs.
"""
if not namespaces:
return qname
try:
if qname[0] == '{':
return qname
except IndexError:
return qname
try:
prefix, name = qname.split(':', 1)
except ValueError:
if not namespaces.get(''):
return qname
else:
return f"{{{namespaces['']}}}{qname}"
else:
try:
uri = namespaces[prefix]
except KeyError:
return qname
else:
return f'{{{uri}}}{name}' if uri else name
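# Hedged usage sketch for the QName helpers above (the namespace map is purely illustrative):
#   nsmap = {'xs': 'http://www.w3.org/2001/XMLSchema'}
#   get_extended_qname('xs:string', nsmap)   -> '{http://www.w3.org/2001/XMLSchema}string'
#   get_prefixed_qname('{http://www.w3.org/2001/XMLSchema}string', nsmap)  -> 'xs:string'
#   get_namespace('{http://www.w3.org/2001/XMLSchema}string')  -> 'http://www.w3.org/2001/XMLSchema'
#   local_name('xs:string')  -> 'string'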
###
# Helper functions for ElementTree structures
def is_etree_element(obj: object) -> bool:
"""A checker for valid ElementTree elements that excludes XsdElement objects."""
return hasattr(obj, 'append') and hasattr(obj, 'tag') and hasattr(obj, 'attrib')
def is_etree_document(obj: object) -> bool:
"""A checker for valid ElementTree objects."""
return hasattr(obj, 'getroot') and hasattr(obj, 'parse') and hasattr(obj, 'iter')
def etree_iterpath(elem: ElementType,
tag: Optional[str] = None,
path: str = '.',
namespaces: Optional[NamespacesType] = None,
add_position: bool = False) -> Iterator[Tuple[ElementType, str]]:
"""
Creates an iterator for the element and its subelements that yield elements and paths.
If tag is not `None` or '*', only elements whose tag matches are returned from the iterator.
:param elem: the element to iterate.
:param tag: tag filtering.
:param path: the current path, '.' for default.
:param namespaces: is an optional mapping from namespace prefix to URI.
:param add_position: add context position to child elements that appear multiple times.
"""
if tag == "*":
tag = None
if not path:
path = '.'
if tag is None or elem.tag == tag:
yield elem, path
if add_position:
children_tags = Counter(e.tag for e in elem)
positions = Counter(t for t in children_tags if children_tags[t] > 1)
else:
positions = Counter()
for child in elem:
if callable(child.tag):
continue # Skip comments and PIs
child_name = child.tag if namespaces is None else get_prefixed_qname(child.tag, namespaces)
if path == '/':
child_path = f'/{child_name}'
else:
child_path = '/'.join((path, child_name))
if child.tag in positions:
child_path += '[%d]' % positions[child.tag]
positions[child.tag] += 1
yield from etree_iterpath(child, tag, child_path, namespaces, add_position)
def etree_getpath(elem: ElementType,
root: ElementType,
namespaces: Optional[NamespacesType] = None,
relative: bool = True,
add_position: bool = False,
parent_path: bool = False) -> Optional[str]:
"""
Returns the XPath path from *root* to descendant *elem* element.
:param elem: the descendant element.
:param root: the root element.
:param namespaces: an optional mapping from namespace prefix to URI.
:param relative: returns a relative path.
:param add_position: add context position to child elements that appear multiple times.
:param parent_path: if set to `True` returns the parent path. Default is `False`.
:return: An XPath expression or `None` if *elem* is not a descendant of *root*.
"""
if relative:
path = '.'
elif namespaces:
path = f'/{get_prefixed_qname(root.tag, namespaces)}'
else:
path = f'/{root.tag}'
if not parent_path:
for e, path in etree_iterpath(root, elem.tag, path, namespaces, add_position):
if e is elem:
return path
else:
for e, path in etree_iterpath(root, None, path, namespaces, add_position):
if elem in e:
return path
return None
def etree_iter_location_hints(elem: ElementType) -> Iterator[Tuple[Any, Any]]:
"""Yields schema location hints contained in the attributes of an element."""
if XSI_SCHEMA_LOCATION in elem.attrib:
locations = elem.attrib[XSI_SCHEMA_LOCATION].split()
for ns, url in zip(locations[0::2], locations[1::2]):
yield ns, url
if XSI_NONS_SCHEMA_LOCATION in elem.attrib:
for url in elem.attrib[XSI_NONS_SCHEMA_LOCATION].split():
yield '', url
def prune_etree(root: ElementType, selector: Callable[[ElementType], bool]) \
-> Optional[bool]:
"""
Removes from a tree structure the elements that verify the selector
function. The checking and eventual removals are performed using a
breadth-first visit method.
:param root: the root element of the tree.
:param selector: the single argument function to apply on each visited node.
:return: `True` if the root node verify the selector function, `None` otherwise.
"""
def _prune_subtree(elem: ElementType) -> None:
for child in elem[:]:
if selector(child):
elem.remove(child)
for child in elem:
_prune_subtree(child)
if selector(root):
del root[:]
return True
_prune_subtree(root)
return None
def count_digits(number: NumericValueType) -> Tuple[int, int]:
"""
Counts the digits of a number.
:param number: an int or a float or a Decimal or a string representing a number.
:return: a couple with the number of digits of the integer part and \
the number of digits of the decimal part.
"""
if isinstance(number, str):
number = str(Decimal(number)).lstrip('-+')
elif isinstance(number, bytes):
number = str(Decimal(number.decode())).lstrip('-+')
else:
number = str(number).lstrip('-+')
if 'E' in number:
significand, _, _exponent = number.partition('E')
elif 'e' in number:
significand, _, _exponent = number.partition('e')
elif '.' not in number:
return len(number.lstrip('0')), 0
else:
integer_part, _, decimal_part = number.partition('.')
return len(integer_part.lstrip('0')), len(decimal_part.rstrip('0'))
significand = significand.strip('0')
exponent = int(_exponent)
num_digits = len(significand) - 1 if '.' in significand else len(significand)
if exponent > 0:
return num_digits + exponent, 0
else:
return 0, num_digits - exponent - 1
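# Hedged worked examples for count_digits, traced from the implementation above (illustrative only):
#   count_digits('100.50')          -> (3, 1)   # the trailing zero of the decimal part is stripped
#   count_digits(Decimal('0.125'))  -> (0, 3)   # a bare '0' integer part counts as zero digits
#   count_digits(1500)              -> (4, 0)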
def strictly_equal(obj1: object, obj2: object) -> bool:
"""Checks if the objects are equal and are of the same type."""
return obj1 == obj2 and type(obj1) is type(obj2)
def raw_xml_encode(value: Union[None, AtomicValueType, List[AtomicValueType],
Tuple[AtomicValueType, ...]]) -> Optional[str]:
"""Encodes a simple value to XML."""
if isinstance(value, bool):
return 'true' if value else 'false'
elif isinstance(value, (list, tuple)):
return ' '.join(str(e) for e in value)
else:
return str(value) if value is not None else None
def is_defuse_error(err: Exception) -> bool:
"""
Returns `True` if the error is related to defuse of XML data in the DTD
of the source (forbid entities or external references), `False` otherwise.
"""
if not isinstance(err, ParseError):
return False
msg = str(err)
return "Entities are forbidden" in msg or \
"Unparsed entities are forbidden" in msg or \
"External references are forbidden" in msg
|
7e6493bd629587b786e64095847dd2d56fc72a88
|
a0447b03ad89a41a5c2e2073e32aeaf4d6279340
|
/ironic/tests/unit/drivers/test_drac.py
|
6af1c2de67e87810c4108253b20d77afe7de7ca8
|
[
"Apache-2.0"
] |
permissive
|
openstack/ironic
|
2ae87e36d7a62d44b7ed62cad4e2e294d48e061b
|
ab76ff12e1c3c2208455e917f1a40d4000b4e990
|
refs/heads/master
| 2023-08-31T11:08:34.486456
| 2023-08-31T04:45:05
| 2023-08-31T04:45:05
| 10,066,301
| 411
| 365
|
Apache-2.0
| 2023-07-25T02:05:53
| 2013-05-14T22:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 8,258
|
py
|
test_drac.py
|
# Copyright (c) 2017-2019 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_utils import uuidutils
from ironic.conductor import task_manager
from ironic.drivers.modules import agent
from ironic.drivers.modules import drac
from ironic.drivers.modules import inspector
from ironic.drivers.modules import ipxe
from ironic.drivers.modules.network import flat as flat_net
from ironic.drivers.modules import noop
from ironic.drivers.modules.storage import noop as noop_storage
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.objects import utils as obj_utils
class IDRACHardwareTestCase(db_base.DbTestCase):
def setUp(self):
super(IDRACHardwareTestCase, self).setUp()
self.config_temp_dir('http_root', group='deploy')
self.config(enabled_hardware_types=['idrac'],
enabled_boot_interfaces=[
'idrac-redfish-virtual-media', 'ipxe', 'pxe'],
enabled_management_interfaces=[
'idrac', 'idrac-redfish', 'idrac-wsman'],
enabled_power_interfaces=[
'idrac', 'idrac-redfish', 'idrac-wsman'],
enabled_inspect_interfaces=[
'idrac', 'idrac-redfish', 'idrac-wsman', 'inspector',
'no-inspect'],
enabled_network_interfaces=['flat', 'neutron', 'noop'],
enabled_raid_interfaces=[
'idrac', 'idrac-wsman', 'idrac-redfish', 'no-raid',
'agent'],
enabled_vendor_interfaces=[
'idrac', 'idrac-wsman', 'no-vendor'],
enabled_bios_interfaces=[
'idrac-wsman', 'idrac-redfish', 'no-bios'])
def _validate_interfaces(self, driver, **kwargs):
self.assertIsInstance(
driver.boot,
kwargs.get('boot', ipxe.iPXEBoot))
self.assertIsInstance(
driver.deploy,
kwargs.get('deploy', agent.AgentDeploy))
self.assertIsInstance(
driver.management,
kwargs.get('management', drac.management.DracWSManManagement))
self.assertIsInstance(
driver.power,
kwargs.get('power', drac.power.DracWSManPower))
self.assertIsInstance(
driver.bios,
kwargs.get('bios', drac.bios.DracWSManBIOS))
self.assertIsInstance(
driver.console,
kwargs.get('console', noop.NoConsole))
self.assertIsInstance(
driver.inspect,
kwargs.get('inspect', drac.inspect.DracWSManInspect))
self.assertIsInstance(
driver.network,
kwargs.get('network', flat_net.FlatNetwork))
self.assertIsInstance(
driver.raid,
kwargs.get('raid', drac.raid.DracWSManRAID))
self.assertIsInstance(
driver.storage,
kwargs.get('storage', noop_storage.NoopStorage))
self.assertIsInstance(
driver.vendor,
kwargs.get('vendor', drac.vendor_passthru.DracWSManVendorPassthru))
def test_default_interfaces(self):
node = obj_utils.create_test_node(self.context, driver='idrac')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(task.driver)
def test_override_with_inspector(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
inspect_interface='inspector')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(task.driver,
inspect=inspector.Inspector)
def test_override_with_raid(self):
for iface, impl in [('agent', agent.AgentRAID),
('no-raid', noop.NoRAID)]:
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='idrac',
raid_interface=iface)
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(task.driver, raid=impl)
def test_override_with_redfish_raid(self):
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='idrac',
raid_interface='idrac-redfish')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(task.driver,
raid=drac.raid.DracRedfishRAID)
def test_override_no_vendor(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
vendor_interface='no-vendor')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(task.driver,
vendor=noop.NoVendor)
def test_override_with_idrac(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
management_interface='idrac',
power_interface='idrac',
inspect_interface='idrac',
raid_interface='idrac',
vendor_interface='idrac')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(
task.driver,
management=drac.management.DracManagement,
power=drac.power.DracPower,
inspect=drac.inspect.DracInspect,
raid=drac.raid.DracRAID,
vendor=drac.vendor_passthru.DracVendorPassthru)
def test_override_with_redfish_management_and_power(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
management_interface='idrac-redfish',
power_interface='idrac-redfish')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(
task.driver,
management=drac.management.DracRedfishManagement,
power=drac.power.DracRedfishPower)
def test_override_with_redfish_bios(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
bios_interface='idrac-redfish')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(
task.driver,
bios=drac.bios.DracRedfishBIOS)
def test_override_with_redfish_inspect(self):
node = obj_utils.create_test_node(self.context, driver='idrac',
inspect_interface='idrac-redfish')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(
task.driver,
inspect=drac.inspect.DracRedfishInspect)
def test_override_with_redfish_virtual_media_boot(self):
node = obj_utils.create_test_node(
self.context, driver='idrac',
boot_interface='idrac-redfish-virtual-media')
with task_manager.acquire(self.context, node.id) as task:
self._validate_interfaces(
task.driver,
boot=drac.boot.DracRedfishVirtualMediaBoot)
|
1d9518e0eaf737d6a901cd600cf22fc7c2372343
|
8d44e796eaf0c8e11bbc2a27ef093e97a25b6f4a
|
/test/agents/test_memory.py
|
92bf79d13978d2e1e796ea2f38613e2a8374476a
|
[
"Apache-2.0"
] |
permissive
|
deepset-ai/haystack
|
caa5287051d1771395ea624b58097000825bad81
|
5f1256ac7e5734c2ea481e72cb7e02c34baf8c43
|
refs/heads/main
| 2023-09-01T02:41:23.490526
| 2023-08-31T15:33:12
| 2023-08-31T15:33:12
| 221,654,678
| 10,599
| 1,558
|
Apache-2.0
| 2023-09-14T17:09:42
| 2019-11-14T09:05:28
|
Python
|
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
test_memory.py
|
import pytest
from typing import Dict, Any
from haystack.agents.memory import NoMemory, ConversationMemory
@pytest.mark.unit
def test_no_memory():
no_mem = NoMemory()
assert no_mem.load() == ""
no_mem.save({"key": "value"})
no_mem.clear()
@pytest.mark.unit
def test_conversation_memory():
conv_mem = ConversationMemory()
assert conv_mem.load() == ""
data: Dict[str, Any] = {"input": "Hello", "output": "Hi there"}
conv_mem.save(data)
assert conv_mem.load() == "Human: Hello\nAI: Hi there\n"
data: Dict[str, Any] = {"input": "How are you?", "output": "I'm doing well, thanks."}
conv_mem.save(data)
assert conv_mem.load() == "Human: Hello\nAI: Hi there\nHuman: How are you?\nAI: I'm doing well, thanks.\n"
assert conv_mem.load(window_size=1) == "Human: How are you?\nAI: I'm doing well, thanks.\n"
conv_mem.clear()
assert conv_mem.load() == ""
@pytest.mark.unit
def test_conversation_memory_window_size():
conv_mem = ConversationMemory()
assert conv_mem.load() == ""
data: Dict[str, Any] = {"input": "Hello", "output": "Hi there"}
conv_mem.save(data)
data: Dict[str, Any] = {"input": "How are you?", "output": "I'm doing well, thanks."}
conv_mem.save(data)
assert conv_mem.load() == "Human: Hello\nAI: Hi there\nHuman: How are you?\nAI: I'm doing well, thanks.\n"
assert conv_mem.load(window_size=1) == "Human: How are you?\nAI: I'm doing well, thanks.\n"
# clear the memory
conv_mem.clear()
assert conv_mem.load() == ""
assert conv_mem.load(window_size=1) == ""
|
5153e4c30abb0dcf272c99ce157b3854817c47ea
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/utils/gapy/errors.py
|
0067e4a5eb27f8662f0233583b07eec6aec0a265
|
[
"Apache-2.0"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 1,422
|
py
|
errors.py
|
#
# Copyright (C) 2019 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors: Germain Haugou, GreenWaves Technologies (germain.haugou@greenwaves-technologies.com)
#
import common
class FatalError(RuntimeError):
"""
Wrapper class for runtime errors that aren't caused by internal bugs.
"""
def __init__(self, message):
RuntimeError.__init__(self, message)
@staticmethod
def WithResult(message, result):
"""
Return a fatal error object that appends the hex values of
'result' as a string formatted argument.
"""
message += " (result was %s)" % common.hexify(result)
return FatalError(message)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class NotSupportedError(FatalError):
def __init__(self, gap, function_name):
FatalError.__init__(self, "Function %s is not supported for %s." % (function_name, gap.CHIP_NAME))
|
89ce4b5193ec4c7865284906b95a4151f8f9665d
|
c7e0c86a24521a13c3b06c73244e9f5854f47284
|
/scenarios/sumo/intersections/2lane_circle/scenario.py
|
5295dcb110da1d46721641658df7795db243c890
|
[
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-NC-4.0",
"GPL-1.0-or-later",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-2.0-or-later",
"GPL-3.0-or-later",
"BSD-3-Clause",
"MIT",
"LGPL-3.0-or-later",
"BSD-3-Clause-Modification",
"LicenseRef-scancode-free-unknown",
"Zlib",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-or-later",
"GPL-2.0-or-later",
"LicenseRef-scancode-protobuf",
"LGPL-2.1-only",
"HPND",
"GPL-2.0-only",
"GPL-3.0-only",
"Apache-2.0",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CDDL-1.0"
] |
permissive
|
huawei-noah/SMARTS
|
243d1f1fa4d3afe52a1dd8f7c6c500054d4a1a97
|
2ae8bd76a0b6e4da5699629cec0fefa5aa47ce67
|
refs/heads/master
| 2023-08-31T05:06:29.064270
| 2023-08-28T23:11:31
| 2023-08-28T23:11:31
| 301,903,883
| 824
| 212
|
MIT
| 2023-08-08T14:52:00
| 2020-10-07T02:11:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,943
|
py
|
scenario.py
|
from pathlib import Path
import smarts.sstudio.types as t
from smarts.sstudio import gen_scenario
laner_agent = t.SocialAgentActor(
name="laner-agent",
agent_locator="scenarios.sumo.intersections.2lane_circle.agent_prefabs:laner-agent-v0",
)
buddha_agent = t.SocialAgentActor(
name="buddha-agent",
agent_locator="scenarios.sumo.intersections.2lane_circle.agent_prefabs:buddha-agent-v0",
)
# Replace the above lines with the code below if you want to replay the agent actions and inputs
# laner_agent = t.SocialAgentActor(
# name="laner-agent",
# agent_locator="zoo.policies:replay-agent-v0",
# policy_kwargs={
# "save_directory": "./replay",
# "id": "agent_la",
# "wrapped_agent_locator": "scenarios.sumo.intersections.2lane_circle.agent_prefabs:laner-agent-v0",
# },
# )
# buddha_agent = t.SocialAgentActor(
# name="buddha-agent",
# agent_locator="zoo.policies:replay-agent-v0",
# policy_kwargs={
# "save_directory": "./replay",
# "id": "agent_ba",
# "wrapped_agent_locator": "scenarios.sumo.intersections.2lane_circle.agent_prefabs:buddha-agent-v0",
# },
# )
gen_scenario(
scenario=t.Scenario(
social_agent_missions={
f"s-agent-{laner_agent.name}": (
[laner_agent],
[
t.Mission(
t.Route(
begin=("edge-east-EW", 0, 5), end=("edge-west-EW", 0, 5)
)
)
],
),
f"s-agent-{buddha_agent.name}": (
[buddha_agent],
[
t.Mission(
t.Route(
begin=("edge-west-WE", 0, 5), end=("edge-east-WE", 0, 5)
)
)
],
),
}
),
output_dir=Path(__file__).parent,
)
|
52f2d79531a2151a1e7dadd26d35f3992169ff75
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/0575. Distribute Candies/0575.py
|
2e3a80a31ba04cebafe53c07bd7d735be2a07faf
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 127
|
py
|
0575.py
|
from typing import List
class Solution:
def distributeCandies(self, candies: List[int]) -> int:
return min(len(candies) // 2, len(set(candies)))
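# Hedged worked example: for candies = [1, 1, 2, 2, 3, 3] the eater may take len(candies) // 2 == 3
# candies and there are 3 distinct kinds, so the answer is min(3, 3) == 3; for [1, 1, 1, 1] it is min(2, 1) == 1.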
|
865393fa87fa40552250272d24be88b9ee3f987f
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stubs/braintree/braintree/local_payment.pyi
|
355a1c486abd97cb7d2ff76d8d1c92aafd4f36a1
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 87
|
pyi
|
local_payment.pyi
|
from braintree.resource import Resource as Resource
class LocalPayment(Resource): ...
|
6ae2544e00f6dfdce92d8cc6f95ea50c835bc078
|
518bf342bc4138982af3e2724e75f1d9ca3ba56c
|
/solutions/2235. Add Two Integers/2235.py
|
bfd0ca7411f1f29fa9dabd2b5ce2290d47ea4758
|
[
"MIT"
] |
permissive
|
walkccc/LeetCode
|
dae85af7cc689882a84ee5011f0a13a19ad97f18
|
a27be41c174565d365cbfe785f0633f634a01b2a
|
refs/heads/main
| 2023-08-28T01:32:43.384999
| 2023-08-20T19:00:45
| 2023-08-20T19:00:45
| 172,231,974
| 692
| 302
|
MIT
| 2023-08-13T14:48:42
| 2019-02-23T15:46:23
|
C++
|
UTF-8
|
Python
| false
| false
| 37
|
py
|
2235.py
|
import operator
class Solution:
sum = operator.add
|
075c92fd1a2f4048376a6feb17da48bfbe9ebe12
|
6c5afeeb3eefbfcf5ba6cfc2e7895d8c9dbf83c5
|
/python/prophet/make_holidays.py
|
44da48fbe76d3a03d7989e869224cb5880734bef
|
[
"MIT"
] |
permissive
|
facebook/prophet
|
59a74aa92d27bdc673ceaede02016a9218556cc4
|
2ac9e8fa760e587371e1d1260f3e9f1fac9d76cb
|
refs/heads/main
| 2023-09-01T12:43:34.236541
| 2023-08-21T22:27:06
| 2023-08-21T22:27:06
| 73,872,834
| 13,093
| 4,448
|
MIT
| 2023-08-24T21:49:59
| 2016-11-16T01:50:08
|
Python
|
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
make_holidays.py
|
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
import holidays
def get_country_holidays_class(country):
"""Get class for a supported country.
Parameters
----------
country: country code
Returns
-------
A valid country holidays class
"""
substitutions = {
"TU": "TR", # For compatibility with Turkey as 'TU' cases.
}
country = substitutions.get(country, country)
if not hasattr(holidays, country):
raise AttributeError(f"Holidays in {country} are not currently supported!")
return getattr(holidays, country)
def get_holiday_names(country):
"""Return all possible holiday names of given country
Parameters
----------
country: country name
Returns
-------
A set of all possible holiday names of given country
"""
country_holidays = get_country_holidays_class(country)
return set(country_holidays(language="en_US", years=np.arange(1995, 2045)).values())
def make_holidays_df(year_list, country, province=None, state=None):
"""Make dataframe of holidays for given years and countries
Parameters
----------
year_list: a list of years
country: country name
province: province/subdivision name, passed to the holidays package as `subdiv` (optional)
state: state name (optional, currently unused)
Returns
-------
Dataframe with 'ds' and 'holiday', which can directly feed
to 'holidays' params in Prophet
"""
country_holidays = get_country_holidays_class(country)
holidays = country_holidays(expand=False, language="en_US", subdiv=province, years=year_list)
holidays_df = pd.DataFrame(
[(date, holidays.get_list(date)) for date in holidays],
columns=["ds", "holiday"],
)
holidays_df = holidays_df.explode("holiday")
holidays_df.reset_index(inplace=True, drop=True)
holidays_df["ds"] = pd.to_datetime(holidays_df["ds"])
return holidays_df
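# Hedged usage sketch (the country code and years are illustrative only):
#   make_holidays_df(year_list=[2023, 2024], country='US')
# returns a DataFrame with a datetime 'ds' column and a 'holiday' name column, which can be passed
# directly to the `holidays` argument of a Prophet model, e.g.
#   from prophet import Prophet
#   m = Prophet(holidays=make_holidays_df(year_list=[2023, 2024], country='US'))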
|
46de3c6f3e1484178cf1bbee2635a473050f8e2e
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/fishing/GenusPanel.py
|
7f1f65d7834e84fbf5a4541bcd6fc76477fbca51
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 3,620
|
py
|
GenusPanel.py
|
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
from direct.gui.DirectGui import *
from panda3d.core import *
from toontown.toonbase import TTLocalizer
from . import FishBase
from . import FishGlobals
from . import FishPhoto
class GenusPanel(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('GenusPanel')
def __init__(self, genus = None, itemIndex = 0, *extraArgs):
fishingGui = loader.loadModel('phase_3.5/models/gui/fishingBook')
albumGui = fishingGui.find('**/photo_frame1').copyTo(hidden)
albumGui.find('**/picture_frame').reparentTo(albumGui, -1)
albumGui.find('**/arrows').removeNode()
optiondefs = (('relief', None, None),
('state', DGG.NORMAL, None),
('image', albumGui, None),
('image_scale', (0.025, 0.025, 0.025), None),
('image_pos', (0, 1, 0), None),
('text', TTLocalizer.UnknownFish, None),
('text_scale', 0.065, None),
('text_fg', (0.2, 0.1, 0.0, 1), None),
('text_pos', (-0.5, -0.34), None),
('text_font', ToontownGlobals.getInterfaceFont(), None),
('text_wordwrap', 13.5, None),
('text_align', TextNode.ALeft, None))
self.defineoptions({}, optiondefs)
DirectFrame.__init__(self)
self.initialiseoptions(GenusPanel)
self.fishPanel = None
self.genus = None
self.setGenus(int(genus))
self.setScale(1.2)
albumGui.removeNode()
return
def destroy(self):
if self.fishPanel:
self.fishPanel.destroy()
del self.fishPanel
DirectFrame.destroy(self)
def load(self):
pass
def setGenus(self, genus):
if self.genus == genus:
return
self.genus = genus
if self.genus is not None:
if self.fishPanel:
self.fishPanel.destroy()
f = FishBase.FishBase(self.genus, 0, 0)
self.fishPanel = FishPhoto.FishPhoto(fish=f, parent=self)
self.fishPanel.setPos(-0.23, 1, -0.01)
self.fishPanel.setSwimBounds(-0.2461, 0.2367, -0.207, 0.2664)
self.fishPanel.setSwimColor(0.47, 1.0, 0.99, 1.0)
speciesList = FishGlobals.getSpecies(self.genus)
self.speciesLabels = []
offset = 0.075
startPos = len(speciesList) / 2 * offset
if not len(speciesList) % 2:
startPos -= offset / 2
for species in range(len(speciesList)):
label = DirectLabel(parent=self, relief=None, state=DGG.NORMAL, pos=(0.06, 0, startPos - species * offset), text=TTLocalizer.UnknownFish, text_fg=(0.2, 0.1, 0.0, 1), text_scale=TTLocalizer.GPgenus, text_align=TextNode.ALeft, text_font=ToontownGlobals.getInterfaceFont())
self.speciesLabels.append(label)
return
def show(self):
self.update()
DirectFrame.show(self)
def hide(self):
if self.fishPanel is not None:
self.fishPanel.hide()
DirectFrame.hide(self)
return
def update(self):
if base.localAvatar.fishCollection.hasGenus(self.genus) and self.fishPanel is not None:
self.fishPanel.show(showBackground=1)
self['text'] = TTLocalizer.FishGenusNames[self.genus]
for species in range(len(FishGlobals.getSpecies(self.genus))):
if base.localAvatar.fishCollection.hasFish(self.genus, species):
self.speciesLabels[species]['text'] = TTLocalizer.FishSpeciesNames[self.genus][species]
return
|
e7a2acf6833906687cd7c63da40c3d7949cb6931
|
bb4e12f9155e7f67cb3974c8405253a0a5ffa516
|
/examples/ports/multi_receive.py
|
7217019c901c22061b07a7d116b91639662119e7
|
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
mido/mido
|
80c5c05e3eb38051253ccf40caf6ac7b917cc066
|
6970e045f1e66314ee266e8fb16432df75f6e87e
|
refs/heads/main
| 2023-08-15T09:47:38.967908
| 2023-08-07T18:46:25
| 2023-08-07T18:47:14
| 10,889,417
| 937
| 160
|
MIT
| 2023-08-08T07:11:05
| 2013-06-23T18:11:38
|
Python
|
UTF-8
|
Python
| false
| false
| 522
|
py
|
multi_receive.py
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2013 Ole Martin Bjorndalen <ombdalen@gmail.com>
#
# SPDX-License-Identifier: MIT
"""
Receive messages from multiple ports.
"""
import mido
from mido.ports import multi_receive
# Open all available inputs.
ports = [mido.open_input(name) for name in mido.get_input_names()]
for port in ports:
print(f'Using {port}')
print('Waiting for messages...')
try:
for message in multi_receive(ports):
print(f'Received {message}')
except KeyboardInterrupt:
pass
|
bf77f9251420310334e2968e74e9731d34c7ecc8
|
524d27085e9e424d06b13c5ce7b5337559da9281
|
/src/dnadiffusion/utils/train_util.py
|
4c87efc29956382bae41d6a19577194ccd5206d8
|
[
"MIT"
] |
permissive
|
pinellolab/DNA-Diffusion
|
6ae3822ea71b24345d1e2ab47faa9d6a92058af7
|
4c31977bc8102beab9e14e82073d62099e48f47e
|
refs/heads/main
| 2023-08-08T02:16:01.787934
| 2023-07-13T03:14:33
| 2023-07-13T03:14:33
| 537,093,072
| 260
| 46
|
MIT
| 2023-09-14T21:04:24
| 2022-09-15T15:35:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,916
|
py
|
train_util.py
|
import copy
from typing import Any, Dict
import torch
import torchvision.transforms as T
from accelerate import Accelerator
from torch.optim import Adam
from torch.utils.data import DataLoader
from tqdm import tqdm
from dnadiffusion.data.dataloader import SequenceDataset
from dnadiffusion.metrics.metrics import compare_motif_list, generate_similarity_using_train
from dnadiffusion.utils.sample_util import create_sample
from dnadiffusion.utils.utils import EMA
class TrainLoop:
def __init__(
self,
data: Dict[str, Any],
model: torch.nn.Module,
accelerator: Accelerator,
epochs: int = 10000,
loss_show_epoch: int = 10,
sample_epoch: int = 100,
save_epoch: int = 500,
model_name: str = "model_48k_sequences_per_group_K562_hESCT0_HepG2_GM12878_12k",
image_size: int = 200,
num_sampling_to_compare_cells: int = 1000,
batch_size: int = 960,
):
self.encode_data = data
self.model = model
self.optimizer = Adam(self.model.parameters(), lr=1e-4)
self.accelerator = accelerator
self.epochs = epochs
self.loss_show_epoch = loss_show_epoch
self.sample_epoch = sample_epoch
self.save_epoch = save_epoch
self.model_name = model_name
self.image_size = image_size
self.num_sampling_to_compare_cells = num_sampling_to_compare_cells
if self.accelerator.is_main_process:
self.ema = EMA(0.995)
self.ema_model = copy.deepcopy(self.model).eval().requires_grad_(False)
# Metrics
self.train_kl, self.test_kl, self.shuffle_kl = 1, 1, 1
self.seq_similarity = 1
self.start_epoch = 0
# Dataloader
seq_dataset = SequenceDataset(seqs=self.encode_data["X_train"], c=self.encode_data["x_train_cell_type"])
self.train_dl = DataLoader(seq_dataset, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True)
def train_loop(self):
# Prepare for training
self.model, self.optimizer, self.train_dl = self.accelerator.prepare(self.model, self.optimizer, self.train_dl)
# Initialize wandb
if self.accelerator.is_main_process:
self.accelerator.init_trackers(
"dnadiffusion",
init_kwargs={"wandb": {"notes": "testing wandb accelerate script"}},
)
for epoch in tqdm(range(self.start_epoch, self.epochs)):
self.model.train()
# Getting loss of current batch
for _, batch in enumerate(self.train_dl):
loss = self.train_step(batch)
# Logging loss
if (epoch + 1) % self.loss_show_epoch == 0 and self.accelerator.is_main_process:
self.log_step(loss, epoch)
# Sampling
if (epoch + 1) % self.sample_epoch == 0 and self.accelerator.is_main_process:
self.sample()
# Saving model
if (epoch + 1) % self.save_epoch == 0 and self.accelerator.is_main_process:
self.save_model(epoch)
def train_step(self, batch):
x, y = batch
with self.accelerator.autocast():
loss = self.model(x, y)
self.optimizer.zero_grad()
self.accelerator.backward(loss)
self.accelerator.wait_for_everyone()
self.optimizer.step()
self.accelerator.wait_for_everyone()
if self.accelerator.is_main_process:
self.ema.step_ema(self.ema_model, self.accelerator.unwrap_model(self.model))
self.accelerator.wait_for_everyone()
return loss
def log_step(self, loss, epoch):
if self.accelerator.is_main_process:
self.accelerator.log(
{
"train": self.train_kl,
"test": self.test_kl,
"shuffle": self.shuffle_kl,
"loss": loss.item(),
"seq_similarity": self.seq_similarity,
},
step=epoch,
)
print(f" Epoch {epoch} Loss:", loss.item())
def sample(self):
self.model.eval()
# Sample from the model
print("saving")
synt_df = create_sample(
self.accelerator.unwrap_model(self.model),
conditional_numeric_to_tag=self.encode_data["numeric_to_tag"],
cell_types=self.encode_data["cell_types"],
number_of_samples=int(self.num_sampling_to_compare_cells / 10),
)
self.seq_similarity = generate_similarity_using_train(self.encode_data["X_train"])
self.train_kl = compare_motif_list(synt_df, self.encode_data["train_motifs"])
self.test_kl = compare_motif_list(synt_df, self.encode_data["test_motifs"])
self.shuffle_kl = compare_motif_list(synt_df, self.encode_data["shuffle_motifs"])
print("Similarity", self.seq_similarity, "Similarity")
print("KL_TRAIN", self.train_kl, "KL")
print("KL_TEST", self.test_kl, "KL")
print("KL_SHUFFLE", self.shuffle_kl, "KL")
def save_model(self, epoch):
checkpoint_dict = {
"model": self.accelerator.get_state_dict(self.model),
"optimizer": self.optimizer.state_dict(),
"epoch": epoch,
"ema_model": self.accelerator.get_state_dict(self.ema_model),
}
torch.save(
checkpoint_dict,
f"dnadiffusion/checkpoints/epoch_{epoch}_{self.model_name}.pt",
)
def load(self, path):
checkpoint_dict = torch.load(path)
self.model.load_state_dict(checkpoint_dict["model"])
self.optimizer.load_state_dict(checkpoint_dict["optimizer"])
self.start_epoch = checkpoint_dict["epoch"]
if self.accelerator.is_main_process:
self.ema_model.load_state_dict(checkpoint_dict["ema_model"])
self.train_loop()
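# Hedged usage sketch for TrainLoop above. `load_data` and `UNet` are stand-ins for whatever loader and
# diffusion model the project actually wires in; the data dict must carry the keys accessed above
# (X_train, x_train_cell_type, *_motifs, numeric_to_tag, cell_types):
#   accelerator = Accelerator(log_with="wandb")
#   loop = TrainLoop(data=load_data(...), model=UNet(...), accelerator=accelerator, epochs=1000)
#   loop.train_loop()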
|
d2c420cc10c1ba2d7c37098d98d4d15ce4ef6854
|
787022de03a2dd6998c1518673830395b389e3df
|
/migration/migrator/migrations/system/20190625135839_email_enabled_field.py
|
e66758a2fa54e3e9324daf01149fc69ad6327f6b
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
Submitty/Submitty
|
e6b8731656291a025aa77f928eb067bc9a307540
|
b223d9e952bcdb8664721a55593bc75e0e3c8c4f
|
refs/heads/main
| 2023-08-31T23:56:11.291752
| 2023-08-31T19:12:18
| 2023-08-31T19:12:18
| 16,236,118
| 592
| 727
|
BSD-3-Clause
| 2023-09-13T05:36:08
| 2014-01-25T17:43:57
|
PHP
|
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
20190625135839_email_enabled_field.py
|
"""
Migration for the Submitty system.
adds email_enabled boolean to config/email.json
"""
from pathlib import Path
import json
import os
def up(config):
email_filename = str(Path(config.submitty['submitty_install_dir'], 'config', 'email.json'))
# read json and add email_enabled field
try:
with open(email_filename,'r') as open_file:
email_json = json.load(open_file)
email_json['email_enabled'] = True
except FileNotFoundError:
email_json = {
'email_enabled': True,
'email_user': '',
'email_password': '',
'email_sender': 'submitty@myuniversity.edu',
'email_reply_to': 'submitty_do_not_reply@myuniversity.edu',
'email_server_hostname': 'mail.myuniversity.edu',
'email_server_port': 25
}
# write file again with new json
with open(email_filename, 'w') as open_file:
json.dump(email_json, open_file, indent=2)
# no need for down as email_enabled is not used in previous builds
def down(config):
pass
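# Hedged illustration (not part of the migration itself): when no email.json exists, up() writes the
# fallback dict above, which json.dump(..., indent=2) renders roughly as:
#   {
#     "email_enabled": true,
#     "email_user": "",
#     "email_password": "",
#     "email_sender": "submitty@myuniversity.edu",
#     "email_reply_to": "submitty_do_not_reply@myuniversity.edu",
#     "email_server_hostname": "mail.myuniversity.edu",
#     "email_server_port": 25
#   }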
|
1e7e12dec093425791a91bc58864ebda2412a14b
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Pdf_docx_pptx_xlsx_epub_png/source/docx/shape.py
|
e4f885d7344cfdd6c358012d9881d8f74cbaf158
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 2,811
|
py
|
shape.py
|
# encoding: utf-8
"""
Objects related to shapes, visual objects that appear on the drawing layer of
a document.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from .enum.shape import WD_INLINE_SHAPE
from .oxml.ns import nsmap
from .shared import Parented
class InlineShapes(Parented):
"""
Sequence of |InlineShape| instances, supporting len(), iteration, and
indexed access.
"""
def __init__(self, body_elm, parent):
super(InlineShapes, self).__init__(parent)
self._body = body_elm
def __getitem__(self, idx):
"""
Provide indexed access, e.g. 'inline_shapes[idx]'
"""
try:
inline = self._inline_lst[idx]
except IndexError:
msg = "inline shape index [%d] out of range" % idx
raise IndexError(msg)
return InlineShape(inline)
def __iter__(self):
return (InlineShape(inline) for inline in self._inline_lst)
def __len__(self):
return len(self._inline_lst)
@property
def _inline_lst(self):
body = self._body
xpath = '//w:p/w:r/w:drawing/wp:inline'
return body.xpath(xpath)
class InlineShape(object):
"""
Proxy for an ``<wp:inline>`` element, representing the container for an
inline graphical object.
"""
def __init__(self, inline):
super(InlineShape, self).__init__()
self._inline = inline
@property
def height(self):
"""
Read/write. The display height of this inline shape as an |Emu|
instance.
"""
return self._inline.extent.cy
@height.setter
def height(self, cy):
self._inline.extent.cy = cy
self._inline.graphic.graphicData.pic.spPr.cy = cy
@property
def type(self):
"""
The type of this inline shape as a member of
``docx.enum.shape.WD_INLINE_SHAPE``, e.g. ``LINKED_PICTURE``.
Read-only.
"""
graphicData = self._inline.graphic.graphicData
uri = graphicData.uri
if uri == nsmap['pic']:
blip = graphicData.pic.blipFill.blip
if blip.link is not None:
return WD_INLINE_SHAPE.LINKED_PICTURE
return WD_INLINE_SHAPE.PICTURE
if uri == nsmap['c']:
return WD_INLINE_SHAPE.CHART
if uri == nsmap['dgm']:
return WD_INLINE_SHAPE.SMART_ART
return WD_INLINE_SHAPE.NOT_IMPLEMENTED
@property
def width(self):
"""
Read/write. The display width of this inline shape as an |Emu|
instance.
"""
return self._inline.extent.cx
@width.setter
def width(self, cx):
self._inline.extent.cx = cx
self._inline.graphic.graphicData.pic.spPr.cx = cx
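# Hedged usage sketch for the shape proxies above, assuming the standard python-docx entry points
# (Document and the Inches helper) are available in this bundle; 'report.docx' is an illustrative file name:
#   from docx import Document
#   from docx.shared import Inches
#   doc = Document('report.docx')
#   shape = doc.inline_shapes[0]
#   print(shape.type, shape.width, shape.height)
#   shape.width = Inches(2)   # the setters also update the underlying pic/spPr extents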
|
6741fc8a9593ed299b4b11438cbf730249faf7e8
|
364774e29ef2474552ea3839de0951e63cbae0a6
|
/wouso/core/magic/admin.py
|
d22a68f7fb012c98f2a752f1983ac7fa2779f675
|
[
"Apache-2.0"
] |
permissive
|
rosedu/wouso
|
66c50ef750cf79d6959768f7df93cc08607cc266
|
ed34c62ac925db719388f27fe5acb40376d8d0c1
|
refs/heads/master
| 2022-10-29T14:28:51.818073
| 2022-09-24T18:54:04
| 2022-09-24T18:54:04
| 2,965,476
| 121
| 97
|
NOASSERTION
| 2019-11-15T09:33:50
| 2011-12-12T16:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 330
|
py
|
admin.py
|
from django.contrib import admin
from models import Artifact, ArtifactGroup, Spell
class SpellAdmin(admin.ModelAdmin):
list_display = ('name', 'title', 'type', 'percents', 'price', 'level_required', 'available', 'mass')
admin.site.register(Artifact)
admin.site.register(ArtifactGroup)
admin.site.register(Spell, SpellAdmin)
|
696af55cebf107db0eaa3c49fe48d1d8b5d4279e
|
21be7833b4935fb4a8f39b816fe868d6cda78b07
|
/termius/porting/__init__.py
|
c1e83a072087f14d46be3267186cc025716fa5ec
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
termius/termius-cli
|
dd45295dada12ee5dbd7a864e6fcf634dfbd02a3
|
2664d0c70d3d682ad931b885b4965447b156c280
|
refs/heads/master
| 2023-09-05T18:53:04.812354
| 2021-04-05T04:10:56
| 2021-04-05T04:10:56
| 10,905,793
| 262
| 41
|
NOASSERTION
| 2023-03-30T21:40:42
| 2013-06-24T11:00:03
|
Python
|
UTF-8
|
Python
| false
| false
| 188
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
"""Synchronize SaaS and IaaS hosts with application hosts.
Retrieve service complete host list by service's name and merge the hosts
into application storage.
"""
|
e98dd28c5a78790e322343a2514e3adf9d5b184a
|
3093dd966d4019f5e96f335677716afd6ffad7ac
|
/test/test_dep_versioning.py
|
415f9188c20bb7ed84e05904e12cbade289210e6
|
[
"MIT"
] |
permissive
|
sourcegraph/python-langserver
|
db62fda4e025d5a3241e33b63d5380f585cb444d
|
214b2717b44a5bd7aaf4ac077ac1b9054de064ec
|
refs/heads/master
| 2021-10-24T16:33:53.998475
| 2019-03-26T23:10:36
| 2019-03-26T23:10:36
| 66,597,766
| 118
| 11
|
MIT
| 2018-06-29T20:40:13
| 2016-08-25T22:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
test_dep_versioning.py
|
from .harness import Harness
import uuid
import pytest
@pytest.fixture(params=[
# tuples of the repo for the test, along
# with the expected doc_string for the hover
# in that repo
("repos/dep_versioning_fixed", "this is version 0.1"),
("repos/dep_versioning_between", "this is version 0.4"),
("repos/dep_versioning_between_multiple", "this is version 0.4"),
("repos/dep_versioning_none", "this is version 0.6")
])
def test_data(request):
repo_path, expected_doc_string = request.param
workspace = Harness(repo_path)
workspace.initialize(repo_path + str(uuid.uuid4()))
yield (workspace, expected_doc_string)
workspace.exit()
class TestDependencyVersioning:
def test_dep_download_specified_version(self, test_data):
workspace, expected_doc_string = test_data
uri = "file:///test.py"
character, line = 6, 2
result = workspace.hover(uri, line, character)
assert result == {
'contents': [
{
'language': 'python',
'value': 'def testfunc()'
},
expected_doc_string
]
}
|
44b45eee2e843f6644788f858fe0ef8e3d9b32c8
|
906ee9b2b75e48b05c7ec181bc02e019da24a162
|
/node/flatpak_node_generator/providers/yarn.py
|
e18c26e844ed0ba1f7b33885a922a13c78baf5c0
|
[] |
no_license
|
flatpak/flatpak-builder-tools
|
ff401c4c074533119d879f79c0ac35b5fc8ff7c7
|
de56f4702638739f930f4afa648686f12ac4d724
|
refs/heads/master
| 2023-09-01T20:51:19.727717
| 2023-08-24T16:38:04
| 2023-08-24T17:42:59
| 114,991,815
| 164
| 114
| null | 2023-09-09T18:55:37
| 2017-12-21T10:08:07
|
Python
|
UTF-8
|
Python
| false
| false
| 6,677
|
py
|
yarn.py
|
from pathlib import Path
from typing import Any, Dict, Iterator, List, Optional, Tuple, Type
import os
import re
import shlex
import types
import urllib.parse
from ..integrity import Integrity
from ..manifest import ManifestGenerator
from ..package import GitSource, LocalSource, Package, PackageSource, ResolvedSource
from . import LockfileProvider, ModuleProvider, ProviderFactory, RCFileProvider
from .npm import NpmRCFileProvider
from .special import SpecialSourceProvider
GIT_URL_PATTERNS = [
re.compile(r'^git:'),
re.compile(r'^git\+.+:'),
re.compile(r'^ssh:'),
re.compile(r'^https?:.+\.git$'),
re.compile(r'^https?:.+\.git#.+'),
]
GIT_URL_HOSTS = ['github.com', 'gitlab.com', 'bitbucket.com', 'bitbucket.org']
class YarnLockfileProvider(LockfileProvider):
_LOCAL_PKG_RE = re.compile(r'^(?:file|link):')
@staticmethod
def is_git_version(version: str) -> bool:
for pattern in GIT_URL_PATTERNS:
if pattern.match(version):
return True
url = urllib.parse.urlparse(version)
if url.netloc in GIT_URL_HOSTS:
return len([p for p in url.path.split('/') if p]) == 2
return False
def parse_lockfile(self, lockfile: Path) -> Dict[str, Any]:
def _iter_lines() -> Iterator[Tuple[int, str]]:
indent = ' '
for line in lockfile.open():
level = 0
while line.startswith(indent):
level += 1
line = line[len(indent) :]
yield level, line.strip()
root_entry: Dict[str, Any] = {}
parent_entries = [root_entry]
for level, line in _iter_lines():
if line.startswith('#') or not line:
continue
assert level <= len(parent_entries) - 1
parent_entries = parent_entries[: level + 1]
if line.endswith(':'):
key = line[:-1]
child_entry = parent_entries[-1][key] = {}
parent_entries.append(child_entry)
else:
# NOTE shlex.split is handy, but slow;
# to speed up parsing we can use something less robust, e.g.
# _key, _value = line.split(' ', 1)
# parent_entries[-1][self.unquote(_key)] = self.unquote(_value)
key, value = shlex.split(line)
parent_entries[-1][key] = value
return root_entry
def unquote(self, string: str) -> str:
if string.startswith('"'):
assert string.endswith('"')
return string[1:-1]
else:
return string
def process_package(
self, lockfile: Path, name_line: str, entry: Dict[str, Any]
) -> Package:
assert name_line and entry
name = self.unquote(name_line.split(',', 1)[0])
name, version_constraint = name.rsplit('@', 1)
source: PackageSource
if self._LOCAL_PKG_RE.match(version_constraint):
source = LocalSource(path=self._LOCAL_PKG_RE.sub('', version_constraint))
else:
if self.is_git_version(entry['resolved']):
source = self.parse_git_source(version=entry['resolved'])
else:
if 'integrity' in entry:
integrity = Integrity.parse(entry['integrity'])
else:
integrity = None
source = ResolvedSource(resolved=entry['resolved'], integrity=integrity)
return Package(
name=name, version=entry['version'], source=source, lockfile=lockfile
)
def process_lockfile(self, lockfile: Path) -> Iterator[Package]:
for name_line, package in self.parse_lockfile(lockfile).items():
yield self.process_package(lockfile, name_line, package)
class YarnRCFileProvider(RCFileProvider):
RCFILE_NAME = '.yarnrc'
class YarnModuleProvider(ModuleProvider):
# From https://github.com/yarnpkg/yarn/blob/v1.22.4/src/fetchers/tarball-fetcher.js
_PACKAGE_TARBALL_URL_RE = re.compile(
r'(?:(@[^/]+)(?:/|%2f))?[^/]+/(?:-|_attachments)/(?:@[^/]+/)?([^/]+)$'
)
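    # Hedged example of what the pattern is expected to capture (an assumption
    # for illustration, not taken from upstream docs): for a tarball path like
    # "/@types%2fnode/-/node-14.0.0.tgz" the groups would be
    # ("@types", "node-14.0.0.tgz"), which generate_package() below joins into
    # the mirror filename "@types-node-14.0.0.tgz"; an unscoped path such as
    # "/left-pad/-/left-pad-1.3.0.tgz" yields (None, "left-pad-1.3.0.tgz").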
def __init__(self, gen: ManifestGenerator, special: SpecialSourceProvider) -> None:
self.gen = gen
self.special_source_provider = special
self.mirror_dir = self.gen.data_root / 'yarn-mirror'
def __exit__(
self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
tb: Optional[types.TracebackType],
) -> None:
pass
async def generate_package(self, package: Package) -> None:
source = package.source
if isinstance(source, ResolvedSource):
integrity = await source.retrieve_integrity()
url_parts = urllib.parse.urlparse(source.resolved)
match = self._PACKAGE_TARBALL_URL_RE.search(url_parts.path)
if match is not None:
scope, filename = match.groups()
if scope:
filename = f'{scope}-{filename}'
else:
filename = os.path.basename(url_parts.path)
self.gen.add_url_source(
source.resolved, integrity, self.mirror_dir / filename
)
elif isinstance(source, GitSource):
repo_name = urllib.parse.urlparse(source.url).path.split('/')[-1]
name = f'{repo_name}-{source.commit}'
repo_dir = self.gen.tmp_root / name
target_tar = os.path.relpath(self.mirror_dir / name, repo_dir)
self.gen.add_git_source(source.url, source.commit, repo_dir)
self.gen.add_command(f'mkdir -p {self.mirror_dir}')
self.gen.add_command(
f'cd {repo_dir}; git archive --format tar -o {target_tar} HEAD'
)
elif isinstance(source, LocalSource):
assert (package.lockfile.parent / source.path / 'package.json').is_file()
else:
raise NotImplementedError(
f'Unknown source type {source.__class__.__name__}'
)
await self.special_source_provider.generate_special_sources(package)
class YarnProviderFactory(ProviderFactory):
def __init__(self) -> None:
pass
def create_lockfile_provider(self) -> YarnLockfileProvider:
return YarnLockfileProvider()
def create_rcfile_providers(self) -> List[RCFileProvider]:
return [YarnRCFileProvider(), NpmRCFileProvider()]
def create_module_provider(
self, gen: ManifestGenerator, special: SpecialSourceProvider
) -> YarnModuleProvider:
return YarnModuleProvider(gen, special)
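# A hedged usage sketch (an assumption about the surrounding flatpak-node-generator
# flow, shown only for illustration):
#
#   factory = YarnProviderFactory()
#   lockfile_provider = factory.create_lockfile_provider()
#   packages = list(lockfile_provider.process_lockfile(Path('yarn.lock')))
#
# after which each Package would typically be awaited through
# YarnModuleProvider.generate_package() via the module provider created above.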
|
fdcb82d9a69296e7ca152837aed6a360b00d0c21
|
29f18e8ddde0379cef7fa00b1a50058be3cafa79
|
/numba/tests/test_parfors.py
|
2fc5db6feabe070969584a84bb86d6e9890f2833
|
[
"LicenseRef-scancode-secret-labs-2011",
"BSD-3-Clause",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-free-unknown",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"BSD-2-Clause"
] |
permissive
|
numba/numba
|
9a8345ff5f7d57f0ffec40e39941ebf2684df0d1
|
46059957ad416e68476d1e5f32ccd59f7d5df2bb
|
refs/heads/main
| 2023-08-09T22:29:38.170300
| 2023-08-07T15:00:27
| 2023-08-07T15:00:27
| 3,659,275
| 8,247
| 1,151
|
BSD-2-Clause
| 2023-09-13T14:43:48
| 2012-03-08T11:12:43
|
Python
|
UTF-8
|
Python
| false
| false
| 158,774
|
py
|
test_parfors.py
|
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import math
import re
import dis
import numbers
import os
import platform
import sys
import subprocess
import types as pytypes
import warnings
from functools import reduce
import numpy as np
from numpy.random import randn
import operator
from collections import defaultdict, namedtuple
import copy
from itertools import cycle, chain
import subprocess as subp
import numba.parfors.parfor
from numba import (njit, prange, parallel_chunksize,
get_parallel_chunksize, set_parallel_chunksize,
set_num_threads, get_num_threads, typeof)
from numba.core import (types, typing, errors, ir, rewrites,
typed_passes, inline_closurecall, config, compiler, cpu)
from numba.extending import (overload_method, register_model,
typeof_impl, unbox, NativeValue, models)
from numba.core.registry import cpu_target
from numba.core.annotations import type_annotations
from numba.core.ir_utils import (find_callname, guard, build_definitions,
get_definition, is_getitem, is_setitem,
index_var_of_get_setitem)
from numba.np.unsafe.ndarray import empty_inferred as unsafe_empty
from numba.core.bytecode import ByteCodeIter
from numba.core.compiler import (compile_isolated, Flags, CompilerBase,
DefaultPassBuilder)
from numba.core.compiler_machinery import register_pass, AnalysisPass
from numba.core.typed_passes import IRLegalization
from numba.tests.support import (TestCase, captured_stdout, MemoryLeakMixin,
override_env_config, linux_only, tag,
skip_parfors_unsupported, _32bit, needs_blas,
needs_lapack, disabled_test, skip_unless_scipy,
needs_subprocess)
from numba.core.extending import register_jitable
from numba.core.bytecode import _fix_LOAD_GLOBAL_arg
from numba.core import utils
import cmath
import unittest
# NOTE: Each parfors test class is run in a separate subprocess to reduce
# memory pressure in CI settings. The environment variable "SUBPROC_TEST" is
# used to determine whether a test is skipped or not; if you want to run any
# parfors test directly, this environment variable can be set. The subprocesses
# running the test classes set this environment variable as the new process
# starts, which enables the tests within the process. The decorator
# @needs_subprocess is used to ensure the appropriate test skips are made.
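# For example (a hedged sketch, not part of the test suite itself), running a
# single parfors test class directly might look something like:
#
#   SUBPROC_TEST=1 python -m numba.runtests numba.tests.test_parfors.TestParforBasic
#
# where the exact value expected for SUBPROC_TEST is an assumption here; the
# authoritative skip mechanism is the @needs_subprocess decorator imported from
# numba.tests.support.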
@skip_parfors_unsupported
class TestParforsRunner(TestCase):
_numba_parallel_test_ = False
    # Each test class can run for 30 minutes before timing out. Extend this to
    # an hour on aarch64 (some public CI systems were timing out).
_TIMEOUT = 1800 if platform.machine() != 'aarch64' else 3600
"""This is the test runner for all the parfors tests, it runs them in
subprocesses as described above. The convention for the test method naming
is: `test_<TestClass>` where <TestClass> is the name of the test class in
this module.
"""
def runner(self):
themod = self.__module__
test_clazz_name = self.id().split('.')[-1].split('_')[-1]
        # don't specify a given test; it's an entire class that needs running
self.subprocess_test_runner(test_module=themod,
test_class=test_clazz_name,
timeout=self._TIMEOUT)
def test_TestParforBasic(self):
self.runner()
def test_TestParforNumericalMisc(self):
self.runner()
def test_TestParforNumPy(self):
self.runner()
def test_TestParfors(self):
self.runner()
def test_TestParforsBitMask(self):
self.runner()
def test_TestParforsDiagnostics(self):
self.runner()
def test_TestParforsLeaks(self):
self.runner()
def test_TestParforsMisc(self):
self.runner()
def test_TestParforsOptions(self):
self.runner()
def test_TestParforsSlice(self):
self.runner()
def test_TestParforsVectorizer(self):
self.runner()
def test_TestPrangeBasic(self):
self.runner()
def test_TestPrangeSpecific(self):
self.runner()
x86_only = unittest.skipIf(platform.machine() not in ('i386', 'x86_64'), 'x86 only test')
_GLOBAL_INT_FOR_TESTING1 = 17
_GLOBAL_INT_FOR_TESTING2 = 5
TestNamedTuple = namedtuple('TestNamedTuple', ('part0', 'part1'))
def null_comparer(a, b):
"""
Used with check_arq_equality to indicate that we do not care
whether the value of the parameter at the end of the function
has a particular value.
"""
pass
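# A minimal usage sketch (illustrative only; the argument names are chosen for
# the example):
#
#   self.check(impl, a, b,
#              check_arg_equality=[np.testing.assert_array_equal, null_comparer])
#
# would assert that the first argument ends up identical across the python,
# njit and parallel=True versions while ignoring whatever happens to the second.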
@needs_subprocess
class TestParforsBase(TestCase):
"""
Base class for testing parfors.
Provides functions for compilation and three way comparison between
python functions, njit'd functions and parfor njit'd functions.
"""
_numba_parallel_test_ = False
def __init__(self, *args):
# flags for njit()
self.cflags = Flags()
self.cflags.nrt = True
# flags for njit(parallel=True)
self.pflags = Flags()
self.pflags.auto_parallel = cpu.ParallelOptions(True)
self.pflags.nrt = True
# flags for njit(parallel=True, fastmath=True)
self.fast_pflags = Flags()
self.fast_pflags.auto_parallel = cpu.ParallelOptions(True)
self.fast_pflags.nrt = True
self.fast_pflags.fastmath = cpu.FastMathOptions(True)
super(TestParforsBase, self).__init__(*args)
def _compile_this(self, func, sig, flags):
return compile_isolated(func, sig, flags=flags)
def compile_parallel(self, func, sig):
return self._compile_this(func, sig, flags=self.pflags)
def compile_parallel_fastmath(self, func, sig):
return self._compile_this(func, sig, flags=self.fast_pflags)
def compile_njit(self, func, sig):
return self._compile_this(func, sig, flags=self.cflags)
def compile_all(self, pyfunc, *args, **kwargs):
sig = tuple([numba.typeof(x) for x in args])
# compile the prange injected function
cpfunc = self.compile_parallel(pyfunc, sig)
# compile a standard njit of the original function
cfunc = self.compile_njit(pyfunc, sig)
return cfunc, cpfunc
def check_parfors_vs_others(self, pyfunc, cfunc, cpfunc, *args, **kwargs):
"""
Checks python, njit and parfor impls produce the same result.
Arguments:
pyfunc - the python function to test
cfunc - CompilerResult from njit of pyfunc
cpfunc - CompilerResult from njit(parallel=True) of pyfunc
args - arguments for the function being tested
Keyword Arguments:
scheduler_type - 'signed', 'unsigned' or None, default is None.
Supply in cases where the presence of a specific
scheduler is to be asserted.
fastmath_pcres - a fastmath parallel compile result, if supplied
will be run to make sure the result is correct
check_arg_equality - some functions need to check that a
parameter is modified rather than a certain
value returned. If this keyword argument
is supplied, it should be a list of
comparison functions such that the i'th
function in the list is used to compare the
i'th parameter of the njit and parallel=True
functions against the i'th parameter of the
standard Python function, asserting if they
differ. The length of this list must be equal
to the number of parameters to the function.
The null comparator is available for use
when you do not desire to test if some
particular parameter is changed.
Remaining kwargs are passed to np.testing.assert_almost_equal
"""
scheduler_type = kwargs.pop('scheduler_type', None)
check_fastmath = kwargs.pop('check_fastmath', None)
fastmath_pcres = kwargs.pop('fastmath_pcres', None)
check_scheduling = kwargs.pop('check_scheduling', True)
check_args_for_equality = kwargs.pop('check_arg_equality', None)
def copy_args(*args):
if not args:
return tuple()
new_args = []
for x in args:
if isinstance(x, np.ndarray):
new_args.append(x.copy('k'))
elif isinstance(x, np.number):
new_args.append(x.copy())
elif isinstance(x, numbers.Number):
new_args.append(x)
elif x is None:
new_args.append(x)
elif isinstance(x, tuple):
new_args.append(copy.deepcopy(x))
elif isinstance(x, list):
new_args.append(x[:])
else:
raise ValueError('Unsupported argument type encountered')
return tuple(new_args)
# python result
py_args = copy_args(*args)
py_expected = pyfunc(*py_args)
# njit result
njit_args = copy_args(*args)
njit_output = cfunc.entry_point(*njit_args)
# parfor result
parfor_args = copy_args(*args)
parfor_output = cpfunc.entry_point(*parfor_args)
if check_args_for_equality is None:
np.testing.assert_almost_equal(njit_output, py_expected, **kwargs)
np.testing.assert_almost_equal(parfor_output, py_expected, **kwargs)
self.assertEqual(type(njit_output), type(parfor_output))
else:
assert(len(py_args) == len(check_args_for_equality))
for pyarg, njitarg, parforarg, argcomp in zip(
py_args, njit_args, parfor_args, check_args_for_equality):
argcomp(njitarg, pyarg, **kwargs)
argcomp(parforarg, pyarg, **kwargs)
if check_scheduling:
self.check_scheduling(cpfunc, scheduler_type)
# if requested check fastmath variant
if fastmath_pcres is not None:
parfor_fastmath_output = fastmath_pcres.entry_point(*copy_args(*args))
np.testing.assert_almost_equal(parfor_fastmath_output, py_expected,
**kwargs)
def check(self, pyfunc, *args, **kwargs):
"""Checks that pyfunc compiles for *args under parallel=True and njit
and asserts that all version execute and produce the same result"""
cfunc, cpfunc = self.compile_all(pyfunc, *args)
self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs)
def check_variants(self, impl, arg_gen, **kwargs):
"""Run self.check(impl, ...) on array data generated from arg_gen.
"""
for args in arg_gen():
with self.subTest(list(map(typeof, args))):
self.check(impl, *args, **kwargs)
def count_parfors_variants(self, impl, arg_gen, **kwargs):
"""Run self.countParfors(impl, ...) on array types generated from
arg_gen.
"""
for args in arg_gen():
with self.subTest(list(map(typeof, args))):
argtys = tuple(map(typeof, args))
# At least one parfors
self.assertGreaterEqual(countParfors(impl, argtys), 1)
def check_scheduling(self, cres, scheduler_type):
# make sure parfor set up scheduling
scheduler_str = '@do_scheduling'
if scheduler_type is not None:
if scheduler_type in ['signed', 'unsigned']:
scheduler_str += '_' + scheduler_type
else:
msg = "Unknown scheduler_type specified: %s"
raise ValueError(msg % scheduler_type)
self.assertIn(scheduler_str, cres.library.get_llvm_str())
def gen_linspace(self, n, ct):
"""Make *ct* sample 1D arrays of length *n* using np.linspace().
"""
def gen():
yield np.linspace(0, 1, n)
yield np.linspace(2, 1, n)
yield np.linspace(1, 2, n)
src = cycle(gen())
return [next(src) for i in range(ct)]
def gen_linspace_variants(self, ct):
"""Make 1D, 2D, 3D variants of the data in C and F orders
"""
# 1D
yield self.gen_linspace(10, ct=ct)
# 2D
arr2ds = [x.reshape((2, 3))
for x in self.gen_linspace(n=2 * 3, ct=ct)]
yield arr2ds
# Fortran order
yield [np.asfortranarray(x) for x in arr2ds]
# 3D
arr3ds = [x.reshape((2, 3, 4))
for x in self.gen_linspace(n=2 * 3 * 4, ct=ct)]
yield arr3ds
# Fortran order
yield [np.asfortranarray(x) for x in arr3ds]
def _filter_mod(self, mod, magicstr, checkstr=None):
""" helper function to filter out modules by name"""
filt = [x for x in mod if magicstr in x.name]
if checkstr is not None:
for x in filt:
assert checkstr in str(x)
return filt
def _get_gufunc_modules(self, cres, magicstr, checkstr=None):
""" gets the gufunc LLVM Modules"""
_modules = [x for x in cres.library._codegen._engine._ee._modules]
return self._filter_mod(_modules, magicstr, checkstr=checkstr)
def _get_gufunc_info(self, cres, fn):
""" helper for gufunc IR/asm generation"""
# get the gufunc modules
magicstr = '__numba_parfor_gufunc'
gufunc_mods = self._get_gufunc_modules(cres, magicstr)
x = dict()
for mod in gufunc_mods:
x[mod.name] = fn(mod)
return x
def _get_gufunc_ir(self, cres):
"""
Returns the IR of the gufuncs used as parfor kernels
as a dict mapping the gufunc name to its IR.
Arguments:
cres - a CompileResult from `njit(parallel=True, ...)`
"""
return self._get_gufunc_info(cres, str)
def _get_gufunc_asm(self, cres):
"""
Returns the assembly of the gufuncs used as parfor kernels
as a dict mapping the gufunc name to its assembly.
Arguments:
cres - a CompileResult from `njit(parallel=True, ...)`
"""
tm = cres.library._codegen._tm
def emit_asm(mod):
return str(tm.emit_assembly(mod))
return self._get_gufunc_info(cres, emit_asm)
def assert_fastmath(self, pyfunc, sig):
"""
        Asserts that the fastmath flag has some effect, i.e. that suitable
        instructions are now labelled as `fast`. Whether LLVM can actually
        optimise anything further now that the restrictions have been lifted
        is another matter!
Arguments:
pyfunc - a function that contains operations with parallel semantics
sig - the type signature of pyfunc
"""
cres = self.compile_parallel_fastmath(pyfunc, sig)
_ir = self._get_gufunc_ir(cres)
def _get_fast_instructions(ir):
splitted = ir.splitlines()
fast_inst = []
for x in splitted:
m = re.search(r'\bfast\b', x) # \b for wholeword
if m is not None:
fast_inst.append(x)
return fast_inst
def _assert_fast(instrs):
ops = ('fadd', 'fsub', 'fmul', 'fdiv', 'frem', 'fcmp', 'call')
for inst in instrs:
count = 0
for op in ops:
match = op + ' fast'
if match in inst:
count += 1
self.assertTrue(count > 0)
for name, guir in _ir.items():
inst = _get_fast_instructions(guir)
_assert_fast(inst)
def blackscholes_impl(sptprice, strike, rate, volatility, timev):
# blackscholes example
logterm = np.log(sptprice / strike)
powterm = 0.5 * volatility * volatility
den = volatility * np.sqrt(timev)
d1 = (((rate + powterm) * timev) + logterm) / den
d2 = d1 - den
NofXd1 = 0.5 + 0.5 * 2.0 * d1
NofXd2 = 0.5 + 0.5 * 2.0 * d2
futureValue = strike * np.exp(- rate * timev)
c1 = futureValue * NofXd2
call = sptprice * NofXd1 - c1
put = call - futureValue + sptprice
return put
def lr_impl(Y, X, w, iterations):
# logistic regression example
for i in range(iterations):
w -= np.dot(((1.0 / (1.0 + np.exp(-Y * np.dot(X, w))) - 1.0) * Y), X)
return w
def example_kmeans_test(A, numCenter, numIter, init_centroids):
centroids = init_centroids
N, D = A.shape
for l in range(numIter):
dist = np.array([[math.sqrt(np.sum((A[i,:]-centroids[j,:])**2))
for j in range(numCenter)] for i in range(N)])
labels = np.array([dist[i,:].argmin() for i in range(N)])
centroids = np.array([[np.sum(A[labels==i, j])/np.sum(labels==i)
for j in range(D)] for i in range(numCenter)])
return centroids
def get_optimized_numba_ir(test_func, args, **kws):
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx, 'cpu')
test_ir = compiler.run_frontend(test_func)
if kws:
options = cpu.ParallelOptions(kws)
else:
options = cpu.ParallelOptions(True)
tp = TestPipeline(typingctx, targetctx, args, test_ir)
with cpu_target.nested_context(typingctx, targetctx):
typingctx.refresh()
targetctx.refresh()
inline_pass = inline_closurecall.InlineClosureCallPass(tp.state.func_ir,
options,
typed=True)
inline_pass.run()
rewrites.rewrite_registry.apply('before-inference', tp.state)
tp.state.typemap, tp.state.return_type, tp.state.calltypes, _ = \
typed_passes.type_inference_stage(tp.state.typingctx,
tp.state.targetctx, tp.state.func_ir, tp.state.args, None)
type_annotations.TypeAnnotation(
func_ir=tp.state.func_ir,
typemap=tp.state.typemap,
calltypes=tp.state.calltypes,
lifted=(),
lifted_from=None,
args=tp.state.args,
return_type=tp.state.return_type,
html_output=config.HTML)
diagnostics = numba.parfors.parfor.ParforDiagnostics()
preparfor_pass = numba.parfors.parfor.PreParforPass(
tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
tp.state.typingctx, tp.state.targetctx, options,
swapped=diagnostics.replaced_fns)
preparfor_pass.run()
rewrites.rewrite_registry.apply('after-inference', tp.state)
flags = compiler.Flags()
parfor_pass = numba.parfors.parfor.ParforPass(
tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
tp.state.return_type, tp.state.typingctx, tp.state.targetctx,
options, flags, tp.state.metadata, diagnostics=diagnostics)
parfor_pass.run()
parfor_pass = numba.parfors.parfor.ParforFusionPass(
tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
tp.state.return_type, tp.state.typingctx, tp.state.targetctx,
options, flags, tp.state.metadata, diagnostics=diagnostics)
parfor_pass.run()
parfor_pass = numba.parfors.parfor.ParforPreLoweringPass(
tp.state.func_ir, tp.state.typemap, tp.state.calltypes,
tp.state.return_type, tp.state.typingctx, tp.state.targetctx,
options, flags, tp.state.metadata, diagnostics=diagnostics)
parfor_pass.run()
test_ir._definitions = build_definitions(test_ir.blocks)
return test_ir, tp
def countParfors(test_func, args, **kws):
test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
ret_count = 0
for label, block in test_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, numba.parfors.parfor.Parfor):
ret_count += 1
return ret_count
def countArrays(test_func, args, **kws):
test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
return _count_arrays_inner(test_ir.blocks, tp.state.typemap)
def get_init_block_size(test_func, args, **kws):
test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
blocks = test_ir.blocks
ret_count = 0
for label, block in blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, numba.parfors.parfor.Parfor):
ret_count += len(inst.init_block.body)
return ret_count
def _count_arrays_inner(blocks, typemap):
ret_count = 0
arr_set = set()
for label, block in blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, numba.parfors.parfor.Parfor):
parfor_blocks = inst.loop_body.copy()
parfor_blocks[0] = inst.init_block
ret_count += _count_arrays_inner(parfor_blocks, typemap)
if (isinstance(inst, ir.Assign)
and isinstance(typemap[inst.target.name],
types.ArrayCompatible)):
arr_set.add(inst.target.name)
ret_count += len(arr_set)
return ret_count
def countArrayAllocs(test_func, args, **kws):
test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
ret_count = 0
for block in test_ir.blocks.values():
ret_count += _count_array_allocs_inner(test_ir, block)
return ret_count
def _count_array_allocs_inner(func_ir, block):
ret_count = 0
for inst in block.body:
if isinstance(inst, numba.parfors.parfor.Parfor):
ret_count += _count_array_allocs_inner(func_ir, inst.init_block)
for b in inst.loop_body.values():
ret_count += _count_array_allocs_inner(func_ir, b)
if (isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Expr)
and inst.value.op == 'call'
and (guard(find_callname, func_ir, inst.value) == ('empty', 'numpy')
or guard(find_callname, func_ir, inst.value)
== ('empty_inferred', 'numba.np.unsafe.ndarray'))):
ret_count += 1
return ret_count
def countNonParforArrayAccesses(test_func, args, **kws):
test_ir, tp = get_optimized_numba_ir(test_func, args, **kws)
return _count_non_parfor_array_accesses_inner(test_ir, test_ir.blocks,
tp.state.typemap)
def _count_non_parfor_array_accesses_inner(f_ir, blocks, typemap, parfor_indices=None):
ret_count = 0
if parfor_indices is None:
parfor_indices = set()
for label, block in blocks.items():
for stmt in block.body:
if isinstance(stmt, numba.parfors.parfor.Parfor):
parfor_indices.add(stmt.index_var.name)
parfor_blocks = stmt.loop_body.copy()
parfor_blocks[0] = stmt.init_block
ret_count += _count_non_parfor_array_accesses_inner(
f_ir, parfor_blocks, typemap, parfor_indices)
# getitem
elif (is_getitem(stmt) and isinstance(typemap[stmt.value.value.name],
types.ArrayCompatible) and not _uses_indices(
f_ir, index_var_of_get_setitem(stmt), parfor_indices)):
ret_count += 1
# setitem
elif (is_setitem(stmt) and isinstance(typemap[stmt.target.name],
types.ArrayCompatible) and not _uses_indices(
f_ir, index_var_of_get_setitem(stmt), parfor_indices)):
ret_count += 1
# find parfor_index aliases
elif (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Var) and
stmt.value.name in parfor_indices):
parfor_indices.add(stmt.target.name)
return ret_count
def _uses_indices(f_ir, index, index_set):
if index.name in index_set:
return True
ind_def = guard(get_definition, f_ir, index)
if isinstance(ind_def, ir.Expr) and ind_def.op == 'build_tuple':
varnames = set(v.name for v in ind_def.items)
return len(varnames & index_set) != 0
return False
class TestPipeline(object):
def __init__(self, typingctx, targetctx, args, test_ir):
self.state = compiler.StateDict()
self.state.typingctx = typingctx
self.state.targetctx = targetctx
self.state.args = args
self.state.func_ir = test_ir
self.state.typemap = None
self.state.return_type = None
self.state.calltypes = None
self.state.metadata = {}
@skip_parfors_unsupported
class TestParforBasic(TestParforsBase):
"""Smoke tests for the parfors transforms. These tests check the most basic
functionality"""
def __init__(self, *args):
TestParforsBase.__init__(self, *args)
# these are used in the mass of simple tests
m = np.reshape(np.arange(12.), (3, 4))
self.simple_args = [np.arange(3.), np.arange(4.), m, m.T]
def test_simple01(self):
def test_impl():
return np.ones(())
with self.assertRaises(AssertionError) as raises:
self.check(test_impl)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
def test_simple02(self):
def test_impl():
return np.ones((1,))
self.check(test_impl)
def test_simple03(self):
def test_impl():
return np.ones((1, 2))
self.check(test_impl)
def test_simple04(self):
def test_impl():
return np.ones(1)
self.check(test_impl)
def test_simple07(self):
def test_impl():
return np.ones((1, 2), dtype=np.complex128)
self.check(test_impl)
def test_simple08(self):
def test_impl():
return np.ones((1, 2)) + np.ones((1, 2))
self.check(test_impl)
def test_simple09(self):
def test_impl():
return np.ones((1, 1))
self.check(test_impl)
def test_simple10(self):
def test_impl():
return np.ones((0, 0))
self.check(test_impl)
def test_simple11(self):
def test_impl():
return np.ones((10, 10)) + 1.
self.check(test_impl)
def test_simple12(self):
def test_impl():
return np.ones((10, 10)) + np.complex128(1.)
self.check(test_impl)
def test_simple13(self):
def test_impl():
return np.complex128(1.)
with self.assertRaises(AssertionError) as raises:
self.check(test_impl)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
def test_simple14(self):
def test_impl():
return np.ones((10, 10))[0::20]
self.check(test_impl)
def test_simple15(self):
def test_impl(v1, v2, m1, m2):
return v1 + v1
self.check(test_impl, *self.simple_args)
def test_simple16(self):
def test_impl(v1, v2, m1, m2):
return m1 + m1
self.check(test_impl, *self.simple_args)
def test_simple17(self):
def test_impl(v1, v2, m1, m2):
return m2 + v1
self.check(test_impl, *self.simple_args)
@needs_lapack
def test_simple18(self):
def test_impl(v1, v2, m1, m2):
return m1.T + np.linalg.svd(m2)[1]
self.check(test_impl, *self.simple_args)
@needs_blas
def test_simple19(self):
def test_impl(v1, v2, m1, m2):
return np.dot(m1, v2)
self.check(test_impl, *self.simple_args)
@needs_blas
def test_simple20(self):
def test_impl(v1, v2, m1, m2):
return np.dot(m1, m2)
# gemm is left to BLAS
with self.assertRaises(AssertionError) as raises:
self.check(test_impl, *self.simple_args)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
@needs_blas
def test_simple21(self):
def test_impl(v1, v2, m1, m2):
return np.dot(v1, v1)
self.check(test_impl, *self.simple_args)
def test_simple22(self):
def test_impl(v1, v2, m1, m2):
return np.sum(v1 + v1)
self.check(test_impl, *self.simple_args)
def test_simple23(self):
def test_impl(v1, v2, m1, m2):
x = 2 * v1
y = 2 * v1
return 4 * np.sum(x**2 + y**2 < 1) / 10
self.check(test_impl, *self.simple_args)
def test_simple24(self):
def test_impl():
n = 20
A = np.ones((n, n))
b = np.arange(n)
return np.sum(A[:, b])
self.check(test_impl)
@disabled_test
def test_simple_operator_15(self):
"""same as corresponding test_simple_<n> case but using operator.add"""
def test_impl(v1, v2, m1, m2):
return operator.add(v1, v1)
self.check(test_impl, *self.simple_args)
@disabled_test
def test_simple_operator_16(self):
def test_impl(v1, v2, m1, m2):
return operator.add(m1, m1)
self.check(test_impl, *self.simple_args)
@disabled_test
def test_simple_operator_17(self):
def test_impl(v1, v2, m1, m2):
return operator.add(m2, v1)
self.check(test_impl, *self.simple_args)
def test_inplace_alias(self):
# issue7201
def test_impl(a):
a += 1
a[:] = 3
def comparer(a, b):
np.testing.assert_equal(a, b)
x = np.ones(1)
self.check(test_impl, x, check_arg_equality=[comparer])
@skip_parfors_unsupported
class TestParforNumericalMisc(TestParforsBase):
""" Miscellaneous 'classical' numerical tests """
def test_pi(self):
def test_impl(n):
x = 2 * np.random.ranf(n) - 1
y = 2 * np.random.ranf(n) - 1
return 4 * np.sum(x**2 + y**2 < 1) / n
self.check(test_impl, 100000, decimal=1)
self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
self.assertEqual(countArrays(test_impl, (types.intp,)), 0)
def test_blackscholes(self):
# blackscholes takes 5 1D float array args
args = (numba.float64[:], ) * 5
self.assertEqual(countParfors(blackscholes_impl, args), 1)
@needs_blas
def test_logistic_regression(self):
args = (numba.float64[:], numba.float64[:,:], numba.float64[:],
numba.int64)
self.assertEqual(countParfors(lr_impl, args), 2)
self.assertEqual(countArrayAllocs(lr_impl, args), 1)
def test_kmeans(self):
np.random.seed(0)
N = 1024
D = 10
centers = 3
A = np.random.ranf((N, D))
init_centroids = np.random.ranf((centers, D))
self.check(example_kmeans_test, A, centers, 3, init_centroids,
decimal=1)
# TODO: count parfors after k-means fusion is working
# requires recursive parfor counting
arg_typs = (types.Array(types.float64, 2, 'C'), types.intp, types.intp,
types.Array(types.float64, 2, 'C'))
self.assertEqual(
countNonParforArrayAccesses(example_kmeans_test, arg_typs), 0)
@skip_parfors_unsupported
class TestParforNumPy(TestParforsBase):
"""Tests NumPy functionality under parfors"""
@needs_blas
def test_mvdot(self):
def test_impl(a, v):
return np.dot(a, v)
A = np.linspace(0, 1, 20).reshape(2, 10)
v = np.linspace(2, 1, 10)
self.check(test_impl, A, v)
def test_fuse_argmin_argmax_max_min(self):
for op in [np.argmin, np.argmax, np.min, np.max]:
def test_impl(n):
A = np.ones(n)
C = op(A)
B = A.sum()
return B + C
self.check(test_impl, 256)
self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
self.assertEqual(countArrays(test_impl, (types.intp,)), 0)
def test_np_random_func_direct_import(self):
def test_impl(n):
A = randn(n)
return A[0]
self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
def test_arange(self):
# test with stop only
def test_impl1(n):
return np.arange(n)
# start and stop
def test_impl2(s, n):
return np.arange(s, n)
# start, step, stop
def test_impl3(s, n, t):
return np.arange(s, n, t)
for arg in [11, 128, 30.0, complex(4,5), complex(5,4)]:
self.check(test_impl1, arg)
self.check(test_impl2, 2, arg)
self.check(test_impl3, 2, arg, 2)
def test_arange_dtype(self):
# test with stop only
def test_impl1(n):
return np.arange(n, dtype=np.float32)
# start and stop
def test_impl2(s, n):
return np.arange(s, n, dtype=np.float32)
# start, step, stop
def test_impl3(s, n, t):
return np.arange(s, n, t, dtype=np.float32)
for arg in [11, 128, 30.0]:
self.check(test_impl1, arg)
self.check(test_impl2, 2, arg)
self.check(test_impl3, 2, arg, 2)
def test_linspace(self):
# without num
def test_impl1(start, stop):
return np.linspace(start, stop)
# with num
def test_impl2(start, stop, num):
return np.linspace(start, stop, num)
for arg in [11, 128, 30.0, complex(4,5), complex(5,4)]:
self.check(test_impl1, 2, arg)
self.check(test_impl2, 2, arg, 30)
def test_mean(self):
def test_impl(A):
return A.mean()
N = 100
A = np.random.ranf(N)
B = np.random.randint(10, size=(N, 3))
self.check(test_impl, A)
self.check(test_impl, B)
self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 1, 'C'), )), 1)
self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 2, 'C'), )), 1)
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl, data_gen)
self.count_parfors_variants(test_impl, data_gen)
def test_var(self):
def test_impl(A):
return A.var()
N = 100
A = np.random.ranf(N)
B = np.random.randint(10, size=(N, 3))
C = A + 1j * A
self.check(test_impl, A)
self.check(test_impl, B)
self.check(test_impl, C)
self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 1, 'C'), )), 2)
self.assertEqual(countParfors(test_impl, (types.Array(types.float64, 2, 'C'), )), 2)
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl, data_gen)
self.count_parfors_variants(test_impl, data_gen)
def test_std(self):
def test_impl(A):
return A.std()
N = 100
A = np.random.ranf(N)
B = np.random.randint(10, size=(N, 3))
C = A + 1j * A
self.check(test_impl, A)
self.check(test_impl, B)
self.check(test_impl, C)
argty = (types.Array(types.float64, 1, 'C'),)
self.assertEqual(countParfors(test_impl, argty), 2)
self.assertEqual(countParfors(test_impl, argty), 2)
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl, data_gen)
self.count_parfors_variants(test_impl, data_gen)
def test_random_parfor(self):
"""
Test function with only a random call to make sure a random function
like ranf is actually translated to a parfor.
"""
def test_impl(n):
A = np.random.ranf((n, n))
return A
self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
def test_randoms(self):
def test_impl(n):
A = np.random.standard_normal(size=(n, n))
B = np.random.randn(n, n)
C = np.random.normal(0.0, 1.0, (n, n))
D = np.random.chisquare(1.0, (n, n))
E = np.random.randint(1, high=3, size=(n, n))
F = np.random.triangular(1, 2, 3, (n, n))
return np.sum(A+B+C+D+E+F)
n = 128
cpfunc = self.compile_parallel(test_impl, (numba.typeof(n),))
parfor_output = cpfunc.entry_point(n)
py_output = test_impl(n)
        # check results within 5% since random numbers are generated in parallel
np.testing.assert_allclose(parfor_output, py_output, rtol=0.05)
self.assertEqual(countParfors(test_impl, (types.int64, )), 1)
def test_dead_randoms(self):
def test_impl(n):
A = np.random.standard_normal(size=(n, n))
B = np.random.randn(n, n)
C = np.random.normal(0.0, 1.0, (n, n))
D = np.random.chisquare(1.0, (n, n))
E = np.random.randint(1, high=3, size=(n, n))
F = np.random.triangular(1, 2, 3, (n, n))
return 3
n = 128
cpfunc = self.compile_parallel(test_impl, (numba.typeof(n),))
parfor_output = cpfunc.entry_point(n)
py_output = test_impl(n)
self.assertEqual(parfor_output, py_output)
self.assertEqual(countParfors(test_impl, (types.int64, )), 0)
def test_min(self):
def test_impl1(A):
return A.min()
def test_impl2(A):
return np.min(A)
n = 211
A = np.random.ranf(n)
B = np.random.randint(10, size=n).astype(np.int32)
C = np.random.ranf((n, n)) # test multi-dimensional array
D = np.array([np.inf, np.inf])
self.check(test_impl1, A)
self.check(test_impl1, B)
self.check(test_impl1, C)
self.check(test_impl1, D)
self.check(test_impl2, A)
self.check(test_impl2, B)
self.check(test_impl2, C)
self.check(test_impl2, D)
# checks that 0d array input raises
msg = ("zero-size array to reduction operation "
"minimum which has no identity")
for impl in (test_impl1, test_impl2):
pcfunc = self.compile_parallel(impl, (types.int64[:],))
with self.assertRaises(ValueError) as e:
pcfunc.entry_point(np.array([], dtype=np.int64))
self.assertIn(msg, str(e.exception))
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl1, data_gen)
self.count_parfors_variants(test_impl1, data_gen)
self.check_variants(test_impl2, data_gen)
self.count_parfors_variants(test_impl2, data_gen)
def test_max(self):
def test_impl1(A):
return A.max()
def test_impl2(A):
return np.max(A)
n = 211
A = np.random.ranf(n)
B = np.random.randint(10, size=n).astype(np.int32)
C = np.random.ranf((n, n)) # test multi-dimensional array
D = np.array([-np.inf, -np.inf])
self.check(test_impl1, A)
self.check(test_impl1, B)
self.check(test_impl1, C)
self.check(test_impl1, D)
self.check(test_impl2, A)
self.check(test_impl2, B)
self.check(test_impl2, C)
self.check(test_impl2, D)
# checks that 0d array input raises
msg = ("zero-size array to reduction operation "
"maximum which has no identity")
for impl in (test_impl1, test_impl2):
pcfunc = self.compile_parallel(impl, (types.int64[:],))
with self.assertRaises(ValueError) as e:
pcfunc.entry_point(np.array([], dtype=np.int64))
self.assertIn(msg, str(e.exception))
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl1, data_gen)
self.count_parfors_variants(test_impl1, data_gen)
self.check_variants(test_impl2, data_gen)
self.count_parfors_variants(test_impl2, data_gen)
def test_argmax(self):
def test_impl1(A):
return A.argmax()
def test_impl2(A):
return np.argmax(A)
n = 211
A = np.array([1., 0., 3., 2., 3.])
B = np.random.randint(10, size=n).astype(np.int32)
C = np.random.ranf((n, n)) # test multi-dimensional array
D = np.array([1., 0., np.nan, 2., 3.])
self.check(test_impl1, A)
self.check(test_impl1, B)
self.check(test_impl1, C)
self.check(test_impl1, D)
self.check(test_impl2, A)
self.check(test_impl2, B)
self.check(test_impl2, C)
self.check(test_impl2, D)
# checks that 0d array input raises
msg = 'attempt to get argmax of an empty sequence'
for impl in (test_impl1, test_impl2):
pcfunc = self.compile_parallel(impl, (types.int64[:],))
with self.assertRaises(ValueError) as e:
pcfunc.entry_point(np.array([], dtype=np.int64))
self.assertIn(msg, str(e.exception))
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl1, data_gen)
self.count_parfors_variants(test_impl1, data_gen)
self.check_variants(test_impl2, data_gen)
self.count_parfors_variants(test_impl2, data_gen)
def test_argmin(self):
def test_impl1(A):
return A.argmin()
def test_impl2(A):
return np.argmin(A)
n = 211
A = np.array([1., 0., 2., 0., 3.])
B = np.random.randint(10, size=n).astype(np.int32)
C = np.random.ranf((n, n)) # test multi-dimensional array
D = np.array([1., 0., np.nan, 0., 3.])
self.check(test_impl1, A)
self.check(test_impl1, B)
self.check(test_impl1, C)
self.check(test_impl1, D)
self.check(test_impl2, A)
self.check(test_impl2, B)
self.check(test_impl2, C)
self.check(test_impl2, D)
# checks that 0d array input raises
msg = 'attempt to get argmin of an empty sequence'
for impl in (test_impl1, test_impl2):
pcfunc = self.compile_parallel(impl, (types.int64[:],))
with self.assertRaises(ValueError) as e:
pcfunc.entry_point(np.array([], dtype=np.int64))
self.assertIn(msg, str(e.exception))
# Test variants
data_gen = lambda: self.gen_linspace_variants(1)
self.check_variants(test_impl1, data_gen)
self.count_parfors_variants(test_impl1, data_gen)
self.check_variants(test_impl2, data_gen)
self.count_parfors_variants(test_impl2, data_gen)
def test_ndarray_fill(self):
def test_impl(x):
x.fill(7.0)
return x
x = np.zeros(10)
self.check(test_impl, x)
argty = (types.Array(types.float64, 1, 'C'),)
self.assertEqual(countParfors(test_impl, argty), 1)
def test_ndarray_fill2d(self):
def test_impl(x):
x.fill(7.0)
return x
x = np.zeros((2,2))
self.check(test_impl, x)
argty = (types.Array(types.float64, 2, 'C'),)
self.assertEqual(countParfors(test_impl, argty), 1)
def test_reshape_with_neg_one(self):
# issue3314
def test_impl(a, b):
result_matrix = np.zeros((b, b, 1), dtype=np.float64)
sub_a = a[0:b]
a = sub_a.size
b = a / 1
z = sub_a.reshape(-1, 1)
result_data = sub_a / z
result_matrix[:,:,0] = result_data
return result_matrix
a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
b = 3
self.check(test_impl, a, b)
def test_reshape_with_large_neg(self):
# issue3314
def test_impl(a, b):
result_matrix = np.zeros((b, b, 1), dtype=np.float64)
sub_a = a[0:b]
a = sub_a.size
b = a / 1
z = sub_a.reshape(-1307, 1)
result_data = sub_a / z
result_matrix[:,:,0] = result_data
return result_matrix
a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
b = 3
self.check(test_impl, a, b)
def test_reshape_with_too_many_neg_one(self):
# issue3314
with self.assertRaises(errors.UnsupportedRewriteError) as raised:
@njit(parallel=True)
def test_impl(a, b):
rm = np.zeros((b, b, 1), dtype=np.float64)
sub_a = a[0:b]
a = sub_a.size
b = a / 1
z = sub_a.reshape(-1, -1)
result_data = sub_a / z
rm[:,:,0] = result_data
return rm
a = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
b = 3
test_impl(a, b)
msg = ("The reshape API may only include one negative argument.")
self.assertIn(msg, str(raised.exception))
def test_0d_array(self):
def test_impl(n):
return np.sum(n) + np.prod(n) + np.min(n) + np.max(n) + np.var(n)
self.check(test_impl, np.array(7), check_scheduling=False)
def test_real_imag_attr(self):
# See issue 8012
def test_impl(z):
return np.sum(z.real ** 2 + z.imag ** 2)
z = np.arange(5) * (1 + 1j)
self.check(test_impl, z)
self.assertEqual(countParfors(test_impl, (types.complex128[::1],)), 1)
class TestParforsUnsupported(TestCase):
"""Tests for unsupported use of parfors"""
@unittest.skipIf(not _32bit, "Only impacts 32 bit hardware")
@needs_blas
def test_unsupported_combination_raises(self):
"""
This test is in place until issues with the 'parallel'
target on 32 bit hardware are fixed.
"""
with self.assertRaises(errors.UnsupportedParforsError) as raised:
@njit(parallel=True)
def ddot(a, v):
return np.dot(a, v)
A = np.linspace(0, 1, 20).reshape(2, 10)
v = np.linspace(2, 1, 10)
ddot(A, v)
msg = ("The 'parallel' target is not currently supported on 32 bit "
"hardware")
self.assertIn(msg, str(raised.exception))
@skip_parfors_unsupported
class TestParfors(TestParforsBase):
""" Tests cpython, reduction and various parfors features"""
def test_arraymap(self):
def test_impl(a, x, y):
return a * x + y
self.check_variants(test_impl, lambda: self.gen_linspace_variants(3))
def test_0d_broadcast(self):
def test_impl():
X = np.array(1)
Y = np.ones((10, 12))
return np.sum(X + Y)
self.check(test_impl)
self.assertEqual(countParfors(test_impl, ()), 1)
def test_2d_parfor(self):
def test_impl():
X = np.ones((10, 12))
Y = np.zeros((10, 12))
return np.sum(X + Y)
self.check(test_impl)
self.assertEqual(countParfors(test_impl, ()), 1)
def test_nd_parfor(self):
def case1():
X = np.ones((10, 12))
Y = np.zeros((10, 12))
yield (X, Y)
data_gen = lambda: chain(case1(), self.gen_linspace_variants(2))
def test_impl(X, Y):
return np.sum(X + Y)
self.check_variants(test_impl, data_gen)
self.count_parfors_variants(test_impl, data_gen)
def test_np_func_direct_import(self):
from numpy import ones # import here becomes FreeVar
def test_impl(n):
A = ones(n)
return A[0]
n = 111
self.check(test_impl, n)
def test_size_assertion(self):
def test_impl(m, n):
A = np.ones(m)
B = np.ones(n)
return np.sum(A + B)
self.check(test_impl, 10, 10)
with self.assertRaises(AssertionError) as raises:
cfunc = njit(parallel=True)(test_impl)
cfunc(10, 9)
msg = "Sizes of A, B do not match"
self.assertIn(msg, str(raises.exception))
def test_cfg(self):
# from issue #2477
def test_impl(x, is_positive, N):
for i in numba.prange(2):
for j in range( i*N//2, (i+1)*N//2 ):
is_positive[j] = 0
if x[j] > 0:
is_positive[j] = 1
return is_positive
N = 100
x = np.random.rand(N)
is_positive = np.zeros(N)
self.check(test_impl, x, is_positive, N)
def test_reduce(self):
def test_impl(A):
init_val = 10
return reduce(lambda a,b: min(a, b), A, init_val)
n = 211
A = np.random.ranf(n)
self.check(test_impl, A)
A = np.random.randint(10, size=n).astype(np.int32)
self.check(test_impl, A)
# test checking the number of arguments for the reduce function
def test_impl():
g = lambda x: x ** 2
return reduce(g, np.array([1, 2, 3, 4, 5]), 2)
with self.assertTypingError():
self.check(test_impl)
# test checking reduction over bitarray masked arrays
n = 160
A = np.random.randint(10, size=n).astype(np.int32)
def test_impl(A):
return np.sum(A[A>=3])
self.check(test_impl, A)
# TODO: this should fuse
# self.assertTrue(countParfors(test_impl, (numba.float64[:],)) == 1)
def test_impl(A):
B = A[:,0]
return np.sum(A[B>=3,1])
self.check(test_impl, A.reshape((16,10)))
# TODO: this should also fuse
#self.assertTrue(countParfors(test_impl, (numba.float64[:,:],)) == 1)
def test_impl(A):
B = A[:,0]
return np.sum(A[B>=3,1:2])
self.check(test_impl, A.reshape((16,10)))
# this doesn't fuse due to mixed indices
self.assertEqual(countParfors(test_impl, (numba.float64[:,:],)), 2)
def test_impl(A):
min_val = np.amin(A)
return A - min_val
self.check(test_impl, A)
# this doesn't fuse due to use of reduction variable
self.assertEqual(countParfors(test_impl, (numba.float64[:],)), 2)
def test_use_of_reduction_var1(self):
def test_impl():
acc = 0
for i in prange(1):
acc = cmath.sqrt(acc)
return acc
# checks that invalid use of reduction variable is detected
msg = ("Use of reduction variable acc in an unsupported reduction function.")
with self.assertRaises(ValueError) as e:
pcfunc = self.compile_parallel(test_impl, ())
self.assertIn(msg, str(e.exception))
def test_unsupported_floordiv1(self):
def test_impl():
acc = 100
for i in prange(2):
acc //= 2
return acc
# checks that invalid use of ifloordiv reduction operator is detected
msg = ("Parallel floordiv reductions are not supported. "
"If all divisors are integers then a floordiv "
"reduction can in some cases be parallelized as "
"a multiply reduction followed by a floordiv of "
"the resulting product.")
with self.assertRaises(errors.NumbaValueError) as e:
pcfunc = self.compile_parallel(test_impl, ())
self.assertIn(msg, str(e.exception))
def test_unsupported_xor1(self):
def test_impl():
acc = 100
for i in prange(2):
acc ^= i + 2
return acc
msg = ("Use of reduction variable acc in an unsupported reduction function.")
with self.assertRaises(ValueError) as e:
pcfunc = self.compile_parallel(test_impl, ())
self.assertIn(msg, str(e.exception))
def test_parfor_array_access1(self):
        # signed index of the prange generated by sum() should be replaced,
        # resulting in array A being eliminated (see issue #2846)
def test_impl(n):
A = np.ones(n)
return A.sum()
n = 211
self.check(test_impl, n)
self.assertEqual(countArrays(test_impl, (types.intp,)), 0)
def test_parfor_array_access2(self):
# in this test, the prange index has the same name (i) in two loops
# thus, i has multiple definitions and is harder to replace
def test_impl(n):
A = np.ones(n)
m = 0
n = 0
for i in numba.prange(len(A)):
m += A[i]
for i in numba.prange(len(A)):
if m == n: # access in another block
n += A[i]
return m + n
n = 211
self.check(test_impl, n)
self.assertEqual(countNonParforArrayAccesses(test_impl, (types.intp,)), 0)
def test_parfor_array_access3(self):
def test_impl(n):
A = np.ones(n, np.int64)
m = 0
for i in numba.prange(len(A)):
m += A[i]
if m==2:
i = m
n = 211
with self.assertRaises(errors.UnsupportedRewriteError) as raises:
self.check(test_impl, n)
self.assertIn("Overwrite of parallel loop index", str(raises.exception))
@needs_blas
def test_parfor_array_access4(self):
# in this test, one index of a multi-dim access should be replaced
# np.dot parallel implementation produces this case
def test_impl(A, b):
return np.dot(A, b)
n = 211
d = 4
A = np.random.ranf((n, d))
b = np.random.ranf(d)
self.check(test_impl, A, b)
# make sure the parfor index is replaced in build_tuple of access to A
test_ir, tp = get_optimized_numba_ir(
test_impl, (types.Array(types.float64, 2, 'C'),
types.Array(types.float64, 1, 'C')))
# this code should have one basic block after optimization
self.assertTrue(len(test_ir.blocks) == 1 and 0 in test_ir.blocks)
block = test_ir.blocks[0]
parfor_found = False
parfor = None
for stmt in block.body:
if isinstance(stmt, numba.parfors.parfor.Parfor):
parfor_found = True
parfor = stmt
self.assertTrue(parfor_found)
build_tuple_found = False
# there should be only one build_tuple
for bl in parfor.loop_body.values():
for stmt in bl.body:
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op == 'build_tuple'):
build_tuple_found = True
self.assertTrue(parfor.index_var in stmt.value.items)
self.assertTrue(build_tuple_found)
def test_parfor_dtype_type(self):
# test array type replacement creates proper type
def test_impl(a):
for i in numba.prange(len(a)):
a[i] = a.dtype.type(0)
return a[4]
a = np.ones(10)
self.check(test_impl, a)
def test_parfor_array_access5(self):
# one dim is slice in multi-dim access
def test_impl(n):
X = np.ones((n, 3))
y = 0
for i in numba.prange(n):
y += X[i,:].sum()
return y
n = 211
self.check(test_impl, n)
self.assertEqual(countNonParforArrayAccesses(test_impl, (types.intp,)), 0)
@disabled_test # Test itself is problematic, see #3155
def test_parfor_hoist_setitem(self):
# Make sure that read of out is not hoisted.
def test_impl(out):
for i in prange(10):
out[0] = 2 * out[0]
return out[0]
out = np.ones(1)
self.check(test_impl, out)
@needs_blas
def test_parfor_generate_fuse(self):
# issue #2857
def test_impl(N, D):
w = np.ones(D)
X = np.ones((N, D))
Y = np.ones(N)
for i in range(3):
B = (-Y * np.dot(X, w))
return B
n = 211
d = 3
self.check(test_impl, n, d)
self.assertEqual(countArrayAllocs(test_impl, (types.intp, types.intp)), 4)
self.assertEqual(countParfors(test_impl, (types.intp, types.intp)), 4)
def test_ufunc_expr(self):
# issue #2885
def test_impl(A, B):
return np.bitwise_and(A, B)
A = np.ones(3, np.uint8)
B = np.ones(3, np.uint8)
B[1] = 0
self.check(test_impl, A, B)
def test_find_callname_intrinsic(self):
def test_impl(n):
A = unsafe_empty((n,))
for i in range(n):
A[i] = i + 2.0
return A
# the unsafe allocation should be found even though it is imported
# as a different name
self.assertEqual(countArrayAllocs(test_impl, (types.intp,)), 1)
def test_reduction_var_reuse(self):
# issue #3139
def test_impl(n):
acc = 0
for i in prange(n):
acc += 1
for i in prange(n):
acc += 2
return acc
self.check(test_impl, 16)
def test_non_identity_initial(self):
# issue #7344
def test_impl(A, cond):
s = 1
for i in prange(A.shape[0]):
if cond[i]:
s += 1
return s
self.check(test_impl, np.ones(10), np.ones(10).astype('bool'))
def test_if_not_else_reduction(self):
# issue #7344
def test_impl(A, cond):
s = 1
t = 10
for i in prange(A.shape[0]):
if cond[i]:
s += 1
t += 1
else:
s += 2
return s + t
self.check(test_impl, np.ones(10), np.ones(10).astype('bool'))
def test_two_d_array_reduction_reuse(self):
def test_impl(n):
shp = (13, 17)
size = shp[0] * shp[1]
result1 = np.zeros(shp, np.int_)
tmp = np.arange(size).reshape(shp)
for i in numba.prange(n):
result1 += tmp
for i in numba.prange(n):
result1 += tmp
return result1
self.check(test_impl, 100)
def test_one_d_array_reduction(self):
def test_impl(n):
result = np.zeros(1, np.int_)
for i in numba.prange(n):
result += np.array([i], np.int_)
return result
self.check(test_impl, 100)
def test_two_d_array_reduction(self):
def test_impl(n):
shp = (13, 17)
size = shp[0] * shp[1]
result1 = np.zeros(shp, np.int_)
tmp = np.arange(size).reshape(shp)
for i in numba.prange(n):
result1 += tmp
return result1
self.check(test_impl, 100)
def test_two_d_array_reduction_with_float_sizes(self):
# result1 is float32 and tmp is float64.
# Tests reduction with differing dtypes.
def test_impl(n):
shp = (2, 3)
result1 = np.zeros(shp, np.float32)
tmp = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).reshape(shp)
for i in numba.prange(n):
result1 += tmp
return result1
self.check(test_impl, 100)
def test_two_d_array_reduction_prod(self):
def test_impl(n):
shp = (13, 17)
result1 = 2 * np.ones(shp, np.int_)
tmp = 2 * np.ones_like(result1)
for i in numba.prange(n):
result1 *= tmp
return result1
self.check(test_impl, 100)
def test_three_d_array_reduction(self):
def test_impl(n):
shp = (3, 2, 7)
result1 = np.zeros(shp, np.int_)
for i in numba.prange(n):
result1 += np.ones(shp, np.int_)
return result1
self.check(test_impl, 100)
def test_preparfor_canonicalize_kws(self):
# test canonicalize_array_math typing for calls with kw args
def test_impl(A):
return A.argsort() + 1
n = 211
A = np.arange(n)
self.check(test_impl, A)
def test_preparfor_datetime64(self):
# test array.dtype transformation for datetime64
def test_impl(A):
return A.dtype
A = np.empty(1, np.dtype('datetime64[ns]'))
cpfunc = self.compile_parallel(test_impl, (numba.typeof(A),))
self.assertEqual(cpfunc.entry_point(A), test_impl(A))
def test_no_hoisting_with_member_function_call(self):
def test_impl(X):
n = X.shape[0]
acc = 0
for i in prange(n):
R = {1, 2, 3}
R.add(i)
tmp = 0
for x in R:
tmp += x
acc += tmp
return acc
self.check(test_impl, np.random.ranf(128))
def test_array_compare_scalar(self):
""" issue3671: X != 0 becomes an arrayexpr with operator.ne.
That is turned into a parfor by devectorizing. Make sure
the return type of the devectorized operator.ne
on integer types works properly.
"""
def test_impl():
X = np.zeros(10, dtype=np.int_)
return X != 0
self.check(test_impl)
def test_array_analysis_optional_def(self):
def test_impl(x, half):
size = len(x)
parr = x[0:size]
if half:
parr = x[0:size//2]
return parr.sum()
x = np.ones(20)
self.check(test_impl, x, True, check_scheduling=False)
def test_prange_side_effects(self):
def test_impl(a, b):
data = np.empty(len(a), dtype=np.float64)
size = len(data)
for i in numba.prange(size):
data[i] = a[i]
for i in numba.prange(size):
data[i] = data[i] + b[i]
return data
x = np.arange(10 ** 2, dtype=float)
y = np.arange(10 ** 2, dtype=float)
self.check(test_impl, x, y)
self.assertEqual(countParfors(test_impl,
(types.Array(types.float64, 1, 'C'),
types.Array(types.float64, 1, 'C'))), 1)
def test_tuple1(self):
def test_impl(a):
atup = (3, 4)
b = 7
for i in numba.prange(len(a)):
a[i] += atup[0] + atup[1] + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_tuple2(self):
def test_impl(a):
atup = a.shape
b = 7
for i in numba.prange(len(a)):
a[i] += atup[0] + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_tuple3(self):
def test_impl(a):
atup = (np.arange(10), 4)
b = 7
for i in numba.prange(len(a)):
a[i] += atup[0][5] + atup[1] + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_namedtuple1(self):
def test_impl(a):
antup = TestNamedTuple(part0=3, part1=4)
b = 7
for i in numba.prange(len(a)):
a[i] += antup.part0 + antup.part1 + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_namedtuple2(self):
TestNamedTuple2 = namedtuple('TestNamedTuple2', ('part0', 'part1'))
def test_impl(a):
antup = TestNamedTuple2(part0=3, part1=4)
b = 7
for i in numba.prange(len(a)):
a[i] += antup.part0 + antup.part1 + b
return a
x = np.arange(10)
self.check(test_impl, x)
def test_namedtuple3(self):
        # issue5872: test that a.y[:] = 5 is not removed as
        # dead code.
        TestNamedTuple3 = namedtuple('TestNamedTuple3', ['y'])
def test_impl(a):
a.y[:] = 5
def comparer(a, b):
np.testing.assert_almost_equal(a.y, b.y)
x = TestNamedTuple3(y=np.zeros(10))
self.check(test_impl, x, check_arg_equality=[comparer])
def test_inplace_binop(self):
def test_impl(a, b):
b += a
return b
X = np.arange(10) + 10
Y = np.arange(10) + 100
self.check(test_impl, X, Y)
self.assertEqual(countParfors(test_impl,
(types.Array(types.float64, 1, 'C'),
types.Array(types.float64, 1, 'C'))), 1)
def test_tuple_concat(self):
# issue5383
def test_impl(a):
n = len(a)
array_shape = n, n
indices = np.zeros(((1,) + array_shape + (1,)), dtype=np.uint64)
k_list = indices[0, :]
for i, g in enumerate(a):
k_list[i, i] = i
return k_list
x = np.array([1, 1])
self.check(test_impl, x)
def test_tuple_concat_with_reverse_slice(self):
# issue5383
def test_impl(a):
n = len(a)
array_shape = n, n
indices = np.zeros(((1,) + array_shape + (1,))[:-1],
dtype=np.uint64)
k_list = indices[0, :]
for i, g in enumerate(a):
k_list[i, i] = i
return k_list
x = np.array([1, 1])
self.check(test_impl, x)
def test_array_tuple_concat(self):
# issue6399
def test_impl(a):
S = (a,) + (a, a)
return S[0].sum()
x = np.ones((3,3))
self.check(test_impl, x)
def test_high_dimension1(self):
# issue6749
def test_impl(x):
return x * 5.0
x = np.ones((2, 2, 2, 2, 2, 15))
self.check(test_impl, x)
def test_tuple_arg(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.empty(sz), sz)
def test_tuple_arg_not_whole_array(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.zeros(sz), (10, 3))
def test_tuple_for_pndindex(self):
def test_impl(x):
sz = (10, 5)
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.zeros(sz))
def test_tuple_arg_literal(self):
def test_impl(x, first):
sz = (first, 5)
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10, 5)
self.check(test_impl, np.zeros(sz), 10)
def test_tuple_of_literal_nonliteral(self):
        # This test has to be done manually as self.check uses
        # compile_isolated and one function cannot "see" the other
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
def call(x, fn):
return fn(x, (10, 3)) # Only want to iterate to the 3rd
get_input = lambda: np.zeros((10, 10))
expected = call(get_input(), test_impl)
def check(dec):
f1 = dec(test_impl)
f2 = njit(call) # no parallel semantics in the caller
got = f2(get_input(), f1)
self.assertPreciseEqual(expected, got)
for d in (njit, njit(parallel=True)):
check(d)
def test_tuple_arg_1d(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10,)
self.check(test_impl, np.zeros(sz), sz)
def test_tuple_arg_1d_literal(self):
def test_impl(x):
sz = (10,)
for i in numba.pndindex(sz):
x[i] = 1
return x
sz = (10,)
self.check(test_impl, np.zeros(sz))
def test_int_arg_pndindex(self):
def test_impl(x, sz):
for i in numba.pndindex(sz):
x[i] = 1
return x
self.check(test_impl, np.zeros((10, 10)), 3)
def test_prange_unknown_call1(self):
@register_jitable
def issue7854_proc(u, i, even, size):
for j in range((even + i + 1) % 2 + 1, size - 1, 2):
u[i, j] = u[i + 1, j] + 1
        # issue7854
        # Forbid fusion when a prange body contains an unanalyzable call.
def test_impl(u, size):
for i in numba.prange(1, size - 1):
issue7854_proc(u, i, 0, size)
for i in numba.prange(1, size - 1):
issue7854_proc(u, i, 1, size)
return u
size = 4
u = np.zeros((size, size))
cptypes = (numba.float64[:, ::1], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, u, size)
def test_prange_index_calc1(self):
        # Should forbid fusion due to the cross-iteration dependency
        # detected from the loop index calculation (i+1) used as an array index.
def test_impl(u, size):
for i in numba.prange(1, size - 1):
for j in range((i + 1) % 2 + 1, size - 1, 2):
u[i, j] = u[i + 1, j] + 1
for i in numba.prange(1, size - 1):
for j in range(i % 2 + 1, size - 1, 2):
u[i, j] = u[i + 1, j] + 1
return u
size = 4
u = np.zeros((size, size))
cptypes = (numba.float64[:, ::1], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, u, size)
def test_prange_reverse_order1(self):
# Testing if reversed loop index usage as array index
# prevents fusion.
def test_impl(a, b, size):
for i in numba.prange(size):
for j in range(size):
a[i, j] = b[i, j] + 1
for i in numba.prange(size):
for j in range(size):
b[j, i] = 3
return a[0, 0] + b[0, 0]
size = 10
a = np.zeros((size, size))
b = np.zeros((size, size))
cptypes = (numba.float64[:, ::1], numba.float64[:, ::1], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, a, b, size)
def test_prange_parfor_index_then_not(self):
# Testing if accessing an array first with a parfor index then
# without will prevent fusion.
def test_impl(a, size):
b = 0
for i in numba.prange(size):
a[i] = i
for i in numba.prange(size):
b += a[5]
return b
size = 10
a = np.zeros(size)
cptypes = (numba.float64[:], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, a, size)
def test_prange_parfor_index_const_tuple_fusion(self):
# Testing if accessing a tuple with prange index
# and later with a constant will not prevent fusion.
def test_impl(a, tup, size):
acc = 0
for i in numba.prange(size):
a[i] = i + tup[i]
for i in numba.prange(size):
acc += a[i] + tup[1]
return acc
size = 10
a = np.zeros(size)
b = tuple(a)
cptypes = (numba.float64[:],
types.containers.UniTuple(types.float64, size),
types.intp)
self.assertEqual(countParfors(test_impl, cptypes), 1)
self.check(test_impl, a, b, size)
def test_prange_non_parfor_index_then_opposite(self):
# Testing if accessing an array first without a parfor index then
# with will prevent fusion.
def test_impl(a, b, size):
for i in numba.prange(size):
b[i] = a[5]
for i in numba.prange(size):
a[i] = i
# Need this to stop previous prange from being optimized away.
b[0] += a[0]
return b
size = 10
a = np.zeros(size)
b = np.zeros(size)
cptypes = (numba.float64[:], numba.float64[:], types.int64)
self.assertEqual(countParfors(test_impl, cptypes), 2)
self.check(test_impl, a, b, size)
def test_prange_optional(self):
def test_impl(arr, pred=None):
for i in prange(1):
if pred is not None:
arr[i] = 0.0
arr = np.ones(10)
self.check(test_impl, arr, None,
check_arg_equality=[np.testing.assert_almost_equal,
lambda x, y: x == y])
self.assertEqual(arr.sum(), 10.0)
def test_untraced_value_tuple(self):
# This is a test for issue #6478.
def test_impl():
a = (1.2, 1.3)
return a[0]
with self.assertRaises(AssertionError) as raises:
self.check(test_impl)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
def test_recursive_untraced_value_tuple(self):
# This is a test for issue #6478.
def test_impl():
a = ((1.2, 1.3),)
return a[0][0]
with self.assertRaises(AssertionError) as raises:
self.check(test_impl)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
def test_untraced_value_parfor(self):
# This is a test for issue #6478.
def test_impl(arr):
a = (1.2, 1.3)
n1 = len(arr)
arr2 = np.empty(n1, np.float64)
for i in prange(n1):
arr2[i] = arr[i] * a[0]
n2 = len(arr2)
arr3 = np.empty(n2, np.float64)
for j in prange(n2):
arr3[j] = arr2[j] - a[1]
total = 0.0
n3 = len(arr3)
for k in prange(n3):
total += arr3[k]
return total + a[0]
arg = (types.Array(types.int64, 1, 'C'), )
self.assertEqual(countParfors(test_impl, arg), 1)
arr = np.arange(10, dtype=np.int64)
self.check(test_impl, arr)
def test_setitem_2d_one_replaced(self):
# issue7843
def test_impl(x):
count = 0
for n in range(x.shape[0]):
# Useless "if" necessary to trigger bug.
if n:
n
x[count, :] = 1
count += 1
return x
self.check(test_impl, np.zeros((3, 1)))
def test_1array_control_flow(self):
# issue8146
def test_impl(arr, flag1, flag2):
inv = np.arange(arr.size)
if flag1:
return inv.astype(np.float64)
if flag2:
ret = inv[inv]
else:
ret = inv[inv - 1]
return ret / arr.size
arr = np.arange(100)
self.check(test_impl, arr, True, False)
self.check(test_impl, arr, True, True)
self.check(test_impl, arr, False, False)
def test_2array_1_control_flow(self):
# issue8146
def test_impl(arr, l, flag):
inv1 = np.arange(arr.size)
inv2 = np.arange(l, arr.size + l)
if flag:
ret = inv1[inv1]
else:
ret = inv1[inv1 - 1]
return ret / inv2
arr = np.arange(100)
self.check(test_impl, arr, 10, True)
self.check(test_impl, arr, 10, False)
def test_2array_2_control_flow(self):
# issue8146
def test_impl(arr, l, flag):
inv1 = np.arange(arr.size)
inv2 = np.arange(l, arr.size + l)
if flag:
ret1 = inv1[inv1]
ret2 = inv2[inv1]
else:
ret1 = inv1[inv1 - 1]
ret2 = inv2[inv1 - 1]
return ret1 / ret2
arr = np.arange(100)
self.check(test_impl, arr, 10, True)
self.check(test_impl, arr, 10, False)
def test_issue8515(self):
# issue8515: an array is filled in the first prange and
# then accessed with c[i - 1] in the next prange which
# should prevent fusion with the previous prange.
def test_impl(n):
r = np.zeros(n, dtype=np.intp)
c = np.zeros(n, dtype=np.intp)
for i in prange(n):
for j in range(i):
c[i] += 1
for i in prange(n):
if i == 0:
continue
r[i] = c[i] - c[i - 1]
return r[1:]
self.check(test_impl, 15)
self.assertEqual(countParfors(test_impl, (types.int64, )), 2)
def test_fusion_no_side_effects(self):
def test_impl(a, b):
X = np.ones(100)
b = math.ceil(b)
Y = np.ones(100)
c = int(max(a, b))
return X + Y + c
self.check(test_impl, 3.7, 4.3)
self.assertEqual(countParfors(test_impl, (types.float64, types.float64)), 1)
@skip_parfors_unsupported
class TestParforsLeaks(MemoryLeakMixin, TestParforsBase):
def check(self, pyfunc, *args, **kwargs):
cfunc, cpfunc = self.compile_all(pyfunc, *args)
self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs)
def test_reduction(self):
# issue4299
@njit(parallel=True)
def test_impl(arr):
return arr.sum()
arr = np.arange(10).astype(np.float64)
self.check(test_impl, arr)
def test_multiple_reduction_vars(self):
@njit(parallel=True)
def test_impl(arr):
a = 0.
b = 1.
for i in prange(arr.size):
a += arr[i]
b += 1. / (arr[i] + 1)
return a * b
arr = np.arange(10).astype(np.float64)
self.check(test_impl, arr)
@skip_parfors_unsupported
class TestParforsSlice(TestParforsBase):
def test_parfor_slice1(self):
def test_impl(a):
(n,) = a.shape
b = a[0:n-2] + a[1:n-1]
return b
self.check(test_impl, np.ones(10))
def test_parfor_slice2(self):
def test_impl(a, m):
(n,) = a.shape
b = a[0:n-2] + a[1:m]
return b
# runtime assertion should succeed
self.check(test_impl, np.ones(10), 9)
# next we expect failure
with self.assertRaises(AssertionError) as raises:
njit(parallel=True)(test_impl)(np.ones(10),10)
self.assertIn("do not match", str(raises.exception))
def test_parfor_slice3(self):
def test_impl(a):
(m,n) = a.shape
b = a[0:m-1,0:n-1] + a[1:m,1:n]
return b
self.check(test_impl, np.ones((4,3)))
def test_parfor_slice4(self):
def test_impl(a):
(m,n) = a.shape
b = a[:,0:n-1] + a[:,1:n]
return b
self.check(test_impl, np.ones((4,3)))
def test_parfor_slice5(self):
def test_impl(a):
(m,n) = a.shape
b = a[0:m-1,:] + a[1:m,:]
return b
self.check(test_impl, np.ones((4,3)))
def test_parfor_slice6(self):
def test_impl(a):
b = a.transpose()
c = a[1,:] + b[:,1]
return c
self.check(test_impl, np.ones((4,3)))
def test_parfor_slice7(self):
def test_impl(a):
b = a.transpose()
c = a[1,:] + b[1,:]
return c
# runtime check should succeed
self.check(test_impl, np.ones((3,3)))
# next we expect failure
with self.assertRaises(AssertionError) as raises:
njit(parallel=True)(test_impl)(np.ones((3,4)))
self.assertIn("do not match", str(raises.exception))
@disabled_test
def test_parfor_slice8(self):
def test_impl(a):
(m,n) = a.shape
b = a.transpose()
b[1:m,1:n] = a[1:m,1:n]
return b
self.check(test_impl, np.arange(9).reshape((3,3)))
@disabled_test
def test_parfor_slice9(self):
def test_impl(a):
(m,n) = a.shape
b = a.transpose()
b[1:n,1:m] = a[:,1:m]
return b
self.check(test_impl, np.arange(12).reshape((3,4)))
@disabled_test
def test_parfor_slice10(self):
def test_impl(a):
(m,n) = a.shape
b = a.transpose()
b[2,1:m] = a[2,1:m]
return b
self.check(test_impl, np.arange(9).reshape((3,3)))
def test_parfor_slice11(self):
def test_impl(a):
(m,n,l) = a.shape
b = a.copy()
b[:,1,1:l] = a[:,2,1:l]
return b
self.check(test_impl, np.arange(27).reshape((3,3,3)))
def test_parfor_slice12(self):
def test_impl(a):
(m,n) = a.shape
b = a.copy()
b[1,1:-1] = a[0,:-2]
return b
self.check(test_impl, np.arange(12).reshape((3,4)))
def test_parfor_slice13(self):
def test_impl(a):
(m,n) = a.shape
b = a.copy()
c = -1
b[1,1:c] = a[0,-n:c-1]
return b
self.check(test_impl, np.arange(12).reshape((3,4)))
def test_parfor_slice14(self):
def test_impl(a):
(m,n) = a.shape
b = a.copy()
b[1,:-1] = a[0,-3:4]
return b
self.check(test_impl, np.arange(12).reshape((3,4)))
def test_parfor_slice15(self):
def test_impl(a):
(m,n) = a.shape
b = a.copy()
b[1,-(n-1):] = a[0,-3:4]
return b
self.check(test_impl, np.arange(12).reshape((3,4)))
@disabled_test
def test_parfor_slice16(self):
""" This test is disabled because if n is larger than the array size
then n and n-1 will both be the end of the array and thus the
slices will in fact be of different sizes and unable to fuse.
"""
def test_impl(a, b, n):
assert(a.shape == b.shape)
a[1:n] = 10
b[0:(n-1)] = 10
return a * b
self.check(test_impl, np.ones(10), np.zeros(10), 8)
args = (numba.float64[:], numba.float64[:], numba.int64)
self.assertEqual(countParfors(test_impl, args), 2)
def test_parfor_slice17(self):
def test_impl(m, A):
B = np.zeros(m)
n = len(A)
B[-n:] = A
return B
self.check(test_impl, 10, np.ones(10))
def test_parfor_slice18(self):
# issue 3534
def test_impl():
a = np.zeros(10)
a[1:8] = np.arange(0, 7)
y = a[3]
return y
self.check(test_impl)
def test_parfor_slice19(self):
# issues #3561 and #3554, empty slice binop
def test_impl(X):
X[:0] += 1
return X
self.check(test_impl, np.ones(10))
def test_parfor_slice20(self):
# issue #4075, slice size
def test_impl():
a = np.ones(10)
c = a[1:]
s = len(c)
return s
self.check(test_impl, check_scheduling=False)
def test_parfor_slice21(self):
def test_impl(x1, x2):
x1 = x1.reshape(x1.size, 1)
x2 = x2.reshape(x2.size, 1)
return x1 >= x2[:-1, :]
x1 = np.random.rand(5)
x2 = np.random.rand(6)
self.check(test_impl, x1, x2)
def test_parfor_slice22(self):
def test_impl(x1, x2):
b = np.zeros((10,))
for i in prange(1):
b += x1[:, x2]
return b
x1 = np.zeros((10,7))
x2 = np.array(4)
self.check(test_impl, x1, x2)
def test_parfor_slice23(self):
# issue #4630
def test_impl(x):
x[:0] = 2
return x
self.check(test_impl, np.ones(10))
def test_parfor_slice24(self):
def test_impl(m, A, n):
B = np.zeros(m)
C = B[n:]
C = A[:len(C)]
return B
for i in range(-15, 15):
self.check(test_impl, 10, np.ones(10), i)
def test_parfor_slice25(self):
def test_impl(m, A, n):
B = np.zeros(m)
C = B[:n]
C = A[:len(C)]
return B
for i in range(-15, 15):
self.check(test_impl, 10, np.ones(10), i)
def test_parfor_slice26(self):
def test_impl(a):
(n,) = a.shape
b = a.copy()
b[-(n-1):] = a[-3:4]
return b
self.check(test_impl, np.arange(4))
def test_parfor_slice27(self):
# issue5601: tests array analysis of the slice with
# n_valid_vals of unknown size.
def test_impl(a):
n_valid_vals = 0
for i in prange(a.shape[0]):
if a[i] != 0:
n_valid_vals += 1
if n_valid_vals:
unused = a[:n_valid_vals]
return 0
self.check(test_impl, np.arange(3))
def test_parfor_array_access_lower_slice(self):
for ts in [slice(1, 3, None), slice(2, None, None), slice(None, 2, -1),
slice(None, None, None), slice(None, None, -2)]:
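            # Note: ts is a freevar of test_impl here, so each slice case
            # should get its own compiled specialization.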
def test_impl(n):
X = np.arange(n * 4).reshape((n, 4))
y = 0
for i in numba.prange(n):
y += X[i, ts].sum()
return y
n = 10
self.check(test_impl, n)
X = np.arange(n * 4).reshape((n, 4))
def test_impl(X):
y = 0
for i in numba.prange(X.shape[0]):
y += X[i, ts].sum()
return y
self.check(test_impl, X)
@skip_parfors_unsupported
class TestParforsOptions(TestParforsBase):
def test_parfor_options(self):
def test_impl(a):
n = a.shape[0]
b = np.ones(n)
c = np.array([ i for i in range(n) ])
b[:n] = a + b * c
for i in prange(n):
c[i] = b[i] * a[i]
return reduce(lambda x,y:x+y, c, 0)
self.check(test_impl, np.ones(10))
args = (numba.float64[:],)
# everything should fuse with default option
self.assertEqual(countParfors(test_impl, args), 1)
# with no fusion
self.assertEqual(countParfors(test_impl, args, fusion=False), 6)
# with no fusion, comprehension
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False), 5)
#with no fusion, comprehension, setitem
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False), 4)
# with no fusion, comprehension, prange
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False, prange=False), 3)
# with no fusion, comprehension, prange, reduction
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False, prange=False,
reduction=False), 2)
# with no fusion, comprehension, prange, reduction, numpy
self.assertEqual(countParfors(test_impl, args, fusion=False,
comprehension=False, setitem=False, prange=False,
reduction=False, numpy=False), 0)
@skip_parfors_unsupported
class TestParforsBitMask(TestParforsBase):
def test_parfor_bitmask1(self):
def test_impl(a, n):
b = a > n
a[b] = 0
return a
self.check(test_impl, np.arange(10), 5)
def test_parfor_bitmask2(self):
def test_impl(a, b):
a[b] = 0
return a
a = np.arange(10)
b = a > 5
self.check(test_impl, a, b)
def test_parfor_bitmask3(self):
def test_impl(a, b):
a[b] = a[b]
return a
a = np.arange(10)
b = a > 5
self.check(test_impl, a, b)
def test_parfor_bitmask4(self):
def test_impl(a, b):
a[b] = (2 * a)[b]
return a
a = np.arange(10)
b = a > 5
self.check(test_impl, a, b)
def test_parfor_bitmask5(self):
def test_impl(a, b):
a[b] = a[b] * a[b]
return a
a = np.arange(10)
b = a > 5
self.check(test_impl, a, b)
def test_parfor_bitmask6(self):
def test_impl(a, b, c):
a[b] = c
return a
a = np.arange(10)
b = a > 5
c = np.zeros(sum(b))
# expect failure due to lack of parallelism
with self.assertRaises(AssertionError) as raises:
self.check(test_impl, a, b, c)
self.assertIn("\'@do_scheduling\' not found", str(raises.exception))
@skip_parfors_unsupported
class TestParforsMisc(TestParforsBase):
"""
Tests miscellaneous parts of ParallelAccelerator use.
"""
def test_no_warn_if_cache_set(self):
def pyfunc():
arr = np.ones(100)
for i in prange(arr.size):
arr[i] += i
return arr
cfunc = njit(parallel=True, cache=True)(pyfunc)
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter('always')
warnings.filterwarnings(action="ignore",
module="typeguard")
# Filter out warnings about TBB interface mismatch
warnings.filterwarnings(action='ignore',
message=r".*TBB_INTERFACE_VERSION.*",
category=numba.errors.NumbaWarning,
module=r'numba\.np\.ufunc\.parallel.*')
cfunc()
self.assertEqual(len(raised_warnings), 0)
# Make sure the dynamic globals flag is set
has_dynamic_globals = [cres.library.has_dynamic_globals
for cres in cfunc.overloads.values()]
self.assertEqual(has_dynamic_globals, [False])
def test_statement_reordering_respects_aliasing(self):
def impl():
a = np.zeros(10)
a[1:8] = np.arange(0, 7)
print('a[3]:', a[3])
print('a[3]:', a[3])
return a
cres = self.compile_parallel(impl, ())
with captured_stdout() as stdout:
cres.entry_point()
for line in stdout.getvalue().splitlines():
self.assertEqual('a[3]: 2.0', line)
def test_parfor_ufunc_typing(self):
def test_impl(A):
return np.isinf(A)
A = np.array([np.inf, 0.0])
cfunc = njit(parallel=True)(test_impl)
# save global state
old_seq_flag = numba.parfors.parfor.sequential_parfor_lowering
try:
numba.parfors.parfor.sequential_parfor_lowering = True
np.testing.assert_array_equal(test_impl(A), cfunc(A))
finally:
# recover global state
numba.parfors.parfor.sequential_parfor_lowering = old_seq_flag
def test_init_block_dce(self):
# issue4690
def test_impl():
res = 0
arr = [1,2,3,4,5]
numba.parfors.parfor.init_prange()
dummy = arr
for i in numba.prange(5):
res += arr[i]
return res + dummy[2]
self.assertEqual(get_init_block_size(test_impl, ()), 0)
def test_alias_analysis_for_parfor1(self):
def test_impl():
acc = 0
for _ in range(4):
acc += 1
data = np.zeros((acc,))
return data
self.check(test_impl)
def test_no_state_change_in_gufunc_lowering_on_error(self):
# tests #5098, if there's an exception arising in gufunc lowering the
# sequential_parfor_lowering global variable should remain as False on
# stack unwind.
BROKEN_MSG = 'BROKEN_MSG'
@register_pass(mutates_CFG=True, analysis_only=False)
class BreakParfors(AnalysisPass):
_name = "break_parfors"
def __init__(self):
AnalysisPass.__init__(self)
def run_pass(self, state):
for blk in state.func_ir.blocks.values():
for stmt in blk.body:
if isinstance(stmt, numba.parfors.parfor.Parfor):
                            # races should be a set(); because a list is
                            # iterable it gets through to the
                            # _create_gufunc_for_parfor_body routine, at which
                            # point it needs to be a set so that e.g.
                            # set.difference can be computed. This therefore
                            # creates an error in the right location.
class Broken(list):
def difference(self, other):
raise errors.LoweringError(BROKEN_MSG)
stmt.races = Broken()
return True
class BreakParforsCompiler(CompilerBase):
def define_pipelines(self):
pm = DefaultPassBuilder.define_nopython_pipeline(self.state)
pm.add_pass_after(BreakParfors, IRLegalization)
pm.finalize()
return [pm]
@njit(parallel=True, pipeline_class=BreakParforsCompiler)
def foo():
x = 1
for _ in prange(1):
x += 1
return x
# assert default state for global
self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering)
with self.assertRaises(errors.LoweringError) as raises:
foo()
self.assertIn(BROKEN_MSG, str(raises.exception))
# assert state has not changed
self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering)
def test_issue_5098(self):
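        # Set up a minimal opaque type with unboxing support so that an
        # overloaded method compiled with parallel=True can be called from
        # njit code below.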
class DummyType(types.Opaque):
pass
dummy_type = DummyType("my_dummy")
register_model(DummyType)(models.OpaqueModel)
class Dummy(object):
pass
@typeof_impl.register(Dummy)
def typeof_Dummy(val, c):
return dummy_type
@unbox(DummyType)
def unbox_index(typ, obj, c):
return NativeValue(c.context.get_dummy_value())
@overload_method(DummyType, "method1", jit_options={"parallel":True})
def _get_method1(obj, arr, func):
def _foo(obj, arr, func):
def baz(a, f):
c = a.copy()
c[np.isinf(a)] = np.nan
return f(c)
length = len(arr)
output_arr = np.empty(length, dtype=np.float64)
for i in prange(length):
output_arr[i] = baz(arr[i], func)
for i in prange(length - 1):
output_arr[i] += baz(arr[i], func)
return output_arr
return _foo
@njit
def bar(v):
return v.mean()
@njit
def test1(d):
return d.method1(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]), bar)
save_state = numba.parfors.parfor.sequential_parfor_lowering
self.assertFalse(save_state)
try:
test1(Dummy())
self.assertFalse(numba.parfors.parfor.sequential_parfor_lowering)
finally:
# always set the sequential_parfor_lowering state back to the
# original state
numba.parfors.parfor.sequential_parfor_lowering = save_state
def test_oversized_tuple_as_arg_to_kernel(self):
@njit(parallel=True)
def oversize_tuple(idx):
big_tup = (1,2,3,4)
z = 0
for x in prange(10):
z += big_tup[idx]
return z
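        # Lower the tuple size limit to 3 so the 4-element big_tup exceeds it
        # and triggers the UnsupportedParforsError.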
with override_env_config('NUMBA_PARFOR_MAX_TUPLE_SIZE', '3'):
with self.assertRaises(errors.UnsupportedParforsError) as raises:
oversize_tuple(0)
errstr = str(raises.exception)
self.assertIn("Use of a tuple", errstr)
self.assertIn("in a parallel region", errstr)
def test_issue5167(self):
def ndvi_njit(img_nir, img_red):
fillvalue = 0
out_img = np.full(img_nir.shape, fillvalue, dtype=img_nir.dtype)
dims = img_nir.shape
for y in prange(dims[0]):
for x in prange(dims[1]):
out_img[y, x] = ((img_nir[y, x] - img_red[y, x]) /
(img_nir[y, x] + img_red[y, x]))
return out_img
tile_shape = (4, 4)
array1 = np.random.uniform(low=1.0, high=10000.0, size=tile_shape)
array2 = np.random.uniform(low=1.0, high=10000.0, size=tile_shape)
self.check(ndvi_njit, array1, array2)
def test_issue5065(self):
def reproducer(a, dist, dist_args):
result = np.zeros((a.shape[0], a.shape[0]), dtype=np.float32)
for i in prange(a.shape[0]):
for j in range(i + 1, a.shape[0]):
d = dist(a[i], a[j], *dist_args)
result[i, j] = d
result[j, i] = d
return result
@njit
def euclidean(x, y):
result = 0.0
for i in range(x.shape[0]):
result += (x[i] - y[i]) ** 2
return np.sqrt(result)
a = np.random.random(size=(5, 2))
got = njit(parallel=True)(reproducer)(a.copy(), euclidean,())
expected = reproducer(a.copy(), euclidean,())
np.testing.assert_allclose(got, expected)
def test_issue5001(self):
def test_numba_parallel(myarray):
result = [0] * len(myarray)
for i in prange(len(myarray)):
result[i] = len(myarray[i])
return result
myarray = (np.empty(100),np.empty(50))
self.check(test_numba_parallel, myarray)
def test_issue3169(self):
@njit
def foo(grids):
pass
@njit(parallel=True)
def bar(grids):
for x in prange(1):
foo(grids)
# returns nothing, just check it compiles
bar(([1],) * 2)
@disabled_test
def test_issue4846(self):
mytype = namedtuple("mytype", ("a", "b"))
def outer(mydata):
for k in prange(3):
inner(k, mydata)
return mydata.a
@njit(nogil=True)
def inner(k, mydata):
f = (k, mydata.a)
g = (k, mydata.b)
mydata = mytype(a="a", b="b")
self.check(outer, mydata)
def test_issue3748(self):
def test1b():
x = (1, 2, 3, 4, 5)
a = 0
for i in prange(len(x)):
a += x[i]
return a
self.check(test1b,)
def test_issue5277(self):
def parallel_test(size, arr):
for x in prange(size[0]):
for y in prange(size[1]):
arr[y][x] = x * 4.5 + y
return arr
size = (10, 10)
arr = np.zeros(size, dtype=int)
self.check(parallel_test, size, arr)
def test_issue5570_ssa_races(self):
@njit(parallel=True)
def foo(src, method, out):
for i in prange(1):
for j in range(1):
out[i, j] = 1
if method:
out += 1
return out
src = np.zeros((5,5))
method = 57
out = np.zeros((2, 2))
self.assertPreciseEqual(
foo(src, method, out),
foo.py_func(src, method, out)
)
def test_issue6095_numpy_max(self):
@njit(parallel=True)
def find_maxima_3D_jit(args):
package = args
for index in range(0, 10):
z_stack = package[index, :, :]
return np.max(z_stack)
np.random.seed(0)
args = np.random.random((10, 10, 10))
self.assertPreciseEqual(
find_maxima_3D_jit(args),
find_maxima_3D_jit.py_func(args),
)
def test_issue5942_1(self):
# issue5942: tests statement reordering of
# aliased arguments.
def test_impl(gg, gg_next):
gs = gg.shape
d = gs[0]
for i_gg in prange(d):
gg_next[i_gg, :] = gg[i_gg, :]
gg_next[i_gg, 0] += 1
return gg_next
d = 4
k = 2
gg = np.zeros((d, k), dtype = np.int32)
gg_next = np.zeros((d, k), dtype = np.int32)
self.check(test_impl, gg, gg_next)
def test_issue5942_2(self):
# issue5942: tests statement reordering
def test_impl(d, k):
gg = np.zeros((d, k), dtype = np.int32)
gg_next = np.zeros((d, k), dtype = np.int32)
for i_gg in prange(d):
for n in range(k):
gg[i_gg, n] = i_gg
gg_next[i_gg, :] = gg[i_gg, :]
gg_next[i_gg, 0] += 1
return gg_next
d = 4
k = 2
self.check(test_impl, d, k)
@skip_unless_scipy
def test_issue6102(self):
# The problem is originally observed on Python3.8 because of the
# changes in how loops are represented in 3.8 bytecode.
@njit(parallel=True)
def f(r):
for ir in prange(r.shape[0]):
dist = np.inf
tr = np.array([0, 0, 0], dtype=np.float32)
for i in [1, 0, -1]:
dist_t = np.linalg.norm(r[ir, :] + i)
if dist_t < dist:
dist = dist_t
tr = np.array([i, i, i], dtype=np.float32)
r[ir, :] += tr
return r
r = np.array([[0., 0., 0.], [0., 0., 1.]])
self.assertPreciseEqual(f(r), f.py_func(r))
def test_issue6774(self):
@njit(parallel=True)
def test_impl():
n = 5
na_mask = np.ones((n,))
result = np.empty((n - 1,))
for i in prange(len(result)):
result[i] = np.sum(na_mask[i:i + 1])
return result
self.check(test_impl)
def test_issue4963_globals(self):
def test_impl():
buf = np.zeros((_GLOBAL_INT_FOR_TESTING1, _GLOBAL_INT_FOR_TESTING2))
return buf
self.check(test_impl)
def test_issue4963_freevars(self):
_FREEVAR_INT_FOR_TESTING1 = 17
_FREEVAR_INT_FOR_TESTING2 = 5
def test_impl():
buf = np.zeros((_FREEVAR_INT_FOR_TESTING1, _FREEVAR_INT_FOR_TESTING2))
return buf
self.check(test_impl)
@skip_parfors_unsupported
class TestParforsDiagnostics(TestParforsBase):
def check(self, pyfunc, *args, **kwargs):
cfunc, cpfunc = self.compile_all(pyfunc, *args)
self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs)
def assert_fusion_equivalence(self, got, expected):
a = self._fusion_equivalent(got)
b = self._fusion_equivalent(expected)
self.assertEqual(a, b)
def _fusion_equivalent(self, thing):
        # Parfor instances are numbered from wherever the internal state
        # counter happens to be. To assert fusion equivalence we just check
        # that the relative differences between fusion adjacency lists are
        # the same. For example:
        # {3: [2, 1]} is the same as {13: [12, 11]}
        # This function strips out the absolute indexing, returning something
        # suitable for checking equivalence.
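        # A worked example: {3: [2, 1]} normalizes to {0: [-1, -2]}, as does
        # {13: [12, 11]}, so the two compare equal.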
new = defaultdict(list)
min_key = min(thing.keys())
for k in sorted(thing.keys()):
new[k - min_key] = [x - min_key for x in thing[k]]
return new
def assert_diagnostics(self, diagnostics, parfors_count=None,
fusion_info=None, nested_fusion_info=None,
replaced_fns=None, hoisted_allocations=None):
if parfors_count is not None:
self.assertEqual(parfors_count, diagnostics.count_parfors())
if fusion_info is not None:
self.assert_fusion_equivalence(fusion_info, diagnostics.fusion_info)
if nested_fusion_info is not None:
self.assert_fusion_equivalence(nested_fusion_info,
diagnostics.nested_fusion_info)
if replaced_fns is not None:
repl = diagnostics.replaced_fns.values()
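            # for/else: the AssertionError below fires only if no recorded
            # replacement matches x.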
for x in replaced_fns:
for replaced in repl:
if replaced[0] == x:
break
else:
msg = "Replacement for %s was not found. Had %s" % (x, repl)
raise AssertionError(msg)
if hoisted_allocations is not None:
hoisted_allocs = diagnostics.hoisted_allocations()
self.assertEqual(hoisted_allocations, len(hoisted_allocs))
# just make sure that the dump() function doesn't have an issue!
with captured_stdout():
for x in range(1, 5):
diagnostics.dump(x)
def test_array_expr(self):
def test_impl():
n = 10
a = np.ones(n)
b = np.zeros(n)
return a + b
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, parfors_count=1,
fusion_info = {3: [4, 5]})
def test_prange(self):
def test_impl():
n = 10
a = np.empty(n)
for i in prange(n):
a[i] = i * 10
return a
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, parfors_count=1)
def test_user_varname(self):
"""make sure original user variable name is used in fusion info
"""
def test_impl():
n = 10
x = np.ones(n)
a = np.sin(x)
b = np.cos(a * a)
acc = 0
for i in prange(n - 2):
for j in prange(n - 1):
acc += b[i] + b[j + 1]
return acc
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
# make sure original 'n' variable name is used in fusion report for loop
# dimension mismatch
self.assertTrue(
any("slice(0, n, 1)" in r.message for r in diagnostics.fusion_reports))
def test_nested_prange(self):
def test_impl():
n = 10
a = np.empty((n, n))
for i in prange(n):
for j in prange(n):
a[i, j] = i * 10 + j
return a
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, parfors_count=2,
nested_fusion_info={2: [1]})
def test_function_replacement(self):
def test_impl():
n = 10
a = np.ones(n)
b = np.argmin(a)
return b
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, parfors_count=1,
fusion_info={2: [3]},
replaced_fns = [('argmin', 'numpy'),])
def test_reduction(self):
def test_impl():
n = 10
a = np.ones(n + 1) # prevent fusion
acc = 0
for i in prange(n):
acc += a[i]
return acc
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, parfors_count=2)
def test_setitem(self):
def test_impl():
n = 10
a = np.ones(n)
a[:] = 7
return a
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, parfors_count=1)
def test_allocation_hoisting(self):
def test_impl():
n = 10
m = 5
acc = 0
for i in prange(n):
                temp = np.zeros((m,))  # the np.zeros call should get hoisted
for j in range(m):
temp[j] = i
acc += temp[-1]
return acc
self.check(test_impl,)
cpfunc = self.compile_parallel(test_impl, ())
diagnostics = cpfunc.metadata['parfor_diagnostics']
self.assert_diagnostics(diagnostics, hoisted_allocations=1)
class TestPrangeBase(TestParforsBase):
def generate_prange_func(self, pyfunc, patch_instance):
"""
This function does the actual code augmentation to enable the explicit
testing of `prange` calls in place of `range`.
"""
pyfunc_code = pyfunc.__code__
prange_names = list(pyfunc_code.co_names)
if patch_instance is None:
# patch all instances, cheat by just switching
# range for prange
assert 'range' in pyfunc_code.co_names
prange_names = tuple([x if x != 'range' else 'prange'
for x in pyfunc_code.co_names])
new_code = bytes(pyfunc_code.co_code)
else:
# patch specified instances...
# find where 'range' is in co_names
range_idx = pyfunc_code.co_names.index('range')
range_locations = []
# look for LOAD_GLOBALs that point to 'range'
for instr in dis.Bytecode(pyfunc_code):
if instr.opname == 'LOAD_GLOBAL':
if _fix_LOAD_GLOBAL_arg(instr.arg) == range_idx:
range_locations.append(instr.offset + 1)
# add in 'prange' ref
prange_names.append('prange')
prange_names = tuple(prange_names)
prange_idx = len(prange_names) - 1
if utils.PYVERSION == (3, 11):
# this is the inverse of _fix_LOAD_GLOBAL_arg
prange_idx = 1 + (prange_idx << 1)
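                # On 3.11 the LOAD_GLOBAL oparg is (namei << 1) | push_null
                # flag, hence the shift and the low bit set here.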
new_code = bytearray(pyfunc_code.co_code)
assert len(patch_instance) <= len(range_locations)
# patch up the new byte code
for i in patch_instance:
idx = range_locations[i]
new_code[idx] = prange_idx
new_code = bytes(new_code)
# create code object with prange mutation
prange_code = pyfunc_code.replace(co_code=new_code,
co_names=prange_names)
# get function
pfunc = pytypes.FunctionType(prange_code, globals())
return pfunc
def prange_tester(self, pyfunc, *args, **kwargs):
"""
The `prange` tester
This is a hack. It basically switches out range calls for prange.
It does this by copying the live code object of a function
containing 'range' then copying the .co_names and mutating it so
that 'range' is replaced with 'prange'. It then creates a new code
object containing the mutation and instantiates a function to contain
it. At this point three results are created:
1. The result of calling the original python function.
2. The result of calling a njit compiled version of the original
python function.
3. The result of calling a njit(parallel=True) version of the mutated
function containing `prange`.
The three results are then compared and the `prange` based function's
llvm_ir is inspected to ensure the scheduler code is present.
Arguments:
pyfunc - the python function to test
args - data arguments to pass to the pyfunc under test
Keyword Arguments:
patch_instance - iterable containing which instances of `range` to
replace. If not present all instance of `range` are
replaced.
scheduler_type - 'signed', 'unsigned' or None, default is None.
Supply in cases where the presence of a specific
scheduler is to be asserted.
check_fastmath - if True then a check will be performed to ensure the
IR contains instructions labelled with 'fast'
check_fastmath_result - if True then a check will be performed to
ensure the result of running with fastmath
on matches that of the pyfunc
Remaining kwargs are passed to np.testing.assert_almost_equal
Example:
def foo():
acc = 0
for x in range(5):
for y in range(10):
acc +=1
return acc
# calling as
prange_tester(foo)
# will test code equivalent to
# def foo():
# acc = 0
# for x in prange(5): # <- changed
# for y in prange(10): # <- changed
# acc +=1
# return acc
# calling as
prange_tester(foo, patch_instance=[1])
# will test code equivalent to
# def foo():
# acc = 0
# for x in range(5): # <- outer loop (0) unchanged
# for y in prange(10): # <- inner loop (1) changed
# acc +=1
# return acc
"""
patch_instance = kwargs.pop('patch_instance', None)
check_fastmath = kwargs.pop('check_fastmath', False)
check_fastmath_result = kwargs.pop('check_fastmath_result', False)
pfunc = self.generate_prange_func(pyfunc, patch_instance)
# Compile functions
# compile a standard njit of the original function
sig = tuple([numba.typeof(x) for x in args])
cfunc = self.compile_njit(pyfunc, sig)
# compile the prange injected function
with warnings.catch_warnings(record=True) as raised_warnings:
warnings.simplefilter('always')
cpfunc = self.compile_parallel(pfunc, sig)
# if check_fastmath is True then check fast instructions
if check_fastmath:
self.assert_fastmath(pfunc, sig)
# if check_fastmath_result is True then compile a function
# so that the parfors checker can assert the result is ok.
if check_fastmath_result:
fastcpfunc = self.compile_parallel_fastmath(pfunc, sig)
kwargs = dict({'fastmath_pcres': fastcpfunc}, **kwargs)
self.check_parfors_vs_others(pyfunc, cfunc, cpfunc, *args, **kwargs)
return raised_warnings
@skip_parfors_unsupported
class TestPrangeBasic(TestPrangeBase):
""" Tests Prange """
def test_prange01(self):
def test_impl():
n = 4
A = np.zeros(n)
for i in range(n):
A[i] = 2.0 * i
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange02(self):
def test_impl():
n = 4
A = np.zeros(n - 1)
for i in range(1, n):
A[i - 1] = 2.0 * i
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03(self):
def test_impl():
s = 10
for i in range(10):
s += 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03mul(self):
def test_impl():
s = 3
for i in range(10):
s *= 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03sub(self):
def test_impl():
s = 100
for i in range(10):
s -= 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange03div(self):
def test_impl():
s = 10
for i in range(10):
s /= 2
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange04(self):
def test_impl():
a = 2
b = 3
A = np.empty(4)
for i in range(4):
if i == a:
A[i] = b
else:
A[i] = 0
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange05(self):
def test_impl():
n = 4
A = np.ones((n), dtype=np.float64)
s = 0
for i in range(1, n - 1, 1):
s += A[i]
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange06(self):
def test_impl():
n = 4
A = np.ones((n), dtype=np.float64)
s = 0
for i in range(1, 1, 1):
s += A[i]
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange07(self):
def test_impl():
n = 4
A = np.ones((n), dtype=np.float64)
s = 0
for i in range(n, 1):
s += A[i]
return s
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange08(self):
def test_impl():
n = 4
A = np.ones((n))
acc = 0
for i in range(len(A)):
for j in range(len(A)):
acc += A[i]
return acc
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange08_1(self):
def test_impl():
n = 4
A = np.ones((n))
acc = 0
for i in range(4):
for j in range(4):
acc += A[i]
return acc
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange09(self):
def test_impl():
n = 4
acc = 0
for i in range(n):
for j in range(n):
acc += 1
return acc
# patch inner loop to 'prange'
self.prange_tester(test_impl, patch_instance=[1],
scheduler_type='unsigned',
check_fastmath=True)
def test_prange10(self):
def test_impl():
n = 4
acc2 = 0
for j in range(n):
acc1 = 0
for i in range(n):
acc1 += 1
acc2 += acc1
return acc2
# patch outer loop to 'prange'
self.prange_tester(test_impl, patch_instance=[0],
scheduler_type='unsigned',
check_fastmath=True)
@unittest.skip("list append is not thread-safe yet (#2391, #2408)")
def test_prange11(self):
def test_impl():
n = 4
return [np.sin(j) for j in range(n)]
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange12(self):
def test_impl():
acc = 0
n = 4
X = np.ones(n)
for i in range(-len(X)):
acc += X[i]
return acc
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
def test_prange13(self):
def test_impl(n):
acc = 0
for i in range(n):
acc += 1
return acc
self.prange_tester(test_impl, np.int32(4), scheduler_type='unsigned',
check_fastmath=True)
def test_prange14(self):
def test_impl(A):
s = 3
for i in range(len(A)):
s += A[i]*2
return s
        # This exercises reduction detection well since the accumulator is
        # initialized before the parfor and the value read from the array is
        # modified before being accumulated.
self.prange_tester(test_impl, np.random.ranf(4),
scheduler_type='unsigned',
check_fastmath=True)
def test_prange15(self):
# from issue 2587
# test parfor type inference when there is multi-dimensional indexing
def test_impl(N):
acc = 0
for i in range(N):
x = np.ones((1, 1))
acc += x[0, 0]
return acc
self.prange_tester(test_impl, 1024, scheduler_type='unsigned',
check_fastmath=True)
# Tests for negative ranges
def test_prange16(self):
def test_impl(N):
acc = 0
for i in range(-N, N):
acc += 2
return acc
self.prange_tester(test_impl, 1024, scheduler_type='signed',
check_fastmath=True)
def test_prange17(self):
def test_impl(N):
acc = 0
X = np.ones(N)
for i in range(-N, N):
acc += X[i]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange18(self):
def test_impl(N):
acc = 0
X = np.ones(N)
for i in range(-N, 5):
acc += X[i]
for j in range(-4, N):
acc += X[j]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange19(self):
def test_impl(N):
acc = 0
M = N + 4
X = np.ones((N, M))
for i in range(-N, N):
for j in range(-M, M):
acc += X[i, j]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange20(self):
def test_impl(N):
acc = 0
X = np.ones(N)
for i in range(-1, N):
acc += X[i]
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange21(self):
def test_impl(N):
acc = 0
for i in range(-3, -1):
acc += 3
return acc
self.prange_tester(test_impl, 9, scheduler_type='signed',
check_fastmath=True)
def test_prange22(self):
def test_impl():
a = 0
b = 3
A = np.empty(4)
for i in range(-2, 2):
if i == a:
A[i] = b
elif i < 1:
A[i] = -1
else:
A[i] = 7
return A
self.prange_tester(test_impl, scheduler_type='signed',
check_fastmath=True, check_fastmath_result=True)
def test_prange23(self):
# test non-contig input
def test_impl(A):
for i in range(len(A)):
A[i] = i
return A
A = np.zeros(32)[::2]
self.prange_tester(test_impl, A, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
def test_prange24(self):
# test non-contig input, signed range
def test_impl(A):
for i in range(-len(A), 0):
A[i] = i
return A
A = np.zeros(32)[::2]
self.prange_tester(test_impl, A, scheduler_type='signed',
check_fastmath=True, check_fastmath_result=True)
def test_prange25(self):
def test_impl(A):
n = len(A)
buf = [np.zeros_like(A) for _ in range(n)]
for i in range(n):
buf[i] = A + i
return buf
A = np.ones((10,))
self.prange_tester(test_impl, A, patch_instance=[1],
scheduler_type='unsigned', check_fastmath=True,
check_fastmath_result=True)
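        # Also verify via the diagnostics that no allocations were hoisted
        # out of the parallel loop.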
cpfunc = self.compile_parallel(test_impl, (numba.typeof(A),))
diagnostics = cpfunc.metadata['parfor_diagnostics']
hoisted_allocs = diagnostics.hoisted_allocations()
self.assertEqual(len(hoisted_allocs), 0)
def test_prange26(self):
def test_impl(A):
B = A[::3]
for i in range(len(B)):
B[i] = i
return A
A = np.zeros(32)[::2]
self.prange_tester(test_impl, A, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
def test_prange27(self):
# issue5597: usedef error in parfor
def test_impl(a, b, c):
for j in range(b[0]-1):
for k in range(2):
z = np.abs(a[c-1:c+1])
return 0
# patch inner loop to 'prange'
self.prange_tester(test_impl,
np.arange(20),
np.asarray([4,4,4,4,4,4,4,4,4,4]),
0,
patch_instance=[1],
scheduler_type='unsigned',
check_fastmath=True)
def test_prange28(self):
# issue7105: label conflict in nested parfor
def test_impl(x, y):
out = np.zeros(len(y))
for idx in range(0, len(y)):
i0 = y[idx, 0]
i1 = y[idx, 1]
Pt1 = x[i0]
Pt2 = x[i1]
v = Pt1 - Pt2
vl2 = v[0] + v[1]
out[idx] = vl2
return out
X = np.array([[-1., -1.],
[-1., 1.],
[ 0., 0.],
[ 1., -1.],
[ 1., 0.],
[ 1., 1.]])
Y = np.array([[0, 1],
[1, 2],
[2, 3],
[3, 4],
[4, 5]])
self.prange_tester(test_impl, X, Y, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
def test_prange29(self):
# issue7630: SSA renaming in prange header
def test_impl(flag):
result = 0
if flag:
for i in range(1):
result += 1
else:
for i in range(1):
result -= 3
return result
self.prange_tester(test_impl, True)
self.prange_tester(test_impl, False)
def test_prange30(self):
# issue7675: broadcast setitem
def test_impl(x, par, numthreads):
n_par = par.shape[0]
n_x = len(x)
result = np.zeros((n_par, n_x), dtype=np.float64)
chunklen = (len(x) + numthreads - 1) // numthreads
for i in range(numthreads):
start = i * chunklen
stop = (i + 1) * chunklen
result[:, start:stop] = x[start:stop] * par[:]
return result
x = np.array(np.arange(0, 6, 1.0))
par = np.array([1.0, 2.0, 3.0])
self.prange_tester(test_impl, x, par, 2)
@skip_parfors_unsupported
class TestPrangeSpecific(TestPrangeBase):
""" Tests specific features/problems found under prange"""
def test_prange_two_instances_same_reduction_var(self):
# issue4922 - multiple uses of same reduction variable
def test_impl(n):
c = 0
for i in range(n):
c += 1
if i > 10:
c += 1
return c
self.prange_tester(test_impl, 9)
def test_prange_conflicting_reduction_ops(self):
def test_impl(n):
c = 0
for i in range(n):
c += 1
if i > 10:
c *= 1
return c
with self.assertRaises(errors.UnsupportedError) as raises:
self.prange_tester(test_impl, 9)
msg = ('Reduction variable c has multiple conflicting reduction '
'operators.')
self.assertIn(msg, str(raises.exception))
def test_prange_two_conditional_reductions(self):
# issue6414
def test_impl():
A = B = 0
for k in range(1):
if k == 2:
A += 1
else:
x = np.zeros((1, 1))
if x[0, 0]:
B += 1
return A, B
self.prange_tester(test_impl)
def test_prange_nested_reduction1(self):
def test_impl():
A = 0
for k in range(1):
for i in range(1):
if i == 0:
A += 1
return A
self.prange_tester(test_impl)
@disabled_test
def test_check_error_model(self):
def test_impl():
n = 32
A = np.zeros(n)
for i in range(n):
A[i] = 1 / i # div-by-zero when i = 0
return A
with self.assertRaises(ZeroDivisionError) as raises:
test_impl()
# compile parallel functions
pfunc = self.generate_prange_func(test_impl, None)
pcres = self.compile_parallel(pfunc, ())
pfcres = self.compile_parallel_fastmath(pfunc, ())
# should raise
with self.assertRaises(ZeroDivisionError) as raises:
pcres.entry_point()
# should not raise
result = pfcres.entry_point()
self.assertEqual(result[0], np.inf)
def test_check_alias_analysis(self):
# check alias analysis reports ok
def test_impl(A):
for i in range(len(A)):
B = A[i]
B[:] = 1
return A
A = np.zeros(32).reshape(4, 8)
self.prange_tester(test_impl, A, scheduler_type='unsigned',
check_fastmath=True, check_fastmath_result=True)
pfunc = self.generate_prange_func(test_impl, None)
sig = tuple([numba.typeof(A)])
cres = self.compile_parallel_fastmath(pfunc, sig)
_ir = self._get_gufunc_ir(cres)
for k, v in _ir.items():
for line in v.splitlines():
# get the fn definition line
if 'define' in line and k in line:
# there should only be 2x noalias, one on each of the first
# 2 args (retptr, excinfo).
                    # Note: used to be 3x noalias, but the env arg is dropped.
self.assertEqual(line.count('noalias'), 2)
break
def test_prange_raises_invalid_step_size(self):
def test_impl(N):
acc = 0
for i in range(0, N, 2):
acc += 2
return acc
with self.assertRaises(errors.UnsupportedRewriteError) as raises:
self.prange_tester(test_impl, 1024)
msg = 'Only constant step size of 1 is supported for prange'
self.assertIn(msg, str(raises.exception))
def test_prange_fastmath_check_works(self):
# this function will benefit from `fastmath`, the div will
# get optimised to a multiply by reciprocal and the accumulator
# then becomes an fmadd: A = A + i * 0.5
def test_impl():
n = 128
A = 0
for i in range(n):
A += i / 2.0
return A
self.prange_tester(test_impl, scheduler_type='unsigned',
check_fastmath=True)
pfunc = self.generate_prange_func(test_impl, None)
cres = self.compile_parallel_fastmath(pfunc, ())
ir = self._get_gufunc_ir(cres)
_id = '%[A-Z_0-9]?(.[0-9]+)+[.]?[i]?'
        recipr_str = r'\s+%s = fmul fast double %s, 5.000000e-01'
reciprocal_inst = re.compile(recipr_str % (_id, _id))
        fadd_inst = re.compile(r'\s+%s = fadd fast double %s, %s'
                               % (_id, _id, _id))
# check there is something like:
# %.329 = fmul fast double %.325, 5.000000e-01
# %.337 = fadd fast double %A.07, %.329
for name, kernel in ir.items():
splitted = kernel.splitlines()
for i, x in enumerate(splitted):
if reciprocal_inst.match(x):
break
self.assertTrue(fadd_inst.match(splitted[i + 1]))
def test_parfor_alias1(self):
def test_impl(n):
b = np.zeros((n, n))
a = b[0]
for j in range(n):
a[j] = j + 1
return b.sum()
self.prange_tester(test_impl, 4)
def test_parfor_alias2(self):
def test_impl(n):
b = np.zeros((n, n))
for i in range(n):
a = b[i]
for j in range(n):
a[j] = i + j
return b.sum()
self.prange_tester(test_impl, 4)
def test_parfor_alias3(self):
def test_impl(n):
b = np.zeros((n, n, n))
for i in range(n):
a = b[i]
for j in range(n):
c = a[j]
for k in range(n):
c[k] = i + j + k
return b.sum()
self.prange_tester(test_impl, 4)
def test_parfor_race_1(self):
def test_impl(x, y):
for j in range(y):
k = x
return k
raised_warnings = self.prange_tester(test_impl, 10, 20)
warning_obj = raised_warnings[0]
expected_msg = ("Variable k used in parallel loop may be written to "
"simultaneously by multiple workers and may result "
"in non-deterministic or unintended results.")
self.assertIn(expected_msg, str(warning_obj.message))
def test_nested_parfor_push_call_vars(self):
""" issue 3686: if a prange has something inside it that causes
a nested parfor to be generated and both the inner and outer
parfor use the same call variable defined outside the parfors
then ensure that when that call variable is pushed into the
parfor that the call variable isn't duplicated with the same
name resulting in a redundant type lock.
"""
def test_impl():
B = 0
f = np.negative
for i in range(1):
this_matters = f(1.)
B += f(np.zeros(1,))[0]
for i in range(2):
this_matters = f(1.)
B += f(np.zeros(1,))[0]
return B
self.prange_tester(test_impl)
def test_copy_global_for_parfor(self):
""" issue4903: a global is copied next to a parfor so that
it can be inlined into the parfor and thus not have to be
passed to the parfor (i.e., an unsupported function type).
This global needs to be renamed in the block into which
it is copied.
"""
def test_impl(zz, tc):
lh = np.zeros(len(tc))
lc = np.zeros(len(tc))
for i in range(1):
nt = tc[i]
for t in range(nt):
lh += np.exp(zz[i, t])
for t in range(nt):
lc += np.exp(zz[i, t])
return lh, lc
m = 2
zz = np.ones((m, m, m))
tc = np.ones(m, dtype=np.int_)
self.prange_tester(test_impl, zz, tc, patch_instance=[0])
def test_multiple_call_getattr_object(self):
def test_impl(n):
B = 0
f = np.negative
for i in range(1):
this_matters = f(1.0)
B += f(n)
return B
self.prange_tester(test_impl, 1.0)
def test_argument_alias_recarray_field(self):
# Test for issue4007.
def test_impl(n):
for i in range(len(n)):
n.x[i] = 7.0
return n
X1 = np.zeros(10, dtype=[('x', float), ('y', int), ])
X2 = np.zeros(10, dtype=[('x', float), ('y', int), ])
X3 = np.zeros(10, dtype=[('x', float), ('y', int), ])
v1 = X1.view(np.recarray)
v2 = X2.view(np.recarray)
v3 = X3.view(np.recarray)
# Numpy doesn't seem to support almost equal on recarray.
# So, we convert to list and use assertEqual instead.
python_res = list(test_impl(v1))
njit_res = list(njit(test_impl)(v2))
pa_func = njit(test_impl, parallel=True)
pa_res = list(pa_func(v3))
self.assertEqual(python_res, njit_res)
self.assertEqual(python_res, pa_res)
def test_mutable_list_param(self):
""" issue3699: test that mutable variable to call in loop
is not hoisted. The call in test_impl forces a manual
check here rather than using prange_tester.
"""
@njit
def list_check(X):
""" If the variable X is hoisted in the test_impl prange
then subsequent list_check calls would return increasing
values.
"""
ret = X[-1]
a = X[-1] + 1
X.append(a)
return ret
def test_impl(n):
for i in prange(n):
X = [100]
a = list_check(X)
return a
python_res = test_impl(10)
njit_res = njit(test_impl)(10)
pa_func = njit(test_impl, parallel=True)
pa_res = pa_func(10)
self.assertEqual(python_res, njit_res)
self.assertEqual(python_res, pa_res)
def test_list_comprehension_prange(self):
# issue4569
def test_impl(x):
return np.array([len(x[i]) for i in range(len(x))])
x = [np.array([1,2,3], dtype=int),np.array([1,2], dtype=int)]
self.prange_tester(test_impl, x)
def test_ssa_false_reduction(self):
# issue5698
# SSA for h creates assignments to h that make it look like a
# reduction variable except that it lacks an associated
# reduction operator. Test here that h is excluded as a
# reduction variable.
def test_impl(image, a, b):
empty = np.zeros(image.shape)
for i in range(image.shape[0]):
r = image[i][0] / 255.0
if a == 0:
h = 0
if b == 0:
h = 0
empty[i] = [h, h, h]
return empty
image = np.zeros((3, 3), dtype=np.int32)
self.prange_tester(test_impl, image, 0, 0)
def test_list_setitem_hoisting(self):
# issue5979
# Don't hoist list initialization if list item set.
def test_impl():
n = 5
a = np.empty(n, dtype=np.int64)
for k in range(5):
X = [0]
X[0] = 1
a[k] = X[0]
return a
self.prange_tester(test_impl)
def test_record_array_setitem(self):
# issue6704
state_dtype = np.dtype([('var', np.int32)])
def test_impl(states):
for i in range(1):
states[i]['var'] = 1
def comparer(a, b):
assert(a[0]['var'] == b[0]['var'])
self.prange_tester(test_impl,
np.zeros(shape=1, dtype=state_dtype),
check_arg_equality=[comparer])
def test_record_array_setitem_yield_array(self):
state_dtype = np.dtype([('x', np.intp)])
def test_impl(states):
n = states.size
for i in range(states.size):
states["x"][i] = 7 + i
return states
states = np.zeros(10, dtype=state_dtype)
def comparer(a, b):
np.testing.assert_equal(a, b)
self.prange_tester(test_impl,
states,
check_arg_equality=[comparer])
def test_issue7501(self):
def test_impl(size, case):
result = np.zeros((size,))
if case == 1:
for i in range(size):
result[i] += 1
else:
for i in range(size):
result[i] += 2
return result[0]
self.prange_tester(test_impl, 3, 1)
def test_kde_example(self):
def test_impl(X):
# KDE example
b = 0.5
points = np.array([-1.0, 2.0, 5.0])
N = points.shape[0]
n = X.shape[0]
exps = 0
for i in range(n):
p = X[i]
d = (-(p - points)**2) / (2 * b**2)
m = np.min(d)
exps += m - np.log(b * N) + np.log(np.sum(np.exp(d - m)))
return exps
n = 128
X = np.random.ranf(n)
self.prange_tester(test_impl, X)
@skip_parfors_unsupported
def test_issue_due_to_max_label(self):
# Run the actual test in a new process since it can only reproduce in
# a fresh state.
out = subp.check_output(
[sys.executable, '-m', 'numba.tests.parfors_max_label_error'],
timeout=30,
stderr=subp.STDOUT, # redirect stderr to stdout
)
self.assertIn("TEST PASSED", out.decode())
@skip_parfors_unsupported
def test_issue7578(self):
def test_impl(x):
A = np.zeros_like(x)
tmp = np.cos(x) # this can be any 1-arity ufunc
for i in range(len(x)):
A[i] = tmp.sum()
return A
x = np.arange(10.)
self.prange_tester(test_impl, x)
@skip_parfors_unsupported
class TestParforChunksizing(TestCase):
"""
Tests chunksize handling in ParallelAccelerator.
"""
_numba_parallel_test_ = False
def setUp(self):
set_parallel_chunksize(0)
def tearDown(self):
set_parallel_chunksize(0)
def test_python_parallel_chunksize_basic(self):
# Test basic chunksize operations outside njit.
self.assertEqual(get_parallel_chunksize(), 0)
set_parallel_chunksize(8)
self.assertEqual(get_parallel_chunksize(), 8)
set_parallel_chunksize(0)
self.assertEqual(get_parallel_chunksize(), 0)
def test_python_with_chunksize(self):
# Test "with parallel_chunksize" outside njit.
self.assertEqual(get_parallel_chunksize(), 0)
with parallel_chunksize(8):
self.assertEqual(get_parallel_chunksize(), 8)
self.assertEqual(get_parallel_chunksize(), 0)
def test_njit_parallel_chunksize_basic(self):
# Test basic chunksize operations inside njit.
@njit
def get_cs():
return get_parallel_chunksize()
@njit
def set_cs(x):
return set_parallel_chunksize(x)
self.assertEqual(get_cs(), 0)
set_cs(8)
self.assertEqual(get_cs(), 8)
set_cs(0)
self.assertEqual(get_cs(), 0)
def test_njit_with_chunksize(self):
# Test "with parallel_chunksize" inside njit.
@njit
def test_impl(x):
cs1 = get_parallel_chunksize()
with parallel_chunksize(8):
cs2 = get_parallel_chunksize()
cs3 = get_parallel_chunksize()
return cs1, cs2, cs3
cs1, cs2, cs3 = test_impl(8)
self.assertEqual(cs1, 0)
self.assertEqual(cs2, 8)
self.assertEqual(cs3, 0)
def test_all_iterations_reset_chunksize(self):
""" Test that all the iterations get run if you set the
chunksize. Also check that the chunksize that each
worker thread sees has been reset to 0. """
@njit(parallel=True)
def test_impl(cs, n):
res = np.zeros(n)
inner_cs = np.full(n, -13)
with numba.parallel_chunksize(cs):
for i in numba.prange(n):
inner_cs[i] = numba.get_parallel_chunksize()
res[i] = 13
return res, inner_cs
# Test a variety of array and chunk sizes.
        # 1000 is a round number, 997 is prime, 943 is the product of two
        # primes, and 961 is the square of a prime.
for j in [1000, 997, 943, 961]:
for i in range(15):
res, inner_cs = test_impl(i+1, j)
self.assertTrue(np.all(res == 13))
self.assertTrue(np.all(inner_cs == 0))
def test_njit_parallel_chunksize_negative(self):
# Test negative set_parallel_chunksize inside njit.
with self.assertRaises(ValueError) as raised:
@njit
def neg_test():
set_parallel_chunksize(-1)
neg_test()
msg = "chunksize must be greater than or equal to zero"
self.assertIn(msg, str(raised.exception))
def test_python_parallel_chunksize_negative(self):
# Test negative set_parallel_chunksize outside njit.
with self.assertRaises(ValueError) as raised:
set_parallel_chunksize(-1)
msg = "chunksize must be greater than or equal to zero"
self.assertIn(msg, str(raised.exception))
def test_njit_parallel_chunksize_invalid_type(self):
with self.assertRaises(errors.TypingError) as raised:
@njit
def impl():
set_parallel_chunksize('invalid_type')
impl()
msg = "The parallel chunksize must be an integer"
self.assertIn(msg, str(raised.exception))
def test_python_parallel_chunksize_invalid_type(self):
with self.assertRaises(TypeError) as raised:
set_parallel_chunksize('invalid_type')
msg = "The parallel chunksize must be an integer"
self.assertIn(msg, str(raised.exception))
@skip_parfors_unsupported
@x86_only
class TestParforsVectorizer(TestPrangeBase):
# env mutating test
_numba_parallel_test_ = False
def get_gufunc_asm(self, func, schedule_type, *args, **kwargs):
fastmath = kwargs.pop('fastmath', False)
cpu_name = kwargs.pop('cpu_name', 'skylake-avx512')
assertions = kwargs.pop('assertions', True)
# force LLVM to use zmm registers for vectorization
# https://reviews.llvm.org/D67259
cpu_features = kwargs.pop('cpu_features', '-prefer-256-bit')
env_opts = {'NUMBA_CPU_NAME': cpu_name,
'NUMBA_CPU_FEATURES': cpu_features,
}
overrides = []
for k, v in env_opts.items():
overrides.append(override_env_config(k, v))
with overrides[0], overrides[1]:
sig = tuple([numba.typeof(x) for x in args])
pfunc_vectorizable = self.generate_prange_func(func, None)
            if fastmath:
cres = self.compile_parallel_fastmath(pfunc_vectorizable, sig)
else:
cres = self.compile_parallel(pfunc_vectorizable, sig)
# get the gufunc asm
asm = self._get_gufunc_asm(cres)
if assertions:
                schedty = re.compile(r'call\s+\w+\*\s+@do_scheduling_(\w+)\(')
matches = schedty.findall(cres.library.get_llvm_str())
self.assertGreaterEqual(len(matches), 1) # at least 1 parfor call
self.assertEqual(matches[0], schedule_type)
self.assertNotEqual(asm, {})
return asm
@linux_only
def test_vectorizer_fastmath_asm(self):
""" This checks that if fastmath is set and the underlying hardware
is suitable, and the function supplied is amenable to fastmath based
vectorization, that the vectorizer actually runs.
"""
# This function will benefit from `fastmath` if run on a suitable
# target. The vectorizer should unwind the loop and generate
# packed dtype=double add and sqrt instructions.
def will_vectorize(A):
n = len(A)
acc = 0
for i in range(n):
acc += np.sqrt(i)
return acc
arg = np.zeros(10)
fast_asm = self.get_gufunc_asm(will_vectorize, 'unsigned', arg,
fastmath=True)
slow_asm = self.get_gufunc_asm(will_vectorize, 'unsigned', arg,
fastmath=False)
for v in fast_asm.values():
# should unwind and call vector sqrt then vector add
# all on packed doubles using zmm's
self.assertTrue('vaddpd' in v)
self.assertTrue('vsqrtpd' in v or '__svml_sqrt' in v)
self.assertTrue('zmm' in v)
for v in slow_asm.values():
# vector variants should not be present
self.assertTrue('vaddpd' not in v)
self.assertTrue('vsqrtpd' not in v)
# check scalar variant is present
self.assertTrue('vsqrtsd' in v and '__svml_sqrt' not in v)
self.assertTrue('vaddsd' in v)
# check no zmm addressing is present
self.assertTrue('zmm' not in v)
@linux_only
def test_unsigned_refusal_to_vectorize(self):
""" This checks that if fastmath is set and the underlying hardware
is suitable, and the function supplied is amenable to fastmath based
vectorization, that the vectorizer actually runs.
"""
def will_not_vectorize(A):
n = len(A)
for i in range(-n, 0):
A[i] = np.sqrt(A[i])
return A
def will_vectorize(A):
n = len(A)
for i in range(n):
A[i] = np.sqrt(A[i])
return A
arg = np.zeros(10)
# Boundschecking breaks vectorization
with override_env_config('NUMBA_BOUNDSCHECK', '0'):
novec_asm = self.get_gufunc_asm(will_not_vectorize, 'signed', arg,
fastmath=True)
vec_asm = self.get_gufunc_asm(will_vectorize, 'unsigned', arg,
fastmath=True)
for v in novec_asm.values():
# vector variant should not be present
self.assertTrue('vsqrtpd' not in v)
# check scalar variant is present
self.assertTrue('vsqrtsd' in v)
# check no zmm addressing is present
self.assertTrue('zmm' not in v)
for v in vec_asm.values():
# should unwind and call vector sqrt then vector mov
# all on packed doubles using zmm's
self.assertTrue('vsqrtpd' in v or '__svml_sqrt' in v)
self.assertTrue('vmovupd' in v)
self.assertTrue('zmm' in v)
@linux_only
# needed as 32bit doesn't have equivalent signed/unsigned instruction
# generation for this function
def test_signed_vs_unsigned_vec_asm(self):
""" This checks vectorization for signed vs unsigned variants of a
trivial accumulator, the only meaningful difference should be the
presence of signed vs. unsigned unpack instructions (for the
induction var).
"""
def signed_variant():
n = 4096
A = 0.
for i in range(-n, 0):
A += i
return A
def unsigned_variant():
n = 4096
A = 0.
for i in range(n):
A += i
return A
# Boundschecking breaks the diff check below because of the pickled exception
with override_env_config('NUMBA_BOUNDSCHECK', '0'):
signed_asm = self.get_gufunc_asm(signed_variant, 'signed',
fastmath=True)
unsigned_asm = self.get_gufunc_asm(unsigned_variant, 'unsigned',
fastmath=True)
def strip_instrs(asm):
acc = []
for x in asm.splitlines():
spd = x.strip()
# filter out anything that isn't a trivial instruction
# and anything with the gufunc id as it contains an address
if spd != '' and not (spd.startswith('.')
or spd.startswith('_')
or spd.startswith('"')
or '__numba_parfor_gufunc' in spd):
acc.append(re.sub('[\t]', '', spd))
return acc
for k, v in signed_asm.items():
signed_instr = strip_instrs(v)
break
for k, v in unsigned_asm.items():
unsigned_instr = strip_instrs(v)
break
from difflib import SequenceMatcher as sm
# make sure that the only difference in instruction (if there is a
# difference) is the char 'u'. For example:
# vcvtsi2sdq vs. vcvtusi2sdq
self.assertEqual(len(signed_instr), len(unsigned_instr))
for a, b in zip(signed_instr, unsigned_instr):
if a == b:
continue
else:
s = sm(lambda x: x == '\t', a, b)
ops = s.get_opcodes()
for op in ops:
if op[0] == 'insert':
self.assertEqual(b[op[-2]:op[-1]], 'u')
@skip_parfors_unsupported
class TestParforReductionSetNumThreads(TestCase):
"""Test execution correctness on reductions with set_num_threads.
"""
def test_add(self):
N = config.NUMBA_NUM_THREADS
M = 2 * N
mask = N - 1
@njit(parallel=True)
def udt(nthreads):
acc = 0
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
set_num_threads(local_mask)
gnt = get_num_threads()
acc += gnt
return acc
expect = udt.py_func(mask)
got = udt(mask)
self.assertPreciseEqual(expect, got)
def test_mul(self):
# This min will prevent larger thread counts from generating
# overflow in the loop below.
N = min(4, config.NUMBA_NUM_THREADS)
M = 2 * N
mask = N - 1
@njit(parallel=True)
def udt(nthreads):
acc = 1
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
set_num_threads(local_mask)
gnt = get_num_threads()
acc *= gnt
return acc
expect = udt.py_func(mask)
got = udt(mask)
self.assertPreciseEqual(expect, got)
def test_max(self):
N = config.NUMBA_NUM_THREADS
M = 2 * N
mask = N - 1
@njit(parallel=True)
def udt(nthreads):
acc = 1
set_num_threads(nthreads)
for i in prange(M):
local_mask = 1 + i % mask
set_num_threads(local_mask)
gnt = get_num_threads()
acc = max(acc, gnt)
return acc
expect = udt.py_func(mask)
got = udt(mask)
self.assertPreciseEqual(expect, got)
@skip_parfors_unsupported
class TestDiagnosticEnvVar(TestCase):
@TestCase.run_test_in_subprocess()
def test_diagnostics_env_var1(self):
os.environ['NUMBA_PARALLEL_DIAGNOSTICS']='4'
with captured_stdout() as stdout:
@njit(parallel=True)
def impl():
n = 100
b = np.zeros((n), dtype=np.float64)
for i in prange(n):
b[i] = 1
return b
impl()
the_output = stdout.getvalue()
self.assertIn("Parallel Accelerator Optimizing", the_output)
if __name__ == "__main__":
unittest.main()
|
789f9f7cae89bdba1a614249b54e547af573aad6
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/codegen/fcode/scripts/classes_3.py
|
26776c85fed1b14fbfd8fce0a2c7e8beb4c80917
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 735
|
py
|
classes_3.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring, missing-class-docstring
from numpy import ones
#$ header class Point(public)
#$ header method __init__(Point, [double])
#$ header method __del__(Point)
#$ header method translate(Point, [double])
#$ header class Points(public)
#$ header method __init__(Points, Point)
#$ header method __del__(Points)
class Point(object):
def __init__(self, x):
self.x = x
def __del__(self):
pass
def translate(self, a):
self.x = self.x + a
class Points(object):
def __init__(self, x):
self.x = x
def __del__(self):
pass
x = [1., 1., 1.]
P1 = Point(x)
P2 = Points(P1)
P3 = P2.x
P4 = P2
P5 = P2.x.x
print(x,P5)
|
e4e0b279d16f0740cdc22613de89629688054e5d
|
c50e7eb190802d7849c0d0cea02fb4d2f0021777
|
/src/express-route-cross-connection/azext_expressroutecrossconnection/vendored_sdks/v2018_04_01/models/virtual_network_peering_py3.py
|
fc3489a026b3ddbee39643f330bb79846ad039bf
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/azure-cli-extensions
|
c1615b19930bba7166c282918f166cd40ff6609c
|
b8c2cf97e991adf0c0a207d810316b8f4686dc29
|
refs/heads/main
| 2023-08-24T12:40:15.528432
| 2023-08-24T09:17:25
| 2023-08-24T09:17:25
| 106,580,024
| 336
| 1,226
|
MIT
| 2023-09-14T10:48:57
| 2017-10-11T16:27:31
|
Python
|
UTF-8
|
Python
| false
| false
| 4,750
|
py
|
virtual_network_peering_py3.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource_py3 import SubResource
class VirtualNetworkPeering(SubResource):
"""Peerings in a virtual network resource.
:param id: Resource ID.
:type id: str
:param allow_virtual_network_access: Whether the VMs in the linked virtual
network space would be able to access all the VMs in local Virtual network
space.
:type allow_virtual_network_access: bool
:param allow_forwarded_traffic: Whether the forwarded traffic from the VMs
in the remote virtual network will be allowed/disallowed.
:type allow_forwarded_traffic: bool
:param allow_gateway_transit: If gateway links can be used in remote
virtual networking to link to this virtual network.
:type allow_gateway_transit: bool
:param use_remote_gateways: If remote gateways can be used on this virtual
network. If the flag is set to true, and allowGatewayTransit on remote
peering is also true, virtual network will use gateways of remote virtual
network for transit. Only one peering can have this flag set to true. This
flag cannot be set if virtual network already has a gateway.
:type use_remote_gateways: bool
:param remote_virtual_network: The reference of the remote virtual
network. The remote virtual network can be in the same or different region
(preview). See here to register for the preview and learn more
(https://docs.microsoft.com/azure/virtual-network/virtual-network-create-peering).
:type remote_virtual_network:
~azure.mgmt.network.v2018_04_01.models.SubResource
:param remote_address_space: The reference of the remote virtual network
address space.
:type remote_address_space:
~azure.mgmt.network.v2018_04_01.models.AddressSpace
:param peering_state: The status of the virtual network peering. Possible
values are 'Initiated', 'Connected', and 'Disconnected'. Possible values
include: 'Initiated', 'Connected', 'Disconnected'
:type peering_state: str or
~azure.mgmt.network.v2018_04_01.models.VirtualNetworkPeeringState
:param provisioning_state: The provisioning state of the resource.
:type provisioning_state: str
:param name: The name of the resource that is unique within a resource
group. This name can be used to access the resource.
:type name: str
:param etag: A unique read-only string that changes whenever the resource
is updated.
:type etag: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'allow_virtual_network_access': {'key': 'properties.allowVirtualNetworkAccess', 'type': 'bool'},
'allow_forwarded_traffic': {'key': 'properties.allowForwardedTraffic', 'type': 'bool'},
'allow_gateway_transit': {'key': 'properties.allowGatewayTransit', 'type': 'bool'},
'use_remote_gateways': {'key': 'properties.useRemoteGateways', 'type': 'bool'},
'remote_virtual_network': {'key': 'properties.remoteVirtualNetwork', 'type': 'SubResource'},
'remote_address_space': {'key': 'properties.remoteAddressSpace', 'type': 'AddressSpace'},
'peering_state': {'key': 'properties.peeringState', 'type': 'str'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, *, id: str=None, allow_virtual_network_access: bool=None, allow_forwarded_traffic: bool=None, allow_gateway_transit: bool=None, use_remote_gateways: bool=None, remote_virtual_network=None, remote_address_space=None, peering_state=None, provisioning_state: str=None, name: str=None, etag: str=None, **kwargs) -> None:
super(VirtualNetworkPeering, self).__init__(id=id, **kwargs)
self.allow_virtual_network_access = allow_virtual_network_access
self.allow_forwarded_traffic = allow_forwarded_traffic
self.allow_gateway_transit = allow_gateway_transit
self.use_remote_gateways = use_remote_gateways
self.remote_virtual_network = remote_virtual_network
self.remote_address_space = remote_address_space
self.peering_state = peering_state
self.provisioning_state = provisioning_state
self.name = name
self.etag = etag
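# Illustrative sketch only (not emitted by AutoRest): one plausible way to
# build an instance of this model by hand, e.g. as the body of a peering PUT
# request. The subscription and resource names below are made-up placeholders.
def _example_peering():
    remote_vnet = SubResource(
        id='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/'
           'example-rg/providers/Microsoft.Network/virtualNetworks/remote-vnet')
    return VirtualNetworkPeering(
        name='peer-to-remote-vnet',
        allow_virtual_network_access=True,
        allow_forwarded_traffic=False,
        allow_gateway_transit=False,
        use_remote_gateways=False,
        remote_virtual_network=remote_vnet,
        peering_state='Initiated')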
|
fbc68882dd01fe980bb1aa48e14cc6d899022d41
|
bede13ba6e7f8c2750815df29bb2217228e91ca5
|
/subscription_package/models/sale_order.py
|
ad6b854e2623bae7cd82197476576a4a53580408
|
[] |
no_license
|
CybroOdoo/CybroAddons
|
f44c1c43df1aad348409924603e538aa3abc7319
|
4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14
|
refs/heads/16.0
| 2023-09-01T17:52:04.418982
| 2023-09-01T11:43:47
| 2023-09-01T11:43:47
| 47,947,919
| 209
| 561
| null | 2023-09-14T01:47:59
| 2015-12-14T02:38:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,440
|
py
|
sale_order.py
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2022-TODAY Cybrosys Technologies(<https://www.cybrosys.com>)
# Author: Cybrosys Techno Solutions(<https://www.cybrosys.com>)
#
# You can modify it under the terms of the GNU LESSER
# GENERAL PUBLIC LICENSE (LGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU LESSER GENERAL PUBLIC LICENSE (LGPL v3) for more details.
#
# You should have received a copy of the GNU LESSER GENERAL PUBLIC LICENSE
# (LGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
from odoo import fields, models, api
class SaleOrder(models.Model):
""" This class is used to inherit sale order"""
_inherit = 'sale.order'
subscription_count = fields.Integer(string='Subscriptions',
compute='_compute_subscription_count')
    def _compute_subscription_count(self):
        for order in self:
            order.subscription_count = self.env[
                'subscription.package'].search_count(
                [('sale_order', '=', order.id)])
def button_subscription(self):
return {
'name': 'Subscription',
'sale_order': False,
'domain': [('sale_order', '=', self.id)],
'view_type': 'form',
'res_model': 'subscription.package',
'view_mode': 'tree,form',
'type': 'ir.actions.act_window',
'context': {
"create": False
}
}
def _action_confirm(self):
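        # Unless the order already has exactly one linked subscription, create
        # a subscription.package record for every order line whose product is
        # flagged as a subscription product.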
if self.subscription_count != 1:
if self.order_line:
for line in self.order_line:
if line.product_id.is_subscription:
this_products_line = []
rec_list = [0, 0, {'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'unit_price': line.price_unit}]
this_products_line.append(rec_list)
self.env['subscription.package'].create(
{
'sale_order': self.id,
'reference_code': self.env['ir.sequence'].next_by_code('sequence.reference.code'),
'start_date': fields.Date.today(),
'stage_id': self.env.ref('subscription_package.draft_stage').id,
'partner_id': self.partner_id.id,
'plan_id': line.product_id.subscription_plan_id.id,
'product_line_ids': this_products_line
})
return super()._action_confirm()
class SubscriptionInherit(models.Model):
""" This class is used to inherit subscription packages"""
_inherit = 'subscription.package'
sale_order_count = fields.Integer()
|
c52fa231cc5272c6bba1ec9296f6f5eaddd5d329
|
fb1e852da0a026fb59c8cb24aeb40e62005501f1
|
/beit2/run_vqkd_training.py
|
f68854bd49eb0a674a51a21c42f6d7a4568931d3
|
[
"LGPL-2.1-or-later",
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
microsoft/unilm
|
134aa44867c5ed36222220d3f4fd9616d02db573
|
b60c741f746877293bb85eed6806736fc8fa0ffd
|
refs/heads/master
| 2023-08-31T04:09:05.779071
| 2023-08-29T14:07:57
| 2023-08-29T14:07:57
| 198,350,484
| 15,313
| 2,192
|
MIT
| 2023-08-19T11:33:20
| 2019-07-23T04:15:28
|
Python
|
UTF-8
|
Python
| false
| false
| 15,480
|
py
|
run_vqkd_training.py
|
# --------------------------------------------------------
# BEiT v2: Masked Image Modeling with Vector-Quantized Visual Tokenizers (https://arxiv.org/abs/2208.06366)
# Github source: https://github.com/microsoft/unilm/tree/master/beitv2
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Zhiliang Peng
# Based on BEiT, timm, DeiT and DINO code bases
# https://github.com/microsoft/unilm/tree/master/beit
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.models import create_model
from optim_factory import create_optimizer
from datasets import build_vqkd_dataset
from engine_for_vqkd import evaluate, train_one_epoch, calculate_codebook_usage
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_vqkd
def get_args():
parser = argparse.ArgumentParser('BEiT pre-training script', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=100, type=int)
parser.add_argument('--save_ckpt_freq', default=20, type=int)
# Model parameters
parser.add_argument('--model', default='vqkd_encoder_base_decoder_3x768x12_clip', type=str, metavar='MODEL', help='Name of model to train')
parser.add_argument('--rec_loss_type', default='cosine', type=str, metavar='MODEL',
help='type of loss to calculate reconstruction distance')
parser.add_argument('--codebook_n_emd', default=8192, type=int, metavar='MODEL',
                        help='number of codebook entries')
parser.add_argument('--codebook_emd_dim', default=32, type=int, metavar='MODEL',
                        help='dimension of each codebook embedding')
parser.add_argument('--ema_decay', default=0.99, type=float, metavar='MODEL', help='ema decay for quantizer')
parser.add_argument('--quantize_kmeans_init', action='store_true', help='enable kmeans_init for quantizer')
parser.add_argument('--process_type', default='default', type=str, choices=['default', 'dall-e', 'imagenet_norm'],
                        help='Image process type (default, dall-e, imagenet_norm)')
parser.add_argument('--input_size', default=224, type=int, help='images input size for backbone')
# regress feature
parser.add_argument('--teacher_model_type', default='clip', type=str, help='teacher_model_type during training')
parser.add_argument('--teacher_input_size', default=224, type=int, help='teacher_input_size for clip-large p14')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay (default: 1e-4)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
        weight decay. We use a cosine schedule for WD.
        (Set it equal to args.weight_decay to keep the weight decay constant.)""")
parser.add_argument('--lr', type=float, default=5e-5, metavar='LR',
help='learning rate (default: 5e-5)')
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='steps to warmup LR, if scheduler supports')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0., metavar='PCT',
help='Color jitter factor (default: 0.)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic, lanczos; default: "bicubic")')
parser.add_argument('--min_crop_scale', type=float, default=0.08, metavar='PCT',
help='min_crop_scale (default: 0.08)')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default='', type=str, help='dataset path')
parser.add_argument('--data_set', default='image_folder', type=str, help='dataset path')
parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--dist_eval', action='store_true', default=True,
help='Enabling distributed evaluation')
parser.add_argument('--disable_eval', action='store_true', default=False)
parser.add_argument('--eval', action='store_true', default=False, help="Perform evaluation only")
parser.add_argument('--calculate_codebook_usage', action='store_true', default=False)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser.parse_args()
def get_model(args, **kwargs):
model = create_model(
args.model,
pretrained=False,
as_tokenzer=False,
n_code=args.codebook_n_emd,
code_dim=args.codebook_emd_dim,
img_size=args.input_size,
rec_loss_type=args.rec_loss_type,
teacher_model_type=args.teacher_model_type,
teacher_input_size=args.teacher_input_size,
decay=args.ema_decay,
quantize_kmeans_init=args.quantize_kmeans_init,
process_type=args.process_type
)
return model
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
# get dataset
dataset_train = build_vqkd_dataset(is_train=True, args=args)
if args.disable_eval:
dataset_val = None
else:
dataset_val = build_vqkd_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_rank = global_rank
num_training_steps_per_epoch = len(dataset_train) // args.batch_size // num_tasks
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
        if args.dist_eval and dataset_val is not None:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
model.to(device)
model_without_ddp = model
if not args.eval:
print("Model = %s" % str(model_without_ddp))
for part in ['encoder', 'decoder']:
        model_part = getattr(model, part)
n_learnable_parameters = sum(p.numel() for p in model_part.parameters() if p.requires_grad)
n_fix_parameters = sum(p.numel() for p in model_part.parameters() if not p.requires_grad)
print(f'number of learnable params in model.{part}: {n_learnable_parameters / 1e6} M')
print(f'number of fixed params in model.{part}: {n_fix_parameters / 1e6} M')
n_learnable_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
n_fix_parameters = sum(p.numel() for p in model.parameters() if not p.requires_grad)
print(f'total number of learnable params: {n_learnable_parameters / 1e6} M')
    print(f'total number of fixed params: {n_fix_parameters / 1e6} M')
total_batch_size = args.batch_size * utils.get_world_size()
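    # Linear learning-rate scaling: args.lr is interpreted as the rate for a
    # total batch size of 128 and is scaled with the effective global batch.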
args.lr = total_batch_size / 128 * args.lr
print("LR = %.8f" % args.lr)
print("Min LR = %.8f" % args.min_lr)
print("Weigth Decay = %.8f" % args.weight_decay)
print("Batch size = %d" % total_batch_size)
print("Number of training steps = %d" % num_training_steps_per_epoch)
print("Number of training examples per epoch = %d" % (total_batch_size * num_training_steps_per_epoch))
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
print("Use step level LR & WD scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
if args.eval:
test_stats = evaluate(data_loader_val, model, device, log_writer, 0, args=args)
exit(0)
if args.calculate_codebook_usage:
test_stats = calculate_codebook_usage(data_loader_val, model, device, log_writer, 0, args=args)
exit(0)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch)
train_stats = train_one_epoch(
model,
data_loader_train,
optimizer,
device,
epoch,
loss_scaler,
args.clip_grad,
log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
args=args
)
if args.output_dir:
# if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, save_ckpt_freq=args.save_ckpt_freq)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device, log_writer, epoch, args=args)
print(f"Validation loss of the network on the {len(dataset_val)} test images: {test_stats['loss']:.4f}")
if log_writer is not None:
log_writer.update(**test_stats, head="val/loss")
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch, 'n_parameters': n_learnable_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch, 'n_parameters': n_learnable_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts)
|