Dataset columns (type and observed range):

- blob_id: string, length 40
- directory_id: string, length 40
- path: string, length 4 to 721
- content_id: string, length 40
- detected_licenses: list, length 0 to 57
- license_type: string, 2 classes
- repo_name: string, length 5 to 91
- snapshot_id: string, length 40
- revision_id: string, length 40
- branch_name: string, 321 classes
- visit_date: timestamp[ns], 2016-08-12 09:31:09 to 2023-09-06 10:45:07
- revision_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
- committer_date: timestamp[ns], 2010-09-28 14:01:40 to 2023-09-06 06:22:19
- github_id: int64, 426 to 681M
- star_events_count: int64, 101 to 243k
- fork_events_count: int64, 0 to 110k
- gha_license_id: string, 23 classes
- gha_event_created_at: timestamp[ns], 2012-06-28 18:51:49 to 2023-09-14 21:59:16 (may be null)
- gha_created_at: timestamp[ns], 2008-02-11 22:55:26 to 2023-08-10 11:14:58 (may be null)
- gha_language: string, 147 classes
- src_encoding: string, 26 classes
- language: string, 2 classes
- is_vendor: bool, 2 classes
- is_generated: bool, 2 classes
- length_bytes: int64, 6 to 10.2M
- extension: string, 115 classes
- filename: string, length 3 to 113
- content: string, length 6 to 10.2M
---
blob_id: 7829f0b42b7974fab1bf92fa95c132771c2ebca9
directory_id: aeef2494b283012ed619870c4275e7d015f4017a
path: /sdk/python/pulumi_gcp/compute/packet_mirroring.py
content_id: d44ac168c65dfbe7bd3ea8a546f507358f885c30
detected_licenses: ["BSD-3-Clause", "MPL-2.0", "Apache-2.0"]
license_type: permissive
repo_name: pulumi/pulumi-gcp
snapshot_id: d4fd3f80c3df5290edaf33eb5eafe34e6699d0ff
revision_id: 7deea0a50a4ee5ab7bd722a83eca01707e298f85
branch_name: refs/heads/master
visit_date: 2023-08-31T07:12:45.921522
revision_date: 2023-08-31T06:16:27
committer_date: 2023-08-31T06:16:27
github_id: 97,485,806
star_events_count: 160
fork_events_count: 63
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:49:36
gha_created_at: 2017-07-17T14:28:37
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 30,651
extension: py
filename: packet_mirroring.py
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['PacketMirroringArgs', 'PacketMirroring']
@pulumi.input_type
class PacketMirroringArgs:
def __init__(__self__, *,
collector_ilb: pulumi.Input['PacketMirroringCollectorIlbArgs'],
mirrored_resources: pulumi.Input['PacketMirroringMirroredResourcesArgs'],
network: pulumi.Input['PacketMirroringNetworkArgs'],
description: Optional[pulumi.Input[str]] = None,
filter: Optional[pulumi.Input['PacketMirroringFilterArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
priority: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a PacketMirroring resource.
:param pulumi.Input['PacketMirroringCollectorIlbArgs'] collector_ilb: The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
:param pulumi.Input['PacketMirroringMirroredResourcesArgs'] mirrored_resources: A means of specifying which resources to mirror.
Structure is documented below.
:param pulumi.Input['PacketMirroringNetworkArgs'] network: Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
:param pulumi.Input[str] description: A human-readable description of the rule.
:param pulumi.Input['PacketMirroringFilterArgs'] filter: A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
:param pulumi.Input[str] name: The name of the packet mirroring rule
:param pulumi.Input[int] priority: Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
pulumi.set(__self__, "collector_ilb", collector_ilb)
pulumi.set(__self__, "mirrored_resources", mirrored_resources)
pulumi.set(__self__, "network", network)
if description is not None:
pulumi.set(__self__, "description", description)
if filter is not None:
pulumi.set(__self__, "filter", filter)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
@property
@pulumi.getter(name="collectorIlb")
def collector_ilb(self) -> pulumi.Input['PacketMirroringCollectorIlbArgs']:
"""
The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
"""
return pulumi.get(self, "collector_ilb")
@collector_ilb.setter
def collector_ilb(self, value: pulumi.Input['PacketMirroringCollectorIlbArgs']):
pulumi.set(self, "collector_ilb", value)
@property
@pulumi.getter(name="mirroredResources")
def mirrored_resources(self) -> pulumi.Input['PacketMirroringMirroredResourcesArgs']:
"""
A means of specifying which resources to mirror.
Structure is documented below.
"""
return pulumi.get(self, "mirrored_resources")
@mirrored_resources.setter
def mirrored_resources(self, value: pulumi.Input['PacketMirroringMirroredResourcesArgs']):
pulumi.set(self, "mirrored_resources", value)
@property
@pulumi.getter
def network(self) -> pulumi.Input['PacketMirroringNetworkArgs']:
"""
Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: pulumi.Input['PacketMirroringNetworkArgs']):
pulumi.set(self, "network", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A human-readable description of the rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input['PacketMirroringFilterArgs']]:
"""
A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input['PacketMirroringFilterArgs']]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the packet mirroring rule
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
@pulumi.input_type
class _PacketMirroringState:
def __init__(__self__, *,
collector_ilb: Optional[pulumi.Input['PacketMirroringCollectorIlbArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
filter: Optional[pulumi.Input['PacketMirroringFilterArgs']] = None,
mirrored_resources: Optional[pulumi.Input['PacketMirroringMirroredResourcesArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input['PacketMirroringNetworkArgs']] = None,
priority: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering PacketMirroring resources.
:param pulumi.Input['PacketMirroringCollectorIlbArgs'] collector_ilb: The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
:param pulumi.Input[str] description: A human-readable description of the rule.
:param pulumi.Input['PacketMirroringFilterArgs'] filter: A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
:param pulumi.Input['PacketMirroringMirroredResourcesArgs'] mirrored_resources: A means of specifying which resources to mirror.
Structure is documented below.
:param pulumi.Input[str] name: The name of the packet mirroring rule
:param pulumi.Input['PacketMirroringNetworkArgs'] network: Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
:param pulumi.Input[int] priority: Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
if collector_ilb is not None:
pulumi.set(__self__, "collector_ilb", collector_ilb)
if description is not None:
pulumi.set(__self__, "description", description)
if filter is not None:
pulumi.set(__self__, "filter", filter)
if mirrored_resources is not None:
pulumi.set(__self__, "mirrored_resources", mirrored_resources)
if name is not None:
pulumi.set(__self__, "name", name)
if network is not None:
pulumi.set(__self__, "network", network)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if project is not None:
pulumi.set(__self__, "project", project)
if region is not None:
pulumi.set(__self__, "region", region)
@property
@pulumi.getter(name="collectorIlb")
def collector_ilb(self) -> Optional[pulumi.Input['PacketMirroringCollectorIlbArgs']]:
"""
The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
"""
return pulumi.get(self, "collector_ilb")
@collector_ilb.setter
def collector_ilb(self, value: Optional[pulumi.Input['PacketMirroringCollectorIlbArgs']]):
pulumi.set(self, "collector_ilb", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A human-readable description of the rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def filter(self) -> Optional[pulumi.Input['PacketMirroringFilterArgs']]:
"""
A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
"""
return pulumi.get(self, "filter")
@filter.setter
def filter(self, value: Optional[pulumi.Input['PacketMirroringFilterArgs']]):
pulumi.set(self, "filter", value)
@property
@pulumi.getter(name="mirroredResources")
def mirrored_resources(self) -> Optional[pulumi.Input['PacketMirroringMirroredResourcesArgs']]:
"""
A means of specifying which resources to mirror.
Structure is documented below.
"""
return pulumi.get(self, "mirrored_resources")
@mirrored_resources.setter
def mirrored_resources(self, value: Optional[pulumi.Input['PacketMirroringMirroredResourcesArgs']]):
pulumi.set(self, "mirrored_resources", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the packet mirroring rule
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def network(self) -> Optional[pulumi.Input['PacketMirroringNetworkArgs']]:
"""
Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
"""
return pulumi.get(self, "network")
@network.setter
def network(self, value: Optional[pulumi.Input['PacketMirroringNetworkArgs']]):
pulumi.set(self, "network", value)
@property
@pulumi.getter
def priority(self) -> Optional[pulumi.Input[int]]:
"""
Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
"""
return pulumi.get(self, "priority")
@priority.setter
def priority(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "priority", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def region(self) -> Optional[pulumi.Input[str]]:
"""
The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
return pulumi.get(self, "region")
@region.setter
def region(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "region", value)
class PacketMirroring(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
collector_ilb: Optional[pulumi.Input[pulumi.InputType['PacketMirroringCollectorIlbArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
filter: Optional[pulumi.Input[pulumi.InputType['PacketMirroringFilterArgs']]] = None,
mirrored_resources: Optional[pulumi.Input[pulumi.InputType['PacketMirroringMirroredResourcesArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[pulumi.InputType['PacketMirroringNetworkArgs']]] = None,
priority: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Packet Mirroring mirrors traffic to and from particular VM instances.
You can use the collected traffic to help you detect security threats
and monitor application performance.
To get more information about PacketMirroring, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/packetMirrorings)
* How-to Guides
* [Using Packet Mirroring](https://cloud.google.com/vpc/docs/using-packet-mirroring#creating)
## Example Usage
## Import
PacketMirroring can be imported using any of these accepted formats
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}
```
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default {{project}}/{{region}}/{{name}}
```
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default {{region}}/{{name}}
```
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default {{name}}
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PacketMirroringCollectorIlbArgs']] collector_ilb: The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
:param pulumi.Input[str] description: A human-readable description of the rule.
:param pulumi.Input[pulumi.InputType['PacketMirroringFilterArgs']] filter: A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['PacketMirroringMirroredResourcesArgs']] mirrored_resources: A means of specifying which resources to mirror.
Structure is documented below.
:param pulumi.Input[str] name: The name of the packet mirroring rule
:param pulumi.Input[pulumi.InputType['PacketMirroringNetworkArgs']] network: Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
:param pulumi.Input[int] priority: Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: PacketMirroringArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Packet Mirroring mirrors traffic to and from particular VM instances.
You can use the collected traffic to help you detect security threats
and monitor application performance.
To get more information about PacketMirroring, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/packetMirrorings)
* How-to Guides
* [Using Packet Mirroring](https://cloud.google.com/vpc/docs/using-packet-mirroring#creating)
## Example Usage
## Import
PacketMirroring can be imported using any of these accepted formats
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default projects/{{project}}/regions/{{region}}/packetMirrorings/{{name}}
```
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default {{project}}/{{region}}/{{name}}
```
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default {{region}}/{{name}}
```
```sh
$ pulumi import gcp:compute/packetMirroring:PacketMirroring default {{name}}
```
:param str resource_name: The name of the resource.
:param PacketMirroringArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(PacketMirroringArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
collector_ilb: Optional[pulumi.Input[pulumi.InputType['PacketMirroringCollectorIlbArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
filter: Optional[pulumi.Input[pulumi.InputType['PacketMirroringFilterArgs']]] = None,
mirrored_resources: Optional[pulumi.Input[pulumi.InputType['PacketMirroringMirroredResourcesArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[pulumi.InputType['PacketMirroringNetworkArgs']]] = None,
priority: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = PacketMirroringArgs.__new__(PacketMirroringArgs)
if collector_ilb is None and not opts.urn:
raise TypeError("Missing required property 'collector_ilb'")
__props__.__dict__["collector_ilb"] = collector_ilb
__props__.__dict__["description"] = description
__props__.__dict__["filter"] = filter
if mirrored_resources is None and not opts.urn:
raise TypeError("Missing required property 'mirrored_resources'")
__props__.__dict__["mirrored_resources"] = mirrored_resources
__props__.__dict__["name"] = name
if network is None and not opts.urn:
raise TypeError("Missing required property 'network'")
__props__.__dict__["network"] = network
__props__.__dict__["priority"] = priority
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
super(PacketMirroring, __self__).__init__(
'gcp:compute/packetMirroring:PacketMirroring',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
collector_ilb: Optional[pulumi.Input[pulumi.InputType['PacketMirroringCollectorIlbArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
filter: Optional[pulumi.Input[pulumi.InputType['PacketMirroringFilterArgs']]] = None,
mirrored_resources: Optional[pulumi.Input[pulumi.InputType['PacketMirroringMirroredResourcesArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
network: Optional[pulumi.Input[pulumi.InputType['PacketMirroringNetworkArgs']]] = None,
priority: Optional[pulumi.Input[int]] = None,
project: Optional[pulumi.Input[str]] = None,
region: Optional[pulumi.Input[str]] = None) -> 'PacketMirroring':
"""
Get an existing PacketMirroring resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['PacketMirroringCollectorIlbArgs']] collector_ilb: The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
:param pulumi.Input[str] description: A human-readable description of the rule.
:param pulumi.Input[pulumi.InputType['PacketMirroringFilterArgs']] filter: A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
:param pulumi.Input[pulumi.InputType['PacketMirroringMirroredResourcesArgs']] mirrored_resources: A means of specifying which resources to mirror.
Structure is documented below.
:param pulumi.Input[str] name: The name of the packet mirroring rule
:param pulumi.Input[pulumi.InputType['PacketMirroringNetworkArgs']] network: Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
:param pulumi.Input[int] priority: Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] region: The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _PacketMirroringState.__new__(_PacketMirroringState)
__props__.__dict__["collector_ilb"] = collector_ilb
__props__.__dict__["description"] = description
__props__.__dict__["filter"] = filter
__props__.__dict__["mirrored_resources"] = mirrored_resources
__props__.__dict__["name"] = name
__props__.__dict__["network"] = network
__props__.__dict__["priority"] = priority
__props__.__dict__["project"] = project
__props__.__dict__["region"] = region
return PacketMirroring(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="collectorIlb")
def collector_ilb(self) -> pulumi.Output['outputs.PacketMirroringCollectorIlb']:
"""
The Forwarding Rule resource (of type load_balancing_scheme=INTERNAL)
that will be used as collector for mirrored traffic. The
specified forwarding rule must have is_mirroring_collector
set to true.
Structure is documented below.
"""
return pulumi.get(self, "collector_ilb")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
A human-readable description of the rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def filter(self) -> pulumi.Output[Optional['outputs.PacketMirroringFilter']]:
"""
A filter for mirrored traffic. If unset, all traffic is mirrored.
Structure is documented below.
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter(name="mirroredResources")
def mirrored_resources(self) -> pulumi.Output['outputs.PacketMirroringMirroredResources']:
"""
A means of specifying which resources to mirror.
Structure is documented below.
"""
return pulumi.get(self, "mirrored_resources")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the packet mirroring rule
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def network(self) -> pulumi.Output['outputs.PacketMirroringNetwork']:
"""
Specifies the mirrored VPC network. Only packets in this network
will be mirrored. All mirrored VMs should have a NIC in the given
network. All mirrored subnetworks should belong to the given network.
Structure is documented below.
"""
return pulumi.get(self, "network")
@property
@pulumi.getter
def priority(self) -> pulumi.Output[int]:
"""
Since only one rule can be active at a time, priority is
used to break ties in the case of two rules that apply to
the same instances.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
return pulumi.get(self, "project")
@property
@pulumi.getter
def region(self) -> pulumi.Output[str]:
"""
The Region in which the created address should reside.
If it is not provided, the provider region is used.
"""
return pulumi.get(self, "region")
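The generated class above documents its import formats but ships with an empty "Example Usage" section. Below is a minimal usage sketch, not taken from the file: it assumes the nested field names `url` and `tags` on the `*Args` input types (defined in `._inputs`, which is not shown here) and uses placeholder self-links instead of real resources.

```python
import pulumi
import pulumi_gcp as gcp

# Placeholder self-links; in a real program these would come from other resources.
NETWORK_URL = "projects/my-project/global/networks/default"
COLLECTOR_ILB_URL = "projects/my-project/regions/us-central1/forwardingRules/collector"

mirroring = gcp.compute.PacketMirroring(
    "example-mirroring",
    region="us-central1",
    description="Mirror tagged VM traffic to the collector ILB",
    network=gcp.compute.PacketMirroringNetworkArgs(url=NETWORK_URL),            # `url` assumed from ._inputs
    collector_ilb=gcp.compute.PacketMirroringCollectorIlbArgs(url=COLLECTOR_ILB_URL),
    mirrored_resources=gcp.compute.PacketMirroringMirroredResourcesArgs(
        tags=["mirror-me"],                                                      # `tags` assumed from ._inputs
    ),
)

pulumi.export("packet_mirroring_name", mirroring.name)
```

An existing mirroring policy can instead be adopted with one of the `pulumi import` formats listed in the docstring above.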
---
blob_id: 1d739936d18b876fc9d33a4acfd59df02a6d52da
directory_id: bdf0d4d3aac186af3ad0ad6ac9f380f9a0573fba
path: /aries_cloudagent/ledger/multiple_ledger/tests/test_indy_ledger_requests.py
content_id: f087660fa58ca054ad6cbb33236bab3fb988190e
detected_licenses: ["LicenseRef-scancode-dco-1.1", "Apache-2.0"]
license_type: permissive
repo_name: hyperledger/aries-cloudagent-python
snapshot_id: f25d961e0717a4d703bf43df3e4b4bc8ec07b908
revision_id: 39cac36d8937ce84a9307ce100aaefb8bc05ec04
branch_name: refs/heads/main
visit_date: 2023-09-01T15:37:05.353674
revision_date: 2023-08-31T14:13:06
committer_date: 2023-08-31T14:13:06
github_id: 193,556,007
star_events_count: 370
fork_events_count: 530
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T17:59:34
gha_created_at: 2019-06-24T18:12:14
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,136
extension: py
filename: test_indy_ledger_requests.py
content:
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from ....core.in_memory import InMemoryProfile
from ...base import BaseLedger
from ...multiple_ledger.base_manager import (
BaseMultipleLedgerManager,
MultipleLedgerManagerError,
)
from ...indy import IndySdkLedger, IndySdkLedgerPool
from ..ledger_requests_executor import IndyLedgerRequestsExecutor
class TestIndyLedgerRequestsExecutor(AsyncTestCase):
async def setUp(self):
self.profile = InMemoryProfile.test_profile()
self.context = self.profile.context
setattr(self.context, "profile", self.profile)
self.profile.settings["ledger.ledger_config_list"] = [
{
"id": "test_prod_1",
"pool_name": "test_prod_1",
"is_production": True,
"genesis_transactions": "genesis_transactions",
}
]
self.ledger = IndySdkLedger(
IndySdkLedgerPool("test_prod_1", checked=True), self.profile
)
self.profile.context.injector.bind_instance(
BaseMultipleLedgerManager,
async_mock.MagicMock(
extract_did_from_identifier=async_mock.CoroutineMock(
return_value="WgWxqztrNooG92RXvxSTWv"
),
lookup_did_in_configured_ledgers=async_mock.CoroutineMock(
return_value=("test_prod_1", self.ledger)
),
get_ledger_inst_by_id=async_mock.CoroutineMock(
return_value=self.ledger
),
),
)
self.profile.context.injector.bind_instance(BaseLedger, self.ledger)
self.indy_ledger_requestor = IndyLedgerRequestsExecutor(self.profile)
async def test_get_ledger_for_identifier(self):
(
ledger_id,
ledger_inst,
) = await self.indy_ledger_requestor.get_ledger_for_identifier(
"WgWxqztrNooG92RXvxSTWv:2:schema_name:1.0", 0
)
assert ledger_id == "test_prod_1"
assert ledger_inst.pool.name == "test_prod_1"
async def test_get_ledger_inst(self):
ledger_inst = await self.indy_ledger_requestor.get_ledger_inst("test_prod_1")
assert ledger_inst
async def test_get_ledger_for_identifier_is_digit(self):
ledger_id, ledger = await self.indy_ledger_requestor.get_ledger_for_identifier(
"123", 0
)
assert ledger_id is None
assert ledger == self.ledger
async def test_get_ledger_for_identifier_x(self):
self.profile.context.injector.bind_instance(
BaseMultipleLedgerManager,
async_mock.MagicMock(
extract_did_from_identifier=async_mock.CoroutineMock(
return_value="WgWxqztrNooG92RXvxSTWv"
),
lookup_did_in_configured_ledgers=async_mock.CoroutineMock(
side_effect=MultipleLedgerManagerError
),
),
)
self.indy_ledger_requestor = IndyLedgerRequestsExecutor(self.profile)
ledger_id, ledger = await self.indy_ledger_requestor.get_ledger_for_identifier(
"WgWxqztrNooG92RXvxSTWv:2:schema_name:1.0", 0
)
assert ledger_id is None
assert ledger == self.ledger
async def test_get_ledger_for_identifier_mult_ledger_not_set(self):
self.profile.settings["ledger.ledger_config_list"] = None
self.indy_ledger_requestor = IndyLedgerRequestsExecutor(self.profile)
ledger_id, ledger = await self.indy_ledger_requestor.get_ledger_for_identifier(
"WgWxqztrNooG92RXvxSTWv:2:schema_name:1.0", 0
)
assert ledger_id is None
assert ledger == self.ledger
async def test_get_ledger_for_identifier_mult_ledger_not_cached(self):
(
ledger_id,
ledger_inst,
) = await self.indy_ledger_requestor.get_ledger_for_identifier(
"GUTK6XARozQCWxqzPSUr4g", 4
)
assert ledger_id == "test_prod_1"
assert ledger_inst.pool.name == "test_prod_1"
---
blob_id: d58443633615de899838c79bdc340be97348dcc4
directory_id: a96333bb48c34d18b7a99b2c724655dbc1fe2dbb
path: /python/tests/test_ppo.py
content_id: 8de54b17d51f14a61480440cb1dc36aeadfbe931
detected_licenses: ["MIT"]
license_type: permissive
repo_name: udacity/deep-reinforcement-learning
snapshot_id: cdcdbf5e315659d9980866560882930a433b9062
revision_id: 561eec3ae8678a23a4557f1a15414a9b076fdfff
branch_name: refs/heads/master
visit_date: 2023-07-08T06:33:54.653113
revision_date: 2021-12-06T22:42:31
committer_date: 2021-12-06T22:42:31
github_id: 140,018,843
star_events_count: 4,837
fork_events_count: 2,575
gha_license_id: MIT
gha_event_created_at: 2023-06-28T19:36:57
gha_created_at: 2018-07-06T18:36:23
gha_language: Jupyter Notebook
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 13,375
extension: py
filename: test_ppo.py
content:
import unittest.mock as mock
import pytest
import numpy as np
import tensorflow as tf
from unitytrainers.ppo.models import PPOModel
from unitytrainers.ppo.trainer import discount_rewards
from unityagents import UnityEnvironment
from .mock_communicator import MockCommunicator
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_cc_vector(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=False, visual_inputs=0)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"])
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.probs, model.value, model.entropy,
model.learning_rate]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]])}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_cc_visual(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=False, visual_inputs=2)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"])
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.probs, model.value, model.entropy,
model.learning_rate]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.visual_in[0]: np.ones([2, 40, 30, 3]),
model.visual_in[1]: np.ones([2, 40, 30, 3])}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_dc_visual(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=True, visual_inputs=2)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"])
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.visual_in[0]: np.ones([2, 40, 30, 3]),
model.visual_in[1]: np.ones([2, 40, 30, 3])
}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_dc_vector(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=True, visual_inputs=0)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"])
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]])}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_dc_vector_rnn(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=True, visual_inputs=0)
env = UnityEnvironment(' ')
memory_size = 128
model = PPOModel(env.brains["RealFakeBrain"], use_recurrent=True, m_size=memory_size)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate, model.memory_out]
feed_dict = {model.batch_size: 1,
model.sequence_length: 2,
model.prev_action: [0, 0],
model.memory_in: np.zeros((1, memory_size)),
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]])}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_cc_vector_rnn(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=False, visual_inputs=0)
env = UnityEnvironment(' ')
memory_size = 128
model = PPOModel(env.brains["RealFakeBrain"], use_recurrent=True, m_size=memory_size)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate, model.memory_out]
feed_dict = {model.batch_size: 1,
model.sequence_length: 2,
model.memory_in: np.zeros((1, memory_size)),
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]])}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_dc_vector_curio(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=True, visual_inputs=0)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"], use_curiosity=True)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate, model.intrinsic_reward]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.next_vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.action_holder: [0, 0]}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_cc_vector_curio(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=False, visual_inputs=0)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"], use_curiosity=True)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate, model.intrinsic_reward]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.next_vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.output: [[0.0, 0.0], [0.0, 0.0]]}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_dc_visual_curio(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=True, visual_inputs=2)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"], use_curiosity=True)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate, model.intrinsic_reward]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.next_vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.action_holder: [0, 0],
model.visual_in[0]: np.ones([2, 40, 30, 3]),
model.visual_in[1]: np.ones([2, 40, 30, 3]),
model.next_visual_in[0]: np.ones([2, 40, 30, 3]),
model.next_visual_in[1]: np.ones([2, 40, 30, 3])
}
sess.run(run_list, feed_dict=feed_dict)
env.close()
@mock.patch('unityagents.UnityEnvironment.executable_launcher')
@mock.patch('unityagents.UnityEnvironment.get_communicator')
def test_ppo_model_cc_visual_curio(mock_communicator, mock_launcher):
tf.reset_default_graph()
with tf.Session() as sess:
with tf.variable_scope("FakeGraphScope"):
mock_communicator.return_value = MockCommunicator(
discrete_action=False, visual_inputs=2)
env = UnityEnvironment(' ')
model = PPOModel(env.brains["RealFakeBrain"], use_curiosity=True)
init = tf.global_variables_initializer()
sess.run(init)
run_list = [model.output, model.all_probs, model.value, model.entropy,
model.learning_rate, model.intrinsic_reward]
feed_dict = {model.batch_size: 2,
model.sequence_length: 1,
model.vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.next_vector_in: np.array([[1, 2, 3, 1, 2, 3],
[3, 4, 5, 3, 4, 5]]),
model.output: [[0.0, 0.0], [0.0, 0.0]],
model.visual_in[0]: np.ones([2, 40, 30, 3]),
model.visual_in[1]: np.ones([2, 40, 30, 3]),
model.next_visual_in[0]: np.ones([2, 40, 30, 3]),
model.next_visual_in[1]: np.ones([2, 40, 30, 3])
}
sess.run(run_list, feed_dict=feed_dict)
env.close()
def test_rl_functions():
rewards = np.array([0.0, 0.0, 0.0, 1.0])
gamma = 0.9
returns = discount_rewards(rewards, gamma, 0.0)
np.testing.assert_array_almost_equal(returns, np.array([0.729, 0.81, 0.9, 1.0]))
if __name__ == '__main__':
pytest.main()
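The final test above checks `discount_rewards` against hard-coded values. As a standalone illustration of that arithmetic, independent of the `unitytrainers` package, a reverse-accumulation sketch reproduces the expected array for rewards [0, 0, 0, 1] and gamma 0.9:

```python
import numpy as np

def discounted_returns(rewards, gamma, value_next=0.0):
    """Reverse-accumulate G_t = r_t + gamma * G_{t+1}, seeded with a bootstrap value."""
    returns = np.zeros(len(rewards))
    running = value_next
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns

print(discounted_returns(np.array([0.0, 0.0, 0.0, 1.0]), 0.9))
# [0.729 0.81  0.9   1.   ]  -- matches the expectation in test_rl_functions
```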
---
blob_id: 646dcd2830268be00eeae846f87cdf1803358bf5
directory_id: e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
path: /hummingbot/connector/utilities/oms_connector/oms_connector_constants.py
content_id: 219df0feaf48ef17c46b66191d5987085e87bdc6
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: CoinAlpha/hummingbot
snapshot_id: 0d1e2bd94de1280748647108c7d7800a09546eb8
revision_id: c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
branch_name: refs/heads/development
visit_date: 2023-09-01T11:24:43.322137
revision_date: 2023-08-31T03:08:06
committer_date: 2023-08-31T03:08:06
github_id: 439,330,952
star_events_count: 135
fork_events_count: 98
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-30T13:55:08
gha_created_at: 2021-12-17T12:50:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,724
extension: py
filename: oms_connector_constants.py
content:
from hummingbot.core.api_throttler.data_types import LinkedLimitWeightPair, RateLimit
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.data_type.in_flight_order import OrderState
MAX_ID_BIT_COUNT = 63 # experimentally, 64 bit ints sometimes result in OMS assigning order IDs of zero
MAX_ORDER_NOT_FOUND_ON_CANCEL = 2
# rest endpoints
REST_AUTH_ENDPOINT = "Authenticate"
REST_PRODUCTS_ENDPOINT = "GetInstruments"
REST_GET_L1_ENDPOINT = "GetLevel1"
REST_GET_L2_SNAPSHOT_ENDPOINT = "GetL2Snapshot"
REST_PING_ENDPOINT = "Ping"
REST_ORDER_CREATION_ENDPOINT = "SendOrder"
REST_ORDER_STATUS_ENDPOINT = "GetOrderStatus"
REST_ORDER_CANCELATION_ENDPOINT = "CancelOrder"
REST_ACC_POSITIONS_ENDPOINT = "GetAccountPositions"
REST_TRADE_HISTORY_ENDPOINT = "GetTradesHistory"
_ALL_REST_ENDPOINTS = [
REST_AUTH_ENDPOINT,
REST_PRODUCTS_ENDPOINT,
REST_GET_L1_ENDPOINT,
REST_GET_L2_SNAPSHOT_ENDPOINT,
REST_PING_ENDPOINT,
REST_ORDER_CREATION_ENDPOINT,
REST_ORDER_STATUS_ENDPOINT,
REST_ORDER_CANCELATION_ENDPOINT,
REST_ACC_POSITIONS_ENDPOINT,
REST_TRADE_HISTORY_ENDPOINT,
]
# ws endpoints
WS_AUTH_ENDPOINT = "AuthenticateUser"
WS_ACC_EVENTS_ENDPOINT = "SubscribeAccountEvents"
WS_TRADES_SUB_ENDPOINT = "SubscribeTrades"
WS_L2_SUB_ENDPOINT = "SubscribeLevel2"
WS_PING_REQUEST = "Ping"
_ALL_WS_ENDPOINTS = [
WS_AUTH_ENDPOINT,
WS_ACC_EVENTS_ENDPOINT,
WS_TRADES_SUB_ENDPOINT,
WS_L2_SUB_ENDPOINT,
WS_PING_REQUEST,
]
# ws events
WS_L2_EVENT = "Level2UpdateEvent"
WS_ACC_POS_EVENT = "AccountPositionEvent"
WS_ORDER_STATE_EVENT = "OrderStateEvent"
WS_ORDER_TRADE_EVENT = "OrderTradeEvent"
WS_CANCEL_ORDER_REJECTED_EVENT = "CancelOrderRejectEvent"
# limits
REST_REQ_LIMIT_ID = "WSReqLimitID"
REST_REQ_LIMIT = 5_000
WS_REQ_LIMIT_ID = "WSReqLimitID"
WS_REQ_LIMIT = 500_000
RATE_LIMITS = [
RateLimit(REST_AUTH_ENDPOINT, limit=5_000, time_interval=60),
RateLimit(REST_REQ_LIMIT_ID, limit=REST_REQ_LIMIT, time_interval=60),
RateLimit(WS_AUTH_ENDPOINT, limit=50_000, time_interval=60),
RateLimit(WS_REQ_LIMIT_ID, limit=WS_REQ_LIMIT, time_interval=60),
]
for e in _ALL_REST_ENDPOINTS:
RATE_LIMITS.append( # each limit defined separately so that children can be more granular
RateLimit(
limit_id=e,
limit=REST_REQ_LIMIT,
time_interval=60,
linked_limits=[LinkedLimitWeightPair(limit_id=REST_REQ_LIMIT_ID)],
)
)
for e in _ALL_WS_ENDPOINTS: # noqa: F821
RATE_LIMITS.append( # each limit defined separately so that children can be more granular
RateLimit(
limit_id=e,
limit=WS_REQ_LIMIT,
time_interval=60,
linked_limits=[LinkedLimitWeightPair(limit_id=WS_REQ_LIMIT_ID)]
)
)
# endpoint constant settings
MAX_L2_SNAPSHOT_DEPTH = 400
INCLUDE_LAST_COUNT = 0
# msg types
REQ_MSG_TYPE = 0
RESP_MSG_TYPE = 1
EVENT_MSG_TYPE = 3
ERROR_MSG_TYPE = 5
# time in force types
GTC_TIF = 1
# order types
LIMIT_ORDER_TYPE = 2
ORDER_TYPES = {
OrderType.LIMIT: LIMIT_ORDER_TYPE
}
# order actions
BUY_ACTION = 0
SELL_ACTION = 1
ORDER_SIDE_MAP = {
"Buy": TradeType.BUY,
"Sell": TradeType.SELL,
}
# order state
ACTIVE_ORDER_STATE = "Working" # can be either OPEN or PARTIALLY_FILLED
CANCELED_ORDER_STATE = "Canceled"
REJECTED_ORDER_STATE = "Rejected"
EXPIRED_ORDER_STATE = "Expired"
FULLY_EXECUTED_ORDER_STATE = "FullyExecuted"
ORDER_STATE_MAP = {
CANCELED_ORDER_STATE: OrderState.CANCELED,
REJECTED_ORDER_STATE: OrderState.FAILED,
EXPIRED_ORDER_STATE: OrderState.FAILED,
FULLY_EXECUTED_ORDER_STATE: OrderState.FILLED,
}
# fields
OMS_ID_FIELD = "OMSId"
USER_FIELD = "User"
USER_ID_FIELD = "UserId"
USER_NAME_FIELD = "UserName"
ACCOUNT_ID_FIELD = "AccountId"
INSTRUMENT_ID_FIELD = "InstrumentId"
BASE_FIELD = "Product1Symbol"
BASE_ID_FIELD = "Product1"
QUOTE_FIELD = "Product2Symbol"
QUOTE_ID_FIELD = "Product2"
FEE_PRODUCT_ID_FIELD = "FeeProductId"
FEE_AMOUNT_FIELD = "Fee"
START_TIME_FIELD = "StartTime"
TRADE_ID_FIELD = "TradeId"
AUTHENTICATED_FIELD = "Authenticated"
SESSION_TOKEN_FIELD = "SessionToken"
API_KEY_FIELD = "APIKey"
SIGNATURE_FIELD = "Signature"
NONCE_FIELD = "Nonce"
DEPTH_FIELD = "Depth"
INCLUDE_LAST_COUNT_FIELD = "IncludeLastCount"
TIME_IN_FORCE_FIELD = "TimeInForce"
CLIENT_ORDER_ID_FIELD = "ClientOrderId"
CL_ORDER_ID_FIELD = "ClOrderId" # yes, this and the above are not typos...
ORDER_ID_FIELD = "OrderId"
SIDE_FIELD = "Side"
QUANTITY_FIELD = "quantity"
ORDER_TYPE_FIELD = "OrderType"
LIMIT_PRICE_FIELD = "LimitPrice"
PRICE_FIELD = "Price"
TRADE_TIME_MS_FIELD = "TradeTimeMS"
RESULT_FIELD = "result"
ERROR_CODE_FIELD = "errorcode"
ERROR_MSG_FIELD = "errormsg"
PRODUCT_SYMBOL_FIELD = "ProductSymbol"
SYMBOL_FIELD = "Symbol"
AMOUNT_FIELD = "Amount"
ORIGINAL_QUANTITY_FIELD = "OrigQuantity"
QUANTITY_EXECUTED_FIELD = "QuantityExecuted"
ORDER_STATE_FIELD = "OrderState"
AMOUNT_ON_HOLD_FIELD = "Hold"
ORDER_UPDATE_TS_FIELD = "LastUpdatedTime"
IS_DISABLED_FIELD = "IsDisable"
SESSION_STATUS_FIELD = "SessionStatus"
MIN_QUANT_FIELD = "MinimumQuantity"
MIN_PRICE_INCR_FIELD = "PriceIncrement"
MIN_QUANT_INCR_FIELD = "QuantityIncrement"
RECEIVE_TIME_FIELD = "ReceiveTime"
LAST_TRADED_PRICE_FIELD = "LastTradedPx"
MSG_TYPE_FIELD = "m"
MSG_SEQUENCE_FIELD = "i"
MSG_ENDPOINT_FIELD = "n"
MSG_DATA_FIELD = "o"
TRADE_UPDATE_INSTRUMENT_ID_FIELD = 1
TRADE_UPDATE_AMOUNT_FIELD = 2
TRADE_UPDATE_PRICE_FIELD = 3
TRADE_UPDATE_TS_FIELD = 6
TRADE_UPDATE_SIDE_FIELD = 8
DIFF_UPDATE_TS_FIELD = 2
DIFF_UPDATE_PRICE_FIELD = 6
DIFF_UPDATE_INSTRUMENT_ID_FIELD = 7
DIFF_UPDATE_AMOUNT_FIELD = 8
DIFF_UPDATE_SIDE_FIELD = 9
# other
RESOURCE_NOT_FOUND_ERR_CODE = 104
WS_MESSAGE_TIMEOUT = 20
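The single-letter keys near the end of the module (`m`, `i`, `n`, `o`) describe the OMS websocket frame envelope. Here is a small sketch of how a consumer might unpack such a frame with these constants; the sample payload shape is illustrative only, not captured from a live feed or from the connector code.

```python
import json

from hummingbot.connector.utilities.oms_connector import oms_connector_constants as CONSTANTS

def unpack_ws_frame(frame: dict) -> tuple:
    """Split an OMS websocket frame into (message type, sequence number, endpoint, payload)."""
    payload = frame[CONSTANTS.MSG_DATA_FIELD]                 # "o"
    if isinstance(payload, str):                              # the payload may arrive JSON-encoded
        payload = json.loads(payload)
    return (
        frame[CONSTANTS.MSG_TYPE_FIELD],                      # "m": 0=request, 1=response, 3=event, 5=error
        frame[CONSTANTS.MSG_SEQUENCE_FIELD],                  # "i"
        frame[CONSTANTS.MSG_ENDPOINT_FIELD],                  # "n"
        payload,
    )

# Illustrative frame shaped like an OrderStateEvent.
sample = {"m": CONSTANTS.EVENT_MSG_TYPE, "i": 2, "n": CONSTANTS.WS_ORDER_STATE_EVENT,
          "o": {"OrderId": 1, "OrderState": "Canceled"}}
msg_type, seq, endpoint, data = unpack_ws_frame(sample)
state = CONSTANTS.ORDER_STATE_MAP[data[CONSTANTS.ORDER_STATE_FIELD]]  # -> OrderState.CANCELED
```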
---
blob_id: c07ea9dba8982e2880dfcc9492e47c5de6ae4cda
directory_id: 0ca218c0f54dac33a2ade4accfdf8f5be3207588
path: /test/orm/test_association.py
content_id: baf48a016c300be3ff4eaa358fdbd43ed90cc934
detected_licenses: ["MIT"]
license_type: permissive
repo_name: sqlalchemy/sqlalchemy
snapshot_id: 9d949c67c9b5396b1f33e7ff0f3230c81babf5be
revision_id: b382bff6e3464f039db0fd1f2ce1b79038675e48
branch_name: refs/heads/main
visit_date: 2023-08-31T17:40:59.565421
revision_date: 2023-08-30T15:01:41
committer_date: 2023-08-30T15:01:41
github_id: 159,271,175
star_events_count: 8,083
fork_events_count: 1,489
gha_license_id: MIT
gha_event_created_at: 2023-09-12T18:53:55
gha_created_at: 2018-11-27T03:35:03
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,399
extension: py
filename: test_association.py
content:
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy.orm import relationship
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class AssociationTest(fixtures.MappedTest):
run_setup_classes = "once"
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"items",
metadata,
Column(
"item_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
)
Table(
"item_keywords",
metadata,
Column("item_id", Integer, ForeignKey("items.item_id")),
Column("keyword_id", Integer, ForeignKey("keywords.keyword_id")),
Column("data", String(40)),
)
Table(
"keywords",
metadata,
Column(
"keyword_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
)
@classmethod
def setup_classes(cls):
class Item(cls.Basic):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Item id=%d name=%s keywordassoc=%r" % (
self.item_id,
self.name,
self.keywords,
)
class Keyword(cls.Basic):
def __init__(self, name):
self.name = name
def __repr__(self):
return "Keyword id=%d name=%s" % (self.keyword_id, self.name)
class KeywordAssociation(cls.Basic):
def __init__(self, keyword, data):
self.keyword = keyword
self.data = data
def __repr__(self):
return "KeywordAssociation itemid=%d keyword=%r data=%s" % (
self.item_id,
self.keyword,
self.data,
)
@classmethod
def setup_mappers(cls):
KeywordAssociation, Item, Keyword = (
cls.classes.KeywordAssociation,
cls.classes.Item,
cls.classes.Keyword,
)
items, item_keywords, keywords = cls.tables.get_all(
"items", "item_keywords", "keywords"
)
cls.mapper_registry.map_imperatively(Keyword, keywords)
cls.mapper_registry.map_imperatively(
KeywordAssociation,
item_keywords,
properties={"keyword": relationship(Keyword, lazy="joined")},
primary_key=[item_keywords.c.item_id, item_keywords.c.keyword_id],
)
cls.mapper_registry.map_imperatively(
Item,
items,
properties={
"keywords": relationship(
KeywordAssociation,
order_by=item_keywords.c.data,
cascade="all, delete-orphan",
)
},
)
def test_insert(self):
KeywordAssociation, Item, Keyword = (
self.classes.KeywordAssociation,
self.classes.Item,
self.classes.Keyword,
)
sess = fixture_session()
item1 = Item("item1")
item2 = Item("item2")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
item2.keywords.append(
KeywordAssociation(Keyword("green"), "green_assoc")
)
sess.add_all((item1, item2))
sess.flush()
saved = repr([item1, item2])
sess.expunge_all()
result = sess.query(Item).all()
loaded = repr(result)
eq_(saved, loaded)
def test_replace(self):
KeywordAssociation, Item, Keyword = (
self.classes.KeywordAssociation,
self.classes.Item,
self.classes.Keyword,
)
sess = fixture_session()
item1 = Item("item1")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
sess.add(item1)
sess.flush()
red_keyword = item1.keywords[1].keyword
del item1.keywords[1]
item1.keywords.append(KeywordAssociation(red_keyword, "new_red_assoc"))
sess.flush()
saved = repr([item1])
sess.expunge_all()
result = sess.query(Item).all()
loaded = repr(result)
eq_(saved, loaded)
def test_modify(self):
KeywordAssociation, Item, Keyword = (
self.classes.KeywordAssociation,
self.classes.Item,
self.classes.Keyword,
)
sess = fixture_session()
item1 = Item("item1")
item2 = Item("item2")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
item2.keywords.append(
KeywordAssociation(Keyword("green"), "green_assoc")
)
sess.add_all((item1, item2))
sess.flush()
red_keyword = item1.keywords[1].keyword
del item1.keywords[0]
del item1.keywords[0]
purple_keyword = Keyword("purple")
item1.keywords.append(KeywordAssociation(red_keyword, "new_red_assoc"))
item2.keywords.append(
KeywordAssociation(purple_keyword, "purple_item2_assoc")
)
item1.keywords.append(
KeywordAssociation(purple_keyword, "purple_item1_assoc")
)
item1.keywords.append(
KeywordAssociation(Keyword("yellow"), "yellow_assoc")
)
sess.flush()
saved = repr([item1, item2])
sess.expunge_all()
result = sess.query(Item).all()
loaded = repr(result)
eq_(saved, loaded)
def test_delete(self):
KeywordAssociation = self.classes.KeywordAssociation
Item = self.classes.Item
item_keywords = self.tables.item_keywords
Keyword = self.classes.Keyword
sess = fixture_session()
item1 = Item("item1")
item2 = Item("item2")
item1.keywords.append(
KeywordAssociation(Keyword("blue"), "blue_assoc")
)
item1.keywords.append(KeywordAssociation(Keyword("red"), "red_assoc"))
item2.keywords.append(
KeywordAssociation(Keyword("green"), "green_assoc")
)
sess.add_all((item1, item2))
sess.flush()
eq_(
sess.connection().scalar(
select(func.count("*")).select_from(item_keywords)
),
3,
)
sess.delete(item1)
sess.delete(item2)
sess.flush()
eq_(
sess.connection().scalar(
select(func.count("*")).select_from(item_keywords)
),
0,
)
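The suite above builds the association-object pattern with the imperative `map_imperatively` API. For readers more used to declarative mappings, a rough SQLAlchemy 2.0 declarative-style equivalent is sketched below; it is for comparison only and is not part of the SQLAlchemy test suite.

```python
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import DeclarativeBase, Session, relationship

class Base(DeclarativeBase):
    pass

class Keyword(Base):
    __tablename__ = "keywords"
    keyword_id = Column(Integer, primary_key=True)
    name = Column(String(40))

class KeywordAssociation(Base):
    __tablename__ = "item_keywords"
    # Composite primary key, mirroring primary_key=[item_id, keyword_id] above.
    item_id = Column(ForeignKey("items.item_id"), primary_key=True)
    keyword_id = Column(ForeignKey("keywords.keyword_id"), primary_key=True)
    data = Column(String(40))
    keyword = relationship(Keyword, lazy="joined")

class Item(Base):
    __tablename__ = "items"
    item_id = Column(Integer, primary_key=True)
    name = Column(String(40))
    keywords = relationship(
        KeywordAssociation,
        order_by=KeywordAssociation.data,
        cascade="all, delete-orphan",
    )

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    item = Item(name="item1")
    item.keywords.append(KeywordAssociation(keyword=Keyword(name="blue"), data="blue_assoc"))
    session.add(item)
    session.commit()
```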
---
blob_id: 25762df55f67dcc48b1492d27ded7cb055268809
directory_id: abbf6a11c0590f6e5b7327e6f6df5a6c71af891d
path: /binstar_client/commands/move.py
content_id: 4a991a6fcf23240fa79760ff2971fa763b42db30
detected_licenses: []
license_type: permissive
repo_name: Anaconda-Platform/anaconda-client
snapshot_id: 3ce7848d938cfe62a2bad397a958774e5d28f8ff
revision_id: 45fb0a363ba7833deccee6db82a26a0b51a7ca75
branch_name: refs/heads/master
visit_date: 2023-08-30T21:11:47.468128
revision_date: 2023-08-08T14:36:23
committer_date: 2023-08-08T14:36:23
github_id: 9,064,487
star_events_count: 119
fork_events_count: 238
gha_license_id: BSD-3-Clause
gha_event_created_at: 2023-09-14T15:10:54
gha_created_at: 2013-03-27T21:52:37
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,906
extension: py
filename: move.py
content:
# -*- coding: utf-8 -*-
# pylint: disable=broad-except,protected-access,missing-function-docstring
"""
Move packages between labels.
"""
# Standard library imports
from __future__ import unicode_literals, print_function
import logging
# Local imports
from binstar_client import errors
from binstar_client.utils import get_server_api, parse_specs
logger = logging.getLogger('binstar.move')
def main(args):
aserver_api = get_server_api(args.token, args.site)
spec = args.spec
channels = aserver_api.list_channels(spec.user)
label_text = 'label' if (args.from_label and args.to_label) else 'channel'
from_label = args.from_label.lower()
to_label = args.to_label.lower()
if from_label not in channels:
raise errors.UserError(
'{} {} does not exist\n\tplease choose from: {}'.format(
label_text.title(),
from_label,
', '.join(channels)
))
if from_label == to_label:
raise errors.UserError('--from-label and --to-label must be different')
# Add files to to_label
try:
aserver_api.add_channel(
to_label,
spec.user,
package=spec.package,
version=spec._version,
filename=spec._basename,
)
except Exception as error:
logger.exception(error)
# Remove files from from_label
try:
aserver_api.remove_channel(
from_label,
spec.user,
package=spec.package,
version=spec._version,
filename=spec._basename,
)
except Exception as error:
logger.exception(error)
# for binstar_file in files:
# print("Copied file: %(basename)s" % binstar_file)
# if files:
# logger.info("Copied %i files" % len(files))
# else:
# logger.warning("Did not copy any files. Please check your inputs "
# "with \n\n\tanaconda show %s" % spec)
def add_parser(subparsers):
parser = subparsers.add_parser(
'move',
help='Move packages between labels',
description=__doc__,
)
parser.add_argument(
'spec',
help='Package - written as user/package/version[/filename] '
'If filename is not given, move all files in the version',
type=parse_specs,
)
# NOTE: To be implemented later on
# parser.add_argument(
# '--to-owner',
# help='User account to move package to (default: your account)',
# )
_from = parser.add_mutually_exclusive_group()
_to = parser.add_mutually_exclusive_group()
_from.add_argument(
'--from-label',
help='Label to move packages from',
default='main',
)
_to.add_argument(
'--to-label',
help='Label to move packages to',
default='main',
)
parser.set_defaults(main=main)
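A standalone sketch of how the `move` subcommand above wires into argparse and what a typical invocation parses to. The top-level parser here is hypothetical (the real anaconda-client CLI builds its own and supplies `token`/`site` on the parsed args); the package spec and labels are placeholders.

```python
import argparse

from binstar_client.commands import move

parser = argparse.ArgumentParser(prog="anaconda")
subparsers = parser.add_subparsers()
move.add_parser(subparsers)  # registers the `move` subcommand and its arguments

args = parser.parse_args(["move", "someuser/somepkg/1.0", "--from-label", "main", "--to-label", "dev"])
print(args.spec)         # parsed by parse_specs into user/package/version
print(args.from_label)   # "main"
print(args.to_label)     # "dev"
# args.main(args) would then move every file of version 1.0 from label `main`
# to label `dev`, given a valid token/site on `args` (supplied by the real CLI).
```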
---
blob_id: 84f6f4791b942cbe029c0d45a28441b371fabc56
directory_id: a5a99f646e371b45974a6fb6ccc06b0a674818f2
path: /DQM/TrackingMonitor/python/TrackingMonitorAllTrackingSequences_cff.py
content_id: 314357ff45ab62e754663b3be00295b615f1ca5f
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: cms-sw/cmssw
snapshot_id: 4ecd2c1105d59c66d385551230542c6615b9ab58
revision_id: 19c178740257eb48367778593da55dcad08b7a4f
branch_name: refs/heads/master
visit_date: 2023-08-23T21:57:42.491143
revision_date: 2023-08-22T20:22:40
committer_date: 2023-08-22T20:22:40
github_id: 10,969,551
star_events_count: 1,006
fork_events_count: 3,696
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:14:28
gha_created_at: 2013-06-26T14:09:07
gha_language: C++
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 3,382
extension: py
filename: TrackingMonitorAllTrackingSequences_cff.py
content:
import FWCore.ParameterSet.Config as cms
#-------------------------------------------------
# Tracking Monitor
#-------------------------------------------------
from DQM.TrackingMonitor.TrackingMonitor_cfi import *
# properties
TrackMon.MeasurementState = cms.string('ImpactPoint')
# which plots to do
TrackMon.doTrackerSpecific = cms.bool(True)
TrackMon.doAllPlots = cms.bool(True)
TrackMon.doBeamSpotPlots = cms.bool(True)
TrackMon.doSeedParameterHistos = cms.bool(False)
# out of the box
# ---------------------------------------------------------------------------#
# generalTracks
TrackMonGenTk = TrackMon.clone(
TrackProducer = "generalTracks",
beamSpot = "offlineBeamSpot",
FolderName = 'Tracking/GenTk/GlobalParameters',
BSFolderName = 'Tracking/GenTk/BeamSpotParameters',
AlgoName = 'GenTk',
doSeedParameterHistos = False
)
# Step0
TrackMonStep0 = TrackMon.clone(
TrackProducer = "zeroStepTracksWithQuality",
SeedProducer = "initialStepSeeds",
TCProducer = "initialStepTrackCandidates",
beamSpot = "offlineBeamSpot",
FolderName = 'Tracking/Step0/GlobalParameters',
BSFolderName = 'Tracking/Step0/BeamSpotParameters',
AlgoName = 'Step0',
doSeedParameterHistos = True,
doTrackCandHistos = True
)
# Step1
TrackMonStep1 = TrackMon.clone(
TrackProducer = "preMergingFirstStepTracksWithQuality",
SeedProducer = "newSeedFromPairs",
TCProducer = "stepOneTrackCandidateMaker",
beamSpot = "offlineBeamSpot",
FolderName = 'Tracking/Step1/GlobalParameters',
BSFolderName = 'Tracking/Step1/BeamSpotParameters',
AlgoName = 'Step1',
doSeedParameterHistos = True,
doTrackCandHistos = True
)
# Step2
TrackMonStep2 = TrackMon.clone(
TrackProducer = "secStep",
SeedProducer = "secTriplets",
TCProducer = "secTrackCandidates",
beamSpot = "offlineBeamSpot",
FolderName = 'Tracking/Step2/GlobalParameters',
BSFolderName = 'Tracking/Step2/BeamSpotParameters',
AlgoName = 'Step2',
doSeedParameterHistos = True,
doTrackCandHistos = True
)
# Step4
TrackMonStep4 = TrackMon.clone(
TrackProducer = "pixellessStep",
SeedProducer = "fourthPLSeeds",
TCProducer = "fourthTrackCandidates",
beamSpot = "offlineBeamSpot",
FolderName = 'Tracking/Step4/GlobalParameters',
BSFolderName = 'Tracking/Step4/BeamSpotParameters',
AlgoName = 'Step4',
doSeedParameterHistos = True,
doTrackCandHistos = True
)
# Step5
TrackMonStep5 = TrackMon.clone(
TrackProducer = "tobtecStep",
SeedProducer = "fifthSeeds",
TCProducer = "fifthTrackCandidates",
beamSpot = "offlineBeamSpot",
FolderName = 'Tracking/Step5/GlobalParameters',
BSFolderName = 'Tracking/Step5/BeamSpotParameters',
AlgoName = 'Step5',
doSeedParameterHistos = True,
doTrackCandHistos = True
)
# high Purity
# ---------------------------------------------------------------------------#
#-------------------------------------------------
# Paths
#-------------------------------------------------
# out of the box
trkmonootb = cms.Sequence(
TrackMonGenTk
* TrackMonStep0
* TrackMonStep1
* TrackMonStep2
# * TrackMonStep3
* TrackMonStep4
* TrackMonStep5
)
# all paths
trkmon = cms.Sequence(
trkmonootb
# * trkmonhp
# * trkmontight
# * trkmonloose
)
|
4312016c6f3ea06034dfbb74ad3f70c0b4e04c73
|
aeef2494b283012ed619870c4275e7d015f4017a
|
/sdk/python/pulumi_gcp/folder/iam_policy.py
|
3af7f7de8086470bb983a4823be935415aa87ea9
|
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
pulumi/pulumi-gcp
|
d4fd3f80c3df5290edaf33eb5eafe34e6699d0ff
|
7deea0a50a4ee5ab7bd722a83eca01707e298f85
|
refs/heads/master
| 2023-08-31T07:12:45.921522
| 2023-08-31T06:16:27
| 2023-08-31T06:16:27
| 97,485,806
| 160
| 63
|
Apache-2.0
| 2023-09-14T19:49:36
| 2017-07-17T14:28:37
|
Java
|
UTF-8
|
Python
| false
| false
| 25,800
|
py
|
iam_policy.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['IAMPolicyArgs', 'IAMPolicy']
@pulumi.input_type
class IAMPolicyArgs:
def __init__(__self__, *,
folder: pulumi.Input[str],
policy_data: pulumi.Input[str]):
"""
The set of arguments for constructing a IAMPolicy resource.
:param pulumi.Input[str] folder: The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
:param pulumi.Input[str] policy_data: The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
pulumi.set(__self__, "folder", folder)
pulumi.set(__self__, "policy_data", policy_data)
@property
@pulumi.getter
def folder(self) -> pulumi.Input[str]:
"""
The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
"""
return pulumi.get(self, "folder")
@folder.setter
def folder(self, value: pulumi.Input[str]):
pulumi.set(self, "folder", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Input[str]:
"""
The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: pulumi.Input[str]):
pulumi.set(self, "policy_data", value)
@pulumi.input_type
class _IAMPolicyState:
def __init__(__self__, *,
etag: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering IAMPolicy resources.
:param pulumi.Input[str] etag: (Computed) The etag of the folder's IAM policy.
:param pulumi.Input[str] folder: The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
:param pulumi.Input[str] policy_data: The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
if etag is not None:
pulumi.set(__self__, "etag", etag)
if folder is not None:
pulumi.set(__self__, "folder", folder)
if policy_data is not None:
pulumi.set(__self__, "policy_data", policy_data)
@property
@pulumi.getter
def etag(self) -> Optional[pulumi.Input[str]]:
"""
(Computed) The etag of the folder's IAM policy.
"""
return pulumi.get(self, "etag")
@etag.setter
def etag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "etag", value)
@property
@pulumi.getter
def folder(self) -> Optional[pulumi.Input[str]]:
"""
The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
"""
return pulumi.get(self, "folder")
@folder.setter
def folder(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "folder", value)
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> Optional[pulumi.Input[str]]:
"""
The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
return pulumi.get(self, "policy_data")
@policy_data.setter
def policy_data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_data", value)
class IAMPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
folder: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Four different resources help you manage your IAM policy for a folder. Each of these resources serves a different use case:
* `folder.IAMPolicy`: Authoritative. Sets the IAM policy for the folder and replaces any existing policy already attached.
* `folder.IAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the folder are preserved.
* `folder.IAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the folder are preserved.
* `folder.IamAuditConfig`: Authoritative for a given service. Updates the IAM policy to enable audit logging for the given service.
> **Note:** `folder.IAMPolicy` **cannot** be used in conjunction with `folder.IAMBinding`, `folder.IAMMember`, or `folder.IamAuditConfig` or they will fight over what your policy should be.
> **Note:** `folder.IAMBinding` resources **can be** used in conjunction with `folder.IAMMember` resources **only if** they do not grant privilege to the same role.
> **Note:** The underlying API method `projects.setIamPolicy` has constraints which are documented [here](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). In addition to these constraints,
IAM Conditions cannot be used with Basic Roles such as Owner. Violating these constraints will result in the API returning a 400 error code so please review these if you encounter errors with this resource.
## google\\_folder\\_iam\\_policy
!> **Be careful!** You can accidentally lock yourself out of your folder
using this resource. Deleting a `folder.IAMPolicy` removes access
from anyone without permissions on its parent folder/organization. Proceed with caution.
It's not recommended to use `folder.IAMPolicy` with your provider folder
to avoid locking yourself out, and it should generally only be used with folders
fully managed by this provider. If you do use this resource, it is recommended to **import** the policy before
applying the change.
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/editor",
members=["user:jane@example.com"],
)])
folder = gcp.folder.IAMPolicy("folder",
folder="folders/1234567",
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\\"2020-01-01T00:00:00Z\\")",
title="expires_after_2019_12_31",
),
members=["user:jane@example.com"],
role="roles/compute.admin",
)])
folder = gcp.folder.IAMPolicy("folder",
folder="folders/1234567",
policy_data=admin.policy_data)
```
## google\\_folder\\_iam\\_binding
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMBinding("folder",
folder="folders/1234567",
members=["user:jane@example.com"],
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMBinding("folder",
condition=gcp.folder.IAMBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\\"2020-01-01T00:00:00Z\\")",
title="expires_after_2019_12_31",
),
folder="folders/1234567",
members=["user:jane@example.com"],
role="roles/container.admin")
```
## google\\_folder\\_iam\\_member
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMMember("folder",
folder="folders/1234567",
member="user:jane@example.com",
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMMember("folder",
condition=gcp.folder.IAMMemberConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\\"2020-01-01T00:00:00Z\\")",
title="expires_after_2019_12_31",
),
folder="folders/1234567",
member="user:jane@example.com",
role="roles/firebase.admin")
```
## google\\_folder\\_iam\\_audit\\_config
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IamAuditConfig("folder",
audit_log_configs=[
gcp.folder.IamAuditConfigAuditLogConfigArgs(
log_type="ADMIN_READ",
),
gcp.folder.IamAuditConfigAuditLogConfigArgs(
exempted_members=["user:joebloggs@hashicorp.com"],
log_type="DATA_READ",
),
],
folder="folders/1234567",
service="allServices")
```
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account.
This member resource can be imported using the `folder`, role, and member e.g.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder "folder roles/viewer user:foo@example.com"
```
IAM binding imports use space-delimited identifiers; the resource in question and the role.
This binding resource can be imported using the `folder` and role, e.g.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder "folder roles/viewer"
```
IAM policy imports use the identifier of the resource in question.
This policy resource can be imported using the `folder`.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder folder
```
IAM audit config imports use the identifier of the resource in question and the service, e.g.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder "folder foo.googleapis.com"
```
-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `organizations/{{org_id}}/roles/{{role_id}}`.
-> **Conditional IAM Bindings** If you're importing an IAM binding with a condition block, make sure
to include the title of the condition, e.g. `google_folder_iam_binding.my_folder "folder roles/{{role_id}} condition-title"`.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] folder: The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
:param pulumi.Input[str] policy_data: The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IAMPolicyArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Four different resources help you manage your IAM policy for a folder. Each of these resources serves a different use case:
* `folder.IAMPolicy`: Authoritative. Sets the IAM policy for the folder and replaces any existing policy already attached.
* `folder.IAMBinding`: Authoritative for a given role. Updates the IAM policy to grant a role to a list of members. Other roles within the IAM policy for the folder are preserved.
* `folder.IAMMember`: Non-authoritative. Updates the IAM policy to grant a role to a new member. Other members for the role for the folder are preserved.
* `folder.IamAuditConfig`: Authoritative for a given service. Updates the IAM policy to enable audit logging for the given service.
> **Note:** `folder.IAMPolicy` **cannot** be used in conjunction with `folder.IAMBinding`, `folder.IAMMember`, or `folder.IamAuditConfig` or they will fight over what your policy should be.
> **Note:** `folder.IAMBinding` resources **can be** used in conjunction with `folder.IAMMember` resources **only if** they do not grant privilege to the same role.
> **Note:** The underlying API method `projects.setIamPolicy` has constraints which are documented [here](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy). In addition to these constraints,
IAM Conditions cannot be used with Basic Roles such as Owner. Violating these constraints will result in the API returning a 400 error code so please review these if you encounter errors with this resource.
## google\\_folder\\_iam\\_policy
!> **Be careful!** You can accidentally lock yourself out of your folder
using this resource. Deleting a `folder.IAMPolicy` removes access
from anyone without permissions on its parent folder/organization. Proceed with caution.
It's not recommended to use `folder.IAMPolicy` with your provider folder
to avoid locking yourself out, and it should generally only be used with folders
fully managed by this provider. If you do use this resource, it is recommended to **import** the policy before
applying the change.
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
role="roles/editor",
members=["user:jane@example.com"],
)])
folder = gcp.folder.IAMPolicy("folder",
folder="folders/1234567",
policy_data=admin.policy_data)
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
admin = gcp.organizations.get_iam_policy(bindings=[gcp.organizations.GetIAMPolicyBindingArgs(
condition=gcp.organizations.GetIAMPolicyBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\\"2020-01-01T00:00:00Z\\")",
title="expires_after_2019_12_31",
),
members=["user:jane@example.com"],
role="roles/compute.admin",
)])
folder = gcp.folder.IAMPolicy("folder",
folder="folders/1234567",
policy_data=admin.policy_data)
```
## google\\_folder\\_iam\\_binding
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMBinding("folder",
folder="folders/1234567",
members=["user:jane@example.com"],
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMBinding("folder",
condition=gcp.folder.IAMBindingConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\\"2020-01-01T00:00:00Z\\")",
title="expires_after_2019_12_31",
),
folder="folders/1234567",
members=["user:jane@example.com"],
role="roles/container.admin")
```
## google\\_folder\\_iam\\_member
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMMember("folder",
folder="folders/1234567",
member="user:jane@example.com",
role="roles/editor")
```
With IAM Conditions:
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IAMMember("folder",
condition=gcp.folder.IAMMemberConditionArgs(
description="Expiring at midnight of 2019-12-31",
expression="request.time < timestamp(\\"2020-01-01T00:00:00Z\\")",
title="expires_after_2019_12_31",
),
folder="folders/1234567",
member="user:jane@example.com",
role="roles/firebase.admin")
```
## google\\_folder\\_iam\\_audit\\_config
```python
import pulumi
import pulumi_gcp as gcp
folder = gcp.folder.IamAuditConfig("folder",
audit_log_configs=[
gcp.folder.IamAuditConfigAuditLogConfigArgs(
log_type="ADMIN_READ",
),
gcp.folder.IamAuditConfigAuditLogConfigArgs(
exempted_members=["user:joebloggs@hashicorp.com"],
log_type="DATA_READ",
),
],
folder="folders/1234567",
service="allServices")
```
## Import
IAM member imports use space-delimited identifiers; the resource in question, the role, and the account.
This member resource can be imported using the `folder`, role, and member e.g.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder "folder roles/viewer user:foo@example.com"
```
IAM binding imports use space-delimited identifiers; the resource in question and the role.
This binding resource can be imported using the `folder` and role, e.g.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder "folder roles/viewer"
```
IAM policy imports use the identifier of the resource in question.
This policy resource can be imported using the `folder`.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder folder
```
IAM audit config imports use the identifier of the resource in question and the service, e.g.
```sh
$ pulumi import gcp:folder/iAMPolicy:IAMPolicy my_folder "folder foo.googleapis.com"
```
-> **Custom Roles** If you're importing an IAM resource with a custom role, make sure to use the
full name of the custom role, e.g. `organizations/{{org_id}}/roles/{{role_id}}`.
-> **Conditional IAM Bindings** If you're importing an IAM binding with a condition block, make sure
to include the title of the condition, e.g. `google_folder_iam_binding.my_folder "folder roles/{{role_id}} condition-title"`.
:param str resource_name: The name of the resource.
:param IAMPolicyArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IAMPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
folder: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IAMPolicyArgs.__new__(IAMPolicyArgs)
if folder is None and not opts.urn:
raise TypeError("Missing required property 'folder'")
__props__.__dict__["folder"] = folder
if policy_data is None and not opts.urn:
raise TypeError("Missing required property 'policy_data'")
__props__.__dict__["policy_data"] = policy_data
__props__.__dict__["etag"] = None
super(IAMPolicy, __self__).__init__(
'gcp:folder/iAMPolicy:IAMPolicy',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
etag: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
policy_data: Optional[pulumi.Input[str]] = None) -> 'IAMPolicy':
"""
Get an existing IAMPolicy resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] etag: (Computed) The etag of the folder's IAM policy.
:param pulumi.Input[str] folder: The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
:param pulumi.Input[str] policy_data: The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IAMPolicyState.__new__(_IAMPolicyState)
__props__.__dict__["etag"] = etag
__props__.__dict__["folder"] = folder
__props__.__dict__["policy_data"] = policy_data
return IAMPolicy(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
(Computed) The etag of the folder's IAM policy.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def folder(self) -> pulumi.Output[str]:
"""
The resource name of the folder the policy is attached to. Its format is folders/{folder_id}.
"""
return pulumi.get(self, "folder")
@property
@pulumi.getter(name="policyData")
def policy_data(self) -> pulumi.Output[str]:
"""
The `organizations_get_iam_policy` data source that represents
the IAM policy that will be applied to the folder. The policy will be
merged with any existing policy applied to the folder.
Changing this updates the policy.
Deleting this removes all policies from the folder, locking out users without
folder-level access.
"""
return pulumi.get(self, "policy_data")
|
ae98f93306ea40d7fbebeac6b98284276b593d47
|
d8a7f93044f7342bbb67397c3c18319ad56c06ba
|
/doc/makedocumentation.py
|
917e966edb5bb747c1fad1d0942afbf7dd148b8b
|
[
"BSD-3-Clause"
] |
permissive
|
leeping/forcebalance
|
16e934be0668dde1a02ef6a65c3cc90151057a5c
|
10b9280cab9735634d25a4f9c8b3e84cb13a6bb1
|
refs/heads/master
| 2023-07-19T20:06:23.869239
| 2023-02-08T17:56:55
| 2023-02-08T17:56:55
| 11,302,623
| 122
| 76
|
NOASSERTION
| 2023-09-06T13:52:28
| 2013-07-10T04:46:42
|
Python
|
UTF-8
|
Python
| false
| false
| 15,123
|
py
|
makedocumentation.py
|
"""This file contains a documentation generating script. Doxygen
is used to do the actual generation, so these functions act primarily to
streamline the process and provide some customizations to the automatically
generated documents.
The motivation is:
- Want to have automatic online docs
- Want to version-control PDF manuals
- Don't want to version-control HTML / latex files (it generates too many changes)
The solution is:
- Have a separate gh-pages branch that only keeps track of latex and html folders, and PDF files
- Pushing to gh-pages branch will update documentation on
http://leeping.github.io/forcebalance/doc/html/index.html and
http://leeping.github.io/forcebalance/doc/ForceBalance-Manual.pdf
The workflow for generating documentation:
- Generate doxygen config files from source controlled templates
- Generate option index using make-option-index.py
- Generate doxygen source files from source controlled text files
- Delete existing HTML and latex files
- Run doxygen to generate HTML files and latex files
- Hack the HTML files to add extra tabs
- Run latex to generate PDF documents
If upstream update is requested:
- Commit master branch (because manual is part of master branch)
- Move html and latex folders out of the way, check out gh-pages branch, update html and latex folders
This is because html and latex folders are not tracked by the master branch,
and if we check out the gh-pages branch we will get an error.
- Check out updated manuals from master branch
- Commit gh-pages branch and push upstream
- Check out master branch and restore folder locations
How to do this effectively:
- Make sure doxypy executable is in the PATH
- Make sure dot (from graphviz) is in the PATH
- Make sure version numbers are correct in four places: .api.cfg, .doxygen.cfg, header.tex, api_header.tex
The comment syntax below in a docstring will break it:
Quantity
========
Base class for thermodynamical quantity used for fitting. This can
be any experimental data that can be calculated as an ensemble
average from a simulation.
"""
from __future__ import print_function
from builtins import input
import os, sys
import re
import shutil
import subprocess
import argparse
from traceback import print_exc
from socket import gethostname
from datetime import datetime
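# Illustrative invocations based on the argparse options defined at the bottom
# of this file (assumed to be run from the doc/ directory that holds the
# config templates and .txt sources):
#
#     python makedocumentation.py --configure   # regenerate doxygen.cfg / api.cfg from templates before building
#     python makedocumentation.py -i -u         # interactive build, then push docs to the gh-pages branch
#     python makedocumentation.py -k            # plain build, removing temporary files afterwards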
def build(interactive=False, upstream=False):
if interactive:
display = lambda txt : input("$ %s " % txt)
else:
display = lambda txt : sys.stdout.write("$ %s\n" % txt)
print("\n# Build list of documented options")
display("python make-option-index.py > option_index.txt")
os.system("python make-option-index.py > option_index.txt")
# generate pages to be included in general documentation
mainpage=""
mainpage+="/**\n\n\\mainpage\n\n"
    for fnm in ["introduction.txt", "installation.txt", "usage.txt", "tutorial.txt", "glossary.txt", "option_index.txt"]:
        with open(fnm, 'r') as page:
            mainpage += page.read()
mainpage+="\n\\image latex ForceBalance.pdf \"Logo.\" height=10cm\n\n*/"
# generate pages to be included in API documentation
api=""
api+="/**\n\n"
    for fnm in ["roadmap.txt"]:
        with open(fnm, 'r') as page:
            api += page.read()
api+="\n\n*/"
# First attempt to generate documentation.
try:
with open('mainpage.dox','w') as f:
f.write(mainpage)
with open('api.dox','w') as f:
f.write(api)
# Delete HTML and API documentation folders
display("rm -rf html latex html_ latex_")
os.system("rm -rf html latex html_ latex_")
# Run doxygen to generate general documentation
print("\n# run doxygen with doxygen.cfg as input to generate general documentation")
display("doxygen doxygen.cfg")
if subprocess.call(['doxygen', 'doxygen.cfg']): raise OSError("Doxygen returned nonzero value while working on doxygen.cfg")
# Run doxygen to generate technical (API) documentation
print("\n# run doxygen with api.cfg as input to generate API documentation")
display("doxygen api.cfg")
if subprocess.call(['doxygen', 'api.cfg']): raise OSError("Doxygen returned nonzero value while working on api.cfg")
# add_tabs script adjusts html
print("\n# run add_tabs function to adjust tabs on html generated by doxygen")
display("python -c 'from makedocumentation import add_tabs; add_tabs()'")
add_tabs()
# Compile pdf formats
print("\n# Copy images referenced in latex files to proper folders")
display("cp Images/ForceBalance.pdf latex/ && cp Images/ForceBalance.pdf latex/api/")
if not os.path.exists('latex/api'):
os.makedirs('latex/api')
shutil.copy('Images/ForceBalance.pdf','latex/')
shutil.copy('Images/ForceBalance.pdf','latex/api/')
print("\n# Compile generated latex documentation into pdf")
display("cd latex && make")
os.chdir('latex')
if subprocess.call(['make']): raise OSError("make returned nonzero value while compiling latex/")
print("\n# Copy generated pdf up to /doc directory")
display("cd .. && cp latex/refman.pdf ForceBalance-Manual.pdf")
os.chdir('..')
shutil.copy('latex/refman.pdf', 'ForceBalance-Manual.pdf')
#print "\n#Compile generated latex API documentation into pdf"
#display("cd latex/api/ && make")
#os.chdir('latex/api/')
#if subprocess.call(['make']): raise OSError("make returned nonzero value while compiling latex/api/")
#print "\n# Copy generated API pdf up to /doc directory"
#display("cd ../.. && cp latex/api/refman.pdf ForceBalance-API.pdf")
#os.chdir('../..')
#shutil.copy('latex/api/refman.pdf', 'ForceBalance-API.pdf')
except:
print_exc()
        upstream = False  # since documentation generation failed, don't push anything upstream
input("\n# encountered ERROR (above). Documentation could not be generated.")
sys.exit(1)
if upstream:
print("\n# Switch to documentation branch before writing files")
# Move folders to temporary location prior to branch switch
for fnm in ["latex", "html"]:
display("mv %s %s_" % (fnm, fnm))
os.system("mv %s %s_" % (fnm, fnm))
# Make sure we only push the current branch
display("git config --global push.default current")
os.system("git config --global push.default current")
input("\n Press a key to COMMIT the master branch (will update manuals).")
display('git commit -a -m "Automatic documentation generation at %s on %s"' % (gethostname(), datetime.now().strftime("%m-%d-%Y %H:%M")))
if os.system('git commit -a -m "Automatic documentation generation at %s on %s"' % (gethostname(), datetime.now().strftime("%m-%d-%Y %H:%M"))):
raise OSError("Error trying to commit files to local master branch")
# Check out the gh-pages branch
display("git checkout gh-pages")
if os.system("git checkout gh-pages"):
print("\n# encountered ERROR in checking out branch (above). Please commit files and try again.")
for fnm in ["latex", "html"]:
os.system("mv %s_ %s" % (fnm, fnm))
sys.exit(1)
# Rsync the newly generated html and latex folders
display("rsync -a --delete html_/ html")
os.system("rsync -a --delete html_/ html")
display("rsync -a --delete latex_/ latex")
os.system("rsync -a --delete latex_/ latex")
display("git checkout master ForceBalance-API.pdf")
os.system("git checkout master ForceBalance-API.pdf")
display("git checkout master ForceBalance-Manual.pdf")
os.system("git checkout master ForceBalance-Manual.pdf")
try:
# Commit the new html and latex files
print("\n# Stage changes for commit")
display("git add html latex")
if os.system('git add html latex'): raise OSError("Error trying to stage files for commit")
print("\n# Commit changes locally")
display('git commit -a -m "Automatic documentation generation at %s on %s"' % (gethostname(), datetime.now().strftime("%m-%d-%Y %H:%M")))
if os.system('git commit -a -m "Automatic documentation generation at %s on %s"' % (gethostname(), datetime.now().strftime("%m-%d-%Y %H:%M"))):
raise OSError("Error trying to commit files to local gh-pages branch")
display("git push")
print("\n# Push updated documentation upstream")
display("git push")
if os.system('git push'):
raise OSError("While trying to push changes upstream 'git push' gave a nonzero return code")
print("\n# Documentation successfully pushed upstream!")
except:
print_exc()
input("\n# encountered ERROR (above). Will not push changes upstream. Press a key")
finally:
print("\n# Switch back to master branch")
display("git checkout master")
os.system('git checkout master')
for fnm in ["latex", "html"]:
os.system("mv %s_ %s" % (fnm, fnm))
def add_tabs():
"""Adjust tabs in html version of documentation"""
"""
Made obsolete in 2020-07-09
for fnm in os.listdir('./html/'):
if re.match('.*\.html$',fnm):
fnm = './html/' + fnm
newfile = []
installtag = ' class="current"' if fnm.split('/')[-1] == 'installation.html' else ''
usagetag = ' class="current"' if fnm.split('/')[-1] == 'usage.html' else ''
tutorialtag = ' class="current"' if fnm.split('/')[-1] == 'tutorial.html' else ''
glossarytag = ' class="current"' if fnm.split('/')[-1] == 'glossary.html' else ''
todotag = ' class="current"' if fnm.split('/')[-1] == 'todo.html' else ''
for line in open(fnm):
newfile.append(line)
if re.match('.*a href="index\.html"',line):
newfile.append(' <li%s><a href="installation.html"><span>Installation</span></a></li>\n' % installtag)
newfile.append(' <li%s><a href="usage.html"><span>Usage</span></a></li>\n' % usagetag)
newfile.append(' <li%s><a href="tutorial.html"><span>Tutorial</span></a></li>\n' % tutorialtag)
newfile.append(' <li%s><a href="glossary.html"><span>Glossary</span></a></li>\n' % glossarytag)
newfile.append(' <li><a href="api/roadmap.html"><span>API</span></a></li>\n')
with open(fnm,'w') as f: f.writelines(newfile)
"""
menudata_js = """
var menudata={children:[
{text:"Main Page",url:"index.html"},
{text:"Installation",url:"installation.html"},
{text:"Usage",url:"usage.html"},
{text:"Tutorial",url:"tutorial.html"},
{text:"Glossary",url:"glossary.html"},
{text:"Roadmap",url:"todo.html"},
{text:"API",url:"api/index.html"},
{text:"Files",url:"files.html",children:[
{text:"File List",url:"files.html"}]}]}
"""
with open(os.path.join('html', 'menudata.js'), 'w') as f:
f.write(menudata_js)
for fnm in os.listdir('./html/api/'):
if re.match('.*\.html$',fnm):
fnm = './html/api/' + fnm
newfile=[]
for line in open(fnm):
if re.match('.*a href="index\.html"',line):
newfile.append(' <li><a href="../index.html"><span>Main Page</span></a></li>\n')
newfile.append(' <li ')
if re.match('.*roadmap\.html$', fnm): newfile.append('class="current"')
newfile.append('><a href="roadmap.html"><span>Project Roadmap</span></a></li>\n')
else: newfile.append(line)
with open(fnm,'w') as f: f.writelines(newfile)
def find_forcebalance():
"""try to find forcebalance location in standard python path"""
forcebalance_dir=""
try:
import forcebalance
forcebalance_dir = forcebalance.__path__[0]
except:
print("Unable to find forcebalance directory in PYTHON PATH (Is it installed?)")
print("Try running forcebalance/setup.py or you can always set the INPUT directory")
print("manually in api.cfg")
exit()
print('ForceBalance directory is:', forcebalance_dir)
return forcebalance_dir
def find_doxypy():
"""Check if doxypy is in system path or else ask for location of doxypy.py"""
doxypy_path=""
try:
# first check to see if doxypy is in system path
        if subprocess.call(["doxypy", "makedocumentation.py"], stdout=open(os.devnull, 'w')): raise OSError()
doxypy_path="doxypy"
except OSError:
doxypy_path=input("Enter location of doxypy.py: ")
if not os.path.exists(doxypy_path) or doxypy_path[-9:] != 'doxypy.py':
print("Invalid path to doxypy")
exit()
return doxypy_path
def build_config():
forcebalance_path = find_forcebalance()
doxypy_path = find_doxypy()
with open('.doxygen.cfg', 'r') as f:
lines = f.readlines()
with open('doxygen.cfg', 'w') as f:
for line in lines:
if line.startswith('FILTER_PATTERNS ='):
option = 'FILTER_PATTERNS = "*.py=' + doxypy_path + '"\n'
f.write(option)
else:
f.write(line)
with open('.api.cfg', 'r') as f:
lines = f.readlines()
with open('api.cfg', 'w') as f:
for line in lines:
if line.startswith('INPUT ='):
option = 'INPUT = api.dox ' + forcebalance_path + '\n'
f.write(option)
elif line.startswith('FILTER_PATTERNS ='):
option = 'FILTER_PATTERNS = "*.py=' + doxypy_path + '"\n'
f.write(option)
else:
f.write(line)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--interactive', '-i', action='store_true', help="run in interactive mode, pausing before each command")
parser.add_argument('--clean', '-k', action='store_true', help="remove temporary files after script is complete")
parser.add_argument('--configure', '-c', action='store_true', help="generate doxygen configuration files from templates")
parser.add_argument('--upstream', '-u', action='store_true', help="push updated documentation to upstream github repository")
args = parser.parse_args()
if args.configure:
build_config()
elif not os.path.isfile('doxygen.cfg') or not os.path.isfile('api.cfg'):
print("Couldn't find required doxygen config files ('./doxygen.cfg' and './api.cfg').\nRun with --configure option to generate automatically")
sys.exit(1)
build(interactive = args.interactive, upstream = args.upstream)
if args.clean:
print("Cleaning up...")
os.system("rm -rf latex option_index.txt api.dox mainpage.dox") # cleanup
|
04ee51ed8205d0beba94c5fc9ed915ae79a7636e
|
4317bb9f5964962a3c90ea1fb6cf390ed37301c7
|
/tests/test_multilineentry.py
|
c02892920eca7d31fbbe2f21fb352d5e2315a846
|
[
"MIT"
] |
permissive
|
joaoventura/pylibui
|
44d894f0c70e59eb3ee5d7e885f4ebded008ec56
|
e90095667b3900f000887aef010424a737efb119
|
refs/heads/master
| 2022-08-27T13:45:20.255145
| 2022-08-21T21:13:19
| 2022-08-21T21:13:19
| 59,297,405
| 239
| 38
|
MIT
| 2022-08-21T21:13:20
| 2016-05-20T13:40:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
test_multilineentry.py
|
"""
Pylibui test suite.
"""
from pylibui.controls import MultilineEntry, NonWrappingMultilineEntry
from tests.utils import WindowTestCase
class MultilineEntryTest(WindowTestCase):
def setUp(self):
super().setUp()
self.entry = MultilineEntry()
def test_text_initial_value(self):
"""Tests the multiline entry's `text` initial value is empty."""
self.assertEqual(self.entry.getText(), '')
def test_text_can_be_changed(self):
"""Tests the multiline entry's `text` attribute can be changed."""
text = 'My entry'
self.entry.setText(text)
self.assertEqual(self.entry.getText(), text)
def test_text_can_be_appended(self):
"""Tests the multiline entry's `text` attribute can be appended."""
self.entry.append('Some')
self.entry.append('Text')
self.assertEqual(self.entry.getText(), 'SomeText')
def test_read_only_initial_value(self):
"""Tests the multiline entry's `read_only` initial value is False."""
self.assertEqual(self.entry.getReadOnly(), False)
def test_read_only_can_be_set_to_true(self):
"""Tests the multiline entry's `read_only` attribute can be set
to True."""
self.entry.setReadOnly(True)
self.assertEqual(self.entry.getReadOnly(), True)
def test_read_only_can_be_set_to_false(self):
"""Tests the multiline entry's `read_only` attribute can be set
to False."""
self.entry.setReadOnly(False)
self.assertEqual(self.entry.getReadOnly(), False)
|
02a3241acb734e1df7471fc293f3a43639e320c6
|
57bc404899f914eeef7ba298bf1e99883c864a26
|
/linked_list/library/linked_list.py
|
9f0160a79f8291be8e13a5e14c2bc678b343b0af
|
[
"MIT"
] |
permissive
|
priyankchheda/algorithms
|
547f19193273ac6a424fe4ba5e1375cc02ea4f60
|
38a5de72db14ef2664489da9857b598d24c4e276
|
refs/heads/master
| 2023-08-17T17:10:10.044940
| 2022-04-16T13:52:37
| 2022-04-16T13:52:37
| 133,684,565
| 195
| 38
|
MIT
| 2023-08-16T10:26:48
| 2018-05-16T15:10:56
|
C++
|
UTF-8
|
Python
| false
| false
| 5,325
|
py
|
linked_list.py
|
""" Implementation of Singly Linked List Data Structure """
class Node:
""" Node class contains everything related to Linked List node """
def __init__(self, data):
""" initializing single node with data """
self.data = data
self.next = None
def __repr__(self):
return f"Node: data={self.data}"
def get_data(self):
""" Return the self.data attribute. """
return self.data
def set_data(self, new_data):
""" replace the existing value of the self.data attribute with
new_data parameter
"""
self.data = new_data
def get_next(self):
""" return the self.next attribute. """
return self.next
def set_next(self, new_next):
""" replace the existing value of the self.next attribute with
new_next parameter
"""
self.next = new_next
class LinkedList:
""" Singly Linked List is a linear data structure.
Unlike arrays, linked list elements are not stored at a contiguous
location; the elements are linked using pointers.
"""
def __init__(self):
""" initializing singly linked list with zero node """
self.head = None
def __len__(self):
""" returns the number of nodes currently present in linked list """
current = self.head
count = 0
while current is not None:
count = count + 1
current = current.get_next()
return count
def __repr__(self):
return f"LinkedList: head={self.head}"
def get_head(self):
""" returns the first linked list node """
return self.head
def is_empty(self):
""" returns true if the Linked List is empty. Otherwise, return false
"""
return self.__len__() == 0
def insert_head(self, data):
""" inserts node at the start of linked list """
node = Node(data)
node.set_next(self.head)
self.head = node
def insert_tail(self, data):
""" inserts node at the end of linked list """
node = Node(data)
if self.head is None:
self.head = node
else:
current = self.head
while current.get_next() is not None:
current = current.get_next()
current.set_next(node)
def insert_at(self, data, position):
""" inserts node at specified position in linked list """
length = self.__len__()
if position <= 1:
self.insert_head(data)
elif position >= length:
self.insert_tail(data)
else:
node = Node(data)
previous = self.head
current = self.head
for _ in range(1, position):
previous = current
current = current.get_next()
previous.set_next(node)
node.set_next(current)
def delete_head(self):
""" removes first linked list node and returns data. Raise exception,
if linkedlist is empty
"""
if self.head is None:
raise IndexError("linkedlist is empty")
node = self.head
data = node.get_data()
self.head = self.head.get_next()
return data
def delete_tail(self):
""" removes last linked list node and returns data. raise exception,
if linkedlist is empty
"""
if self.head is None:
raise IndexError("linkedlist is empty")
current = self.head
if current.get_next() is None:
self.head = None
else:
previous = self.head
while current.get_next() is not None:
previous = current
current = current.get_next()
previous.set_next(None)
return current.get_data()
def delete_at(self, position):
""" removes specified node from linked list and returns data.
raise exception, if position is invalid.
"""
length = self.__len__()
if position < 1 or position > length:
raise ValueError("invalid position")
if position == 1:
return self.delete_head()
elif position == length:
return self.delete_tail()
else:
previous = self.head
current = self.head
for _ in range(1, position):
previous = current
current = current.get_next()
removed_data = current.get_data()
previous.set_next(current.get_next())
current.set_next(None)
return removed_data
def data_at(self, position):
""" returns node data without removing it.
raise exception, if position is invalid.
"""
length = self.__len__()
if position < 1 or position > length:
raise ValueError("invalid position")
        current = self.head
for _ in range(1, position):
current = current.get_next()
return current.get_data()
def print(self):
""" prints entire linked list without changing underlying data """
current = self.head
while current is not None:
print(" -> ", current.get_data())
current = current.get_next()
print()
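# Illustrative usage of the classes above (a sketch added for clarity; the
# values are arbitrary examples, not part of the original module).
if __name__ == "__main__":
    demo = LinkedList()
    demo.insert_head(2)         # 2
    demo.insert_tail(5)         # 2 -> 5
    demo.insert_tail(7)         # 2 -> 5 -> 7
    demo.insert_at(3, 2)        # 2 -> 3 -> 5 -> 7
    print(len(demo))            # 4
    print(demo.data_at(2))      # 3
    print(demo.delete_tail())   # 7
    print(demo.delete_at(2))    # 3
    demo.print()                # prints the remaining nodes: 2 and 5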
|
db01e168f398a10d8682d57a00e702e642e44b33
|
0760fb4901a75766921a205b55686d6d6f049b30
|
/python/ray/air/_internal/filelock.py
|
2642eaa049ad3635973142a820c904e5bce18d62
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
ray-project/ray
|
a4bb6940b08b59a61ef0b8e755a52d8563a2f867
|
edba68c3e7cf255d1d6479329f305adb7fa4c3ed
|
refs/heads/master
| 2023-08-31T03:36:48.164405
| 2023-08-31T03:20:38
| 2023-08-31T03:20:38
| 71,932,349
| 29,482
| 5,669
|
Apache-2.0
| 2023-09-14T21:48:14
| 2016-10-25T19:38:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,430
|
py
|
filelock.py
|
from filelock import FileLock
import hashlib
import os
from pathlib import Path
import ray
RAY_LOCKFILE_DIR = "_ray_lockfiles"
class TempFileLock:
"""FileLock wrapper that uses temporary file locks.
The temporary directory that these locks are saved to can be configured via
the `RAY_TMPDIR` environment variable.
Args:
path: The file path that this temporary file lock is used for.
This will be used to generate the lockfile filename.
Ex: For concurrent writes to a file, this is the common filepath
that multiple processes are writing to.
**kwargs: Additional keyword arguments to pass to the underlying `FileLock`.
"""
def __init__(self, path: str, **kwargs):
self.path = path
temp_dir = Path(ray._private.utils.get_user_temp_dir()).resolve()
self._lock_dir = temp_dir / RAY_LOCKFILE_DIR
self._path_hash = hashlib.md5(
str(Path(self.path).resolve()).encode("utf-8")
).hexdigest()
self._lock_path = self._lock_dir / f"{self._path_hash}.lock"
os.makedirs(str(self._lock_dir), exist_ok=True)
self._lock = FileLock(self._lock_path, **kwargs)
def __enter__(self):
self._lock.acquire()
return self
def __exit__(self, type, value, traceback):
self._lock.release()
def __getattr__(self, name):
return getattr(self._lock, name)
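# Illustrative usage sketch (the file path and timeout are arbitrary examples):
# the context manager serializes concurrent writers to the same target path by
# locking a hash-named lockfile kept under Ray's temp directory instead of
# creating a .lock file next to the target itself.
if __name__ == "__main__":
    with TempFileLock("/tmp/shared_output.txt", timeout=10):
        with open("/tmp/shared_output.txt", "a") as f:
            f.write("one line written while holding the lock\n")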
|
917c5bef27a58fdcd004916624589882e6029a2a
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/homeassistant/components/netgear/update.py
|
b0e9a26864b227b05bc1c76b978a087569c3803f
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 2,804
|
py
|
update.py
|
"""Update entities for Netgear devices."""
from __future__ import annotations
import logging
from typing import Any
from homeassistant.components.update import (
UpdateDeviceClass,
UpdateEntity,
UpdateEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, KEY_COORDINATOR_FIRMWARE, KEY_ROUTER
from .router import NetgearRouter, NetgearRouterCoordinatorEntity
LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up update entities for Netgear component."""
router = hass.data[DOMAIN][entry.entry_id][KEY_ROUTER]
coordinator = hass.data[DOMAIN][entry.entry_id][KEY_COORDINATOR_FIRMWARE]
entities = [NetgearUpdateEntity(coordinator, router)]
async_add_entities(entities)
class NetgearUpdateEntity(NetgearRouterCoordinatorEntity, UpdateEntity):
"""Update entity for a Netgear device."""
_attr_device_class = UpdateDeviceClass.FIRMWARE
_attr_supported_features = UpdateEntityFeature.INSTALL
def __init__(
self,
coordinator: DataUpdateCoordinator,
router: NetgearRouter,
) -> None:
"""Initialize a Netgear device."""
super().__init__(coordinator, router)
self._name = f"{router.device_name} Update"
self._unique_id = f"{router.serial_number}-update"
@property
def installed_version(self) -> str | None:
"""Version currently in use."""
if self.coordinator.data is not None:
return self.coordinator.data.get("CurrentVersion")
return None
@property
def latest_version(self) -> str | None:
"""Latest version available for install."""
if self.coordinator.data is not None:
new_version = self.coordinator.data.get("NewVersion")
if new_version is not None and not new_version.startswith(
self.installed_version
):
return new_version
return self.installed_version
@property
def release_summary(self) -> str | None:
"""Release summary."""
if self.coordinator.data is not None:
return self.coordinator.data.get("ReleaseNote")
return None
async def async_install(
self, version: str | None, backup: bool, **kwargs: Any
) -> None:
"""Install the latest firmware version."""
await self._router.async_update_new_firmware()
@callback
def async_update_device(self) -> None:
"""Update the Netgear device."""
|
5447b3d592ce1822ac8b224e62fabfdab6952aa0
|
55a973178ab56ae2a99c7f30bb65dd11270f4acb
|
/capstone/capapi/tasks.py
|
e2c244d76cbbe1763924a279fbb275975754b5e2
|
[
"MIT"
] |
permissive
|
harvard-lil/capstone
|
fb8e72bedfe5d902293acb566c864e153da3298e
|
bec56eaa4bfb62a44260e85cf76b421172de10e0
|
refs/heads/develop
| 2023-08-25T11:15:54.572758
| 2023-08-23T13:04:38
| 2023-08-23T13:04:38
| 82,964,836
| 153
| 47
|
MIT
| 2023-09-13T15:07:30
| 2017-02-23T19:44:44
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,284
|
py
|
tasks.py
|
from datetime import timedelta
import requests
from celery import shared_task
from django.conf import settings
from django.core.cache import cache
from django.core.mail import send_mail
from django.db import connections
from django.utils import timezone
from capweb.helpers import statement_timeout, StatementTimeout
@shared_task
def daily_site_limit_reset_and_report():
from capapi.models import SiteLimits, CapUser # import here to avoid circular import
site_limits = SiteLimits.get()
# send admin email
users_created_today = CapUser.objects.filter(date_joined__gte=timezone.now()-timedelta(days=1)).values_list('email', flat=True)
if settings.SITE_LIMIT_REPORT:
send_mail(
'CAP daily usage: %s registered users, %s blacklisted downloads' % (site_limits.daily_signups, site_limits.daily_downloads),
"""
Blacklist cases downloaded: %s
User signups: %s
User emails:
%s
""" % (site_limits.daily_downloads, site_limits.daily_signups, "\n".join(users_created_today)),
settings.DEFAULT_FROM_EMAIL,
[settings.DEFAULT_FROM_EMAIL],
fail_silently=False,
)
# log status
print("CAP daily usage report: created %s new users, %s blacklisted cases downloaded" % (site_limits.daily_signups, site_limits.daily_downloads))
# reset limits
SiteLimits.reset()
# notify external service
try:
url = settings.HEALTHCHECK_URL["%s.daily_site_limit_reset_and_report" % __name__]
try:
r = requests.get(url)
r.raise_for_status()
except requests.exceptions.RequestException:
print("CAP daily usage report was unable to notify healthcheck service.")
except KeyError:
pass
@shared_task
def cache_query_count(sql, cache_key):
""" Cache the result of a count() sql query, because it didn't return quickly enough the first time. """
try:
with connections["capdb"].cursor() as cursor, statement_timeout(settings.TASK_COUNT_TIME_LIMIT, "capdb"):
cursor.execute(sql)
result = cursor.fetchone()
cache.set(cache_key, result[0], settings.CACHED_COUNT_TIMEOUT)
except StatementTimeout:
pass # this count takes too long to calculate -- move on
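# Illustrative dispatch sketch: after a count() query times out in the API, the
# caller can queue this task with Celery's standard .delay() call so the result
# is cached for later requests. The SQL and cache key below are hypothetical
# examples, not values used elsewhere in the codebase.
#
#     cache_query_count.delay(
#         "SELECT count(*) FROM capdb_casemetadata WHERE jurisdiction_id = 1",
#         "count:casemetadata:jurisdiction:1",
#     )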
|
882f71dd8725dc80af0a13b067c5822d7b5c3079
|
45be4ca6db49cfeeee722f94a21481634898c851
|
/deepneuro/models/blocks.py
|
eeab212f9967d9cec6cbe10e073be9d7a8daacc6
|
[
"MIT"
] |
permissive
|
QTIM-Lab/DeepNeuro
|
30de49d7cf5d15411591988bca5e284b4fe52ff5
|
8a55a958660227859439df003ac39b98ce3da1b0
|
refs/heads/master
| 2021-07-13T01:05:19.374945
| 2020-06-24T13:00:14
| 2020-06-24T13:00:14
| 93,092,834
| 122
| 40
|
MIT
| 2019-12-12T09:30:31
| 2017-06-01T19:36:34
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 10,630
|
py
|
blocks.py
|
import numpy as np
from deepneuro.models.dn_ops import DnConv, DnPixelNorm, DnUpsampling, DnMaxPooling, DnBatchNormalization, DnDropout, DnAveragePooling
from deepneuro.models.ops import leaky_relu, minibatch_state_concat
def generator(model, latent_var, depth=1, initial_size=(4, 4), max_size=None, reuse=False, transition=False, alpha_transition=0, name=None):
"""Summary
Parameters
----------
model : TYPE
Description
latent_var : TYPE
Description
depth : int, optional
Description
initial_size : tuple, optional
Description
max_size : None, optional
Description
reuse : bool, optional
Description
transition : bool, optional
Description
alpha_transition : int, optional
Description
name : None, optional
Description
Returns
-------
TYPE
Description
"""
import tensorflow as tf
with tf.variable_scope(name) as scope:
convs = []
if reuse:
scope.reuse_variables()
convs += [tf.reshape(latent_var, [tf.shape(latent_var)[0]] + [1] * model.dim + [model.latent_size])]
# TODO: refactor the padding on this step. Or replace with a dense layer?
with tf.variable_scope('generator_n_conv_1_{}'.format(convs[-1].shape[1])):
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=initial_size, stride_size=(1,) * model.dim, padding='Other', dim=model.dim)), model.dim)
convs += [tf.reshape(convs[-1], [tf.shape(latent_var)[0]] + list(initial_size) + [model.get_filter_num(0)])]
with tf.variable_scope('generator_n_conv_2_{}'.format(convs[-1].shape[1])):
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)
for i in range(depth):
# Calculate Next Upsample Ratio
if max_size is None:
upsample_ratio = (2,) * model.dim
else:
upsample_ratio = []
for size_idx, size in enumerate(max_size):
if size >= convs[-1].shape[size_idx + 1] * 2:
upsample_ratio += [2]
else:
upsample_ratio += [1]
upsample_ratio = tuple(upsample_ratio)
# Upsampling, with conversion to RGB if necessary.
if i == depth - 1 and transition:
transition_conv = DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]))
transition_conv = DnUpsampling(transition_conv, upsample_ratio, dim=model.dim)
convs += [DnUpsampling(convs[-1], upsample_ratio, dim=model.dim)]
# Convolutional blocks. TODO: Replace with block module.
with tf.variable_scope('generator_n_conv_1_{}'.format(convs[-1].shape[1])):
convs[-1] = DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)
with tf.variable_scope('generator_n_conv_2_{}'.format(convs[-1].shape[1])):
convs += [DnPixelNorm(leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(i + 1), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, dim=model.dim)), dim=model.dim)]
# Conversion to RGB
convs += [DnConv(convs[-1], output_dim=model.channels, kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='generator_y_rgb_conv_{}'.format(convs[-1].shape[1]), dim=model.dim)]
if transition:
convs[-1] = (1 - alpha_transition) * transition_conv + alpha_transition * convs[-1]
return convs[-1]
def discriminator(model, input_image, reuse=False, initial_size=(4, 4), max_size=None, name=None, depth=1, transition=False, alpha_transition=0, **kwargs):
import tensorflow as tf
"""
"""
with tf.variable_scope(name) as scope:
convs = []
if reuse:
scope.reuse_variables()
# fromRGB
convs += [leaky_relu(DnConv(input_image, output_dim=model.get_filter_num(depth), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(input_image.shape[1]), dim=model.dim))]
for i in range(depth):
# Convolutional blocks. TODO: Replace with block module.
convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))]
convs += [leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(depth - 1 - i), kernel_size=(5,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_2_{}'.format(convs[-1].shape[1]), dim=model.dim))]
# Calculate Next Downsample Ratio
# Whoever can calculate this in a less dumb way than this gets a Fields Medal.
if max_size is None:
downsample_ratio = (2,) * model.dim
else:
reference_shape = []
# Compare only the spatial dimensions (the batch and channel axes are skipped) against the initial size.
spatial_shape = input_image.shape[1:model.dim + 1]
for size_idx in range(model.dim):
    reference_shape += [int(spatial_shape[size_idx]) // initial_size[size_idx]]
downsample_ratio = []
for size_idx, size in enumerate(max_size):
if size // initial_size[size_idx] > min(reference_shape):
downsample_ratio += [1]
else:
downsample_ratio += [2]
downsample_ratio = tuple(downsample_ratio)
convs[-1] = DnAveragePooling(convs[-1], downsample_ratio, dim=model.dim)
if i == 0 and transition:
transition_conv = DnAveragePooling(input_image, downsample_ratio, dim=model.dim)
transition_conv = leaky_relu(DnConv(transition_conv, output_dim=model.get_filter_num(depth - 1), kernel_size=(1,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_y_rgb_conv_{}'.format(transition_conv.shape[1]), dim=model.dim))
convs[-1] = alpha_transition * convs[-1] + (1 - alpha_transition) * transition_conv
convs += [minibatch_state_concat(convs[-1])]
convs[-1] = leaky_relu(DnConv(convs[-1], output_dim=model.get_filter_num(0), kernel_size=(3,) * model.dim, stride_size=(1,) * model.dim, name='discriminator_n_conv_1_{}'.format(convs[-1].shape[1]), dim=model.dim))
output = tf.reshape(convs[-1], [tf.shape(convs[-1])[0], np.prod(initial_size) * model.get_filter_num(0)])
# The custom dense() wrapper currently errors here, so fall back to tf.layers.dense below.
# discriminate_output = dense(output, output_size=1, name='discriminator_n_fully')
discriminate_output = tf.layers.dense(output, 1, name='discriminator_n_1_fully')
return tf.nn.sigmoid(discriminate_output), discriminate_output
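# A minimal sketch (not part of the original module): one common way to attach a
# non-saturating GAN loss to the (probability, logit) pair returned by `discriminator`
# above. Assumes TensorFlow 1.x, matching the tf.variable_scope usage; the helper name
# and loss choice are illustrative assumptions rather than this library's training code.
def _example_gan_losses(real_logits, fake_logits):
    import tensorflow as tf
    # Discriminator: push real logits towards 1 and fake logits towards 0.
    d_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_logits), logits=real_logits)
        + tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_logits), logits=fake_logits))
    # Generator: push fake logits towards 1 (non-saturating form).
    g_loss = tf.reduce_mean(
        tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake_logits), logits=fake_logits))
    return d_loss, g_loss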
def unet(model, input_tensor, backend='tensorflow'):
    """Build a U-Net style encoder-decoder with skip connections from the given model configuration."""
    from keras.layers.merge import concatenate
left_outputs = []
for level in range(model.depth):
filter_num = int(model.max_filter / (2 ** (model.depth - level)) / model.downsize_filters_factor)
if level == 0:
left_outputs += [DnConv(input_tensor, filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)]
left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
else:
left_outputs += [DnMaxPooling(left_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
left_outputs[level] = DnConv(left_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_1'.format(level), backend=backend)
left_outputs[level] = DnConv(left_outputs[level], 2 * filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_downsampling_conv_{}_2'.format(level), backend=backend)
if model.dropout is not None and model.dropout != 0:
left_outputs[level] = DnDropout(model.dropout)(left_outputs[level])
if model.batch_norm:
left_outputs[level] = DnBatchNormalization(left_outputs[level])
right_outputs = [left_outputs[model.depth - 1]]
for level in range(model.depth):
filter_num = int(model.max_filter / (2 ** (level)) / model.downsize_filters_factor)
if level > 0:
right_outputs += [DnUpsampling(right_outputs[level - 1], pool_size=model.pool_size, dim=model.dim, backend=backend)]
right_outputs[level] = concatenate([right_outputs[level], left_outputs[model.depth - level - 1]], axis=model.dim + 1)
right_outputs[level] = DnConv(right_outputs[level], filter_num, model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_1'.format(level), backend=backend)
right_outputs[level] = DnConv(right_outputs[level], int(filter_num / 2), model.kernel_size, stride_size=(1,) * model.dim, activation=model.activation, padding=model.padding, dim=model.dim, name='unet_upsampling_conv_{}_2'.format(level), backend=backend)
else:
continue
if model.dropout is not None and model.dropout != 0:
right_outputs[level] = DnDropout(model.dropout)(right_outputs[level])
if model.batch_norm:
right_outputs[level] = DnBatchNormalization()(right_outputs[level])
output_layer = DnConv(right_outputs[level], 1, (1, ) * model.dim, stride_size=(1,) * model.dim, dim=model.dim, name='end_conv', backend=backend)
# TODO: Brainstorm better way to specify outputs
if model.input_tensor is not None:
return output_layer
return model.model
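# A minimal sketch (not part of the original module): the same downsample /
# upsample-with-skip-connection pattern that `unet` builds, written directly with
# stock Keras layers for a single-level 2D case. The helper name, shapes, and filter
# counts are illustrative assumptions and do not use the Dn* wrappers above.
def _example_unet_keras_sketch(input_shape=(64, 64, 1)):
    from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D, concatenate
    from keras.models import Model
    inputs = Input(shape=input_shape)
    # Encoder: convolve, then halve the spatial resolution.
    down = Conv2D(16, (3, 3), activation='relu', padding='same')(inputs)
    pooled = MaxPooling2D(pool_size=(2, 2))(down)
    # Bottleneck.
    bottom = Conv2D(32, (3, 3), activation='relu', padding='same')(pooled)
    # Decoder: upsample and concatenate the skip connection from the encoder.
    up = concatenate([UpSampling2D(size=(2, 2))(bottom), down], axis=-1)
    up = Conv2D(16, (3, 3), activation='relu', padding='same')(up)
    # 1x1 convolution to produce the single-channel output, as in `end_conv` above.
    outputs = Conv2D(1, (1, 1))(up)
    return Model(inputs=inputs, outputs=outputs)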
|
a63d67587036c3cd4abbb4e5ac741e295e4f3f77
|
224a034669068398e59962d6470fb72dbe20e8c9
|
/src/lightkurve/search.py
|
423d7f60421f880b7ab94ba9d7ff2f262a96411f
|
[
"MIT"
] |
permissive
|
lightkurve/lightkurve
|
b892b54ffbf3cb956f88300cb7d72b7e99fefdbf
|
7d485b69e9bbe58a1e7ba8d988387dc5d469ab36
|
refs/heads/main
| 2023-08-28T05:20:55.072927
| 2023-08-22T20:42:53
| 2023-08-22T20:42:53
| 118,387,904
| 148
| 66
|
MIT
| 2023-09-14T02:24:36
| 2018-01-22T00:49:59
|
Python
|
UTF-8
|
Python
| false
| false
| 56,015
|
py
|
search.py
|
"""Defines tools to retrieve Kepler data from the archive at MAST."""
from __future__ import division
import glob
import logging
import os
import re
import warnings
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.table import Row, Table, join
from astropy.time import Time
from astropy.utils import deprecated
from memoization import cached
from requests import HTTPError
from . import PACKAGEDIR, conf, config
from .collections import LightCurveCollection, TargetPixelFileCollection
from .io import read
from .targetpixelfile import TargetPixelFile
from .utils import (
LightkurveDeprecationWarning,
LightkurveError,
LightkurveWarning,
suppress_stdout,
)
log = logging.getLogger(__name__)
__all__ = [
"search_targetpixelfile",
"search_lightcurve",
"search_lightcurvefile",
"search_tesscut",
"SearchResult",
]
# Which external links should we display in the SearchResult repr?
AUTHOR_LINKS = {
"Kepler": "https://archive.stsci.edu/kepler/data_products.html",
"K2": "https://archive.stsci.edu/k2/data_products.html",
"SPOC": "https://heasarc.gsfc.nasa.gov/docs/tess/pipeline.html",
"TESS-SPOC": "https://archive.stsci.edu/hlsp/tess-spoc",
"QLP": "https://archive.stsci.edu/hlsp/qlp",
"TASOC": "https://archive.stsci.edu/hlsp/tasoc",
"PATHOS": "https://archive.stsci.edu/hlsp/pathos",
"CDIPS": "https://archive.stsci.edu/hlsp/cdips",
"K2SFF": "https://archive.stsci.edu/hlsp/k2sff",
"EVEREST": "https://archive.stsci.edu/hlsp/everest",
"TESScut": "https://mast.stsci.edu/tesscut/",
"GSFC-ELEANOR-LITE": "https://archive.stsci.edu/hlsp/gsfc-eleanor-lite",
"TGLC": "https://archive.stsci.edu/hlsp/tglc",
}
REPR_COLUMNS_BASE = [
"#",
"mission",
"year",
"author",
"exptime",
"target_name",
"distance",
]
class SearchError(Exception):
pass
class SearchResult(object):
"""Container for the results returned by the search functions.
The purpose of this class is to provide a convenient way to inspect and
download products that have been identified using one of the data search
functions.
Parameters
----------
table : `~astropy.table.Table` object
Astropy table returned by a join of the astroquery `Observations.query_criteria()`
and `Observations.get_product_list()` methods.
"""
table = None
"""`~astropy.table.Table` containing the full search results returned by the MAST API."""
display_extra_columns = []
"""A list of extra columns to be included in the default display of the search result.
It can be configured in a few different ways.
For example, to include ``proposal_id`` in the default display, users can set it:
1. in the user's ``lightkurve.cfg`` file::
[search]
# The extra comma at the end is needed for a single extra column
search_result_display_extra_columns = proposal_id,
2. at run time::
import lightkurve as lk
lk.conf.search_result_display_extra_columns = ['proposal_id']
3. for a specific `SearchResult` object instance::
result.display_extra_columns = ['proposal_id']
See :ref:`configuration <api.config>` for more information.
"""
def __init__(self, table=None):
if table is None:
self.table = Table()
else:
self.table = table
if len(table) > 0:
self._add_columns()
self._sort_table()
self.display_extra_columns = conf.search_result_display_extra_columns
def _sort_table(self):
"""Sort the table of search results by distance, author, and filename.
The reason we include "author" in the sort criteria is that Lightkurve v1 only
showed data products created by the official pipelines (i.e. author equal to
"Kepler", "K2", or "SPOC"). To maintain backwards compatibility, we want to
show products from these authors at the top, so that `search.download()`
operations tend to download the same product in Lightkurve v1 vs v2.
This ordering is not a judgement on the quality of one product vs another,
because we love all pipelines!
"""
sort_priority = {"Kepler": 1, "K2": 1, "SPOC": 1, "TESS-SPOC": 2, "QLP": 3}
self.table["sort_order"] = [
sort_priority.get(author, 9) for author in self.table["author"]
]
self.table.sort(["distance", "year", "mission", "sort_order", "exptime"])
def _add_columns(self):
"""Adds a user-friendly index (``#``) column and adds column unit
and display format information.
"""
if "#" not in self.table.columns:
self.table["#"] = None
self.table["exptime"].unit = "s"
self.table["exptime"].format = ".0f"
self.table["distance"].unit = "arcsec"
# Add the year column from `t_min` or `productFilename`
year = np.floor(Time(self.table["t_min"], format="mjd").decimalyear)
self.table["year"] = year.astype(int)
# `t_min` is incorrect for Kepler products, so we extract year from the filename for those =(
for idx in np.where(self.table["author"] == "Kepler")[0]:
self.table["year"][idx] = re.findall(
r"\d+.(\d{4})\d+", self.table["productFilename"][idx]
)[0]
def __repr__(self, html=False):
def to_tess_gi_url(proposal_id):
if re.match("^G0[12].+", proposal_id) is not None:
return f"https://heasarc.gsfc.nasa.gov/docs/tess/approved-programs-primary.html#:~:text={proposal_id}"
elif re.match("^G0[34].+", proposal_id) is not None:
return f"https://heasarc.gsfc.nasa.gov/docs/tess/approved-programs-em1.html#:~:text={proposal_id}"
else:
return f"https://heasarc.gsfc.nasa.gov/docs/tess/approved-programs.html#:~:text={proposal_id}"
out = "SearchResult containing {} data products.".format(len(self.table))
if len(self.table) == 0:
return out
columns = REPR_COLUMNS_BASE
if self.display_extra_columns is not None:
columns = REPR_COLUMNS_BASE + self.display_extra_columns
# search_tesscut() has fewer columns, ensure we don't try to display columns that do not exist
columns = [c for c in columns if c in self.table.colnames]
self.table["#"] = [idx for idx in range(len(self.table))]
out += "\n\n" + "\n".join(self.table[columns].pformat(max_width=300, html=html))
# Make sure author names show up as clickable links
if html:
for author, url in AUTHOR_LINKS.items():
out = out.replace(f">{author}<", f"><a href='{url}'>{author}</a><")
# special HTML formatting for TESS proposal_id
tess_table = self.table[self.table["project"] == "TESS"]
if "proposal_id" in tess_table.colnames:
proposal_id_col = np.unique(tess_table["proposal_id"])
else:
proposal_id_col = []
for p_ids in proposal_id_col:
# for CDIPS products, proposal_id is a np MaskedConstant, not a string
if p_ids == "N/A" or (not isinstance(p_ids, str)):
continue
# handle cases with multiple proposals, e.g., G12345_G67890
p_id_links = [
f"""\
<a href='{to_tess_gi_url(p_id)}'>{p_id}</a>\
"""
for p_id in p_ids.split("_")
]
out = out.replace(f">{p_ids}<", f">{' , '.join(p_id_links)}<")
return out
def _repr_html_(self):
return self.__repr__(html=True)
def __getitem__(self, key):
"""Implements indexing and slicing, e.g. SearchResult[2:5]."""
selection = self.table[key]
# Indexing a Table with an integer will return a Row
if isinstance(selection, Row):
selection = Table(selection)
return SearchResult(table=selection)
def __len__(self):
"""Returns the number of products in the SearchResult table."""
return len(self.table)
@property
def unique_targets(self):
"""Returns a table of targets and their RA & dec values produced by search"""
mask = ["target_name", "s_ra", "s_dec"]
return Table.from_pandas(
self.table[mask]
.to_pandas()
.drop_duplicates("target_name")
.reset_index(drop=True)
)
@property
def obsid(self):
"""MAST observation ID for each data product found."""
return np.asarray(np.unique(self.table["obsid"]), dtype="int64")
@property
def ra(self):
"""Right Ascension coordinate for each data product found."""
return self.table["s_ra"].data.data
@property
def dec(self):
"""Declination coordinate for each data product found."""
return self.table["s_dec"].data.data
@property
def mission(self):
"""Kepler quarter or TESS sector names for each data product found."""
return self.table["mission"].data
@property
def year(self):
"""Year the observation was made."""
return self.table["year"].data
@property
def author(self):
"""Pipeline name for each data product found."""
return self.table["author"].data
@property
def target_name(self):
"""Target name for each data product found."""
return self.table["target_name"].data
@property
def exptime(self):
"""Exposure time for each data product found."""
return self.table["exptime"].quantity
@property
def distance(self):
"""Distance from the search position for each data product found."""
return self.table["distance"].quantity
def _download_one(
self, table, quality_bitmask, download_dir, cutout_size, **kwargs
):
"""Private method used by `download()` and `download_all()` to download
exactly one file from the MAST archive.
Always returns a `TargetPixelFile` or `LightCurve` object.
"""
# Make sure astroquery uses the same level of verbosity
logging.getLogger("astropy").setLevel(log.getEffectiveLevel())
if download_dir is None:
download_dir = self._default_download_dir()
# if the SearchResult row is a TESScut entry, then download cutout
if "FFI Cutout" in table[0]["description"]:
try:
log.debug(
"Started downloading TESSCut for '{}' sector {}."
"".format(table[0]["target_name"], table[0]["sequence_number"])
)
path = self._fetch_tesscut_path(
table[0]["target_name"],
table[0]["sequence_number"],
download_dir,
cutout_size,
)
except Exception as exc:
msg = str(exc)
if "504" in msg:
# TESSCut will occasionally return a "504 Gateway Timeout
# error" when it is overloaded.
raise HTTPError(
"The TESS FFI cutout service at MAST appears "
"to be temporarily unavailable. It returned "
"the following error: {}".format(exc)
)
else:
raise SearchError(
"Unable to download FFI cutout. Desired target "
"coordinates may be too near the edge of the FFI."
"Error: {}".format(exc)
)
return read(
path, quality_bitmask=quality_bitmask, targetid=table[0]["targetid"]
)
else:
if cutout_size is not None:
warnings.warn(
"`cutout_size` can only be specified for TESS "
"Full Frame Image cutouts.",
LightkurveWarning,
)
# Whenever `astroquery.mast.Observations.download_products` is called,
# a HTTP request will be sent to determine the length of the file
# prior to checking if the file already exists in the local cache.
# For performance, we skip this HTTP request and immediately try to
# find the file in the cache. The path we check here is consistent
# with the one hard-coded inside `astroquery.mast.Observations._download_files()`
# in Astroquery v0.4.1. It would be good to submit a PR to astroquery
# so we can avoid having to use this hard-coded hack.
path = os.path.join(
download_dir.rstrip("/"),
"mastDownload",
table["obs_collection"][0],
table["obs_id"][0],
table["productFilename"][0],
)
if os.path.exists(path):
log.debug("File found in local cache.")
else:
from astroquery.mast import Observations
download_url = table[:1]["dataURL"][0]
log.debug("Started downloading {}.".format(download_url))
download_response = Observations.download_products(
table[:1], mrp_only=False, download_dir=download_dir
)[0]
if download_response["Status"] != "COMPLETE":
raise LightkurveError(
f"Download of {download_url} failed. "
f"MAST returns {download_response['Status']}: {download_response['Message']}"
)
path = download_response["Local Path"]
log.debug("Finished downloading.")
return read(path, quality_bitmask=quality_bitmask, **kwargs)
@suppress_stdout
def download(
self, quality_bitmask="default", download_dir=None, cutout_size=None, **kwargs
):
"""Download and open the first data product in the search result.
If multiple files are present in `SearchResult.table`, only the first
will be downloaded.
Parameters
----------
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` or :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
download_dir : str, optional
Location where the data files will be stored.
If `None` is passed, the value from `cache_dir` configuration parameter is used,
with "~/.lightkurve/cache" as the default.
See `~lightkurve.config.get_cache_dir()` for details.
cutout_size : int, float or tuple, optional
Side length of cutout in pixels. Tuples should have dimensions (y, x).
Default size is (5, 5)
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
kwargs : dict, optional
Extra keyword arguments passed on to the file format reader function.
Returns
-------
data : `TargetPixelFile` or `LightCurve` object
The first entry in the products table.
Raises
------
HTTPError
If the TESSCut service times out (i.e. returns HTTP status 504).
SearchError
If any other error occurs.
"""
if len(self.table) == 0:
warnings.warn(
"Cannot download from an empty search result.", LightkurveWarning
)
return None
if len(self.table) != 1:
warnings.warn(
"Warning: {} files available to download. "
"Only the first file has been downloaded. "
"Please use `download_all()` or specify additional "
"criteria (e.g. quarter, campaign, or sector) "
"to limit your search.".format(len(self.table)),
LightkurveWarning,
)
return self._download_one(
table=self.table[:1],
quality_bitmask=quality_bitmask,
download_dir=download_dir,
cutout_size=cutout_size,
**kwargs,
)
@suppress_stdout
def download_all(
self, quality_bitmask="default", download_dir=None, cutout_size=None, **kwargs
):
"""Download and open all data products in the search result.
This method will return a `~lightkurve.TargetPixelFileCollection` or
`~lightkurve.LightCurveCollection`.
Parameters
----------
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` or :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
download_dir : str, optional
Location where the data files will be stored.
If `None` is passed, the value from `cache_dir` configuration parameter is used,
with "~/.lightkurve/cache" as the default.
See `~lightkurve.config.get_cache_dir()` for details.
cutout_size : int, float or tuple, optional
Side length of cutout in pixels. Tuples should have dimensions (y, x).
Default size is (5, 5)
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
kwargs : dict, optional
Extra keyword arguments passed on to the file format reader function.
Returns
-------
collection : `~lightkurve.collections.Collection` object
Returns a `~lightkurve.LightCurveCollection` or
`~lightkurve.TargetPixelFileCollection`,
containing all entries in the products table
Raises
------
HTTPError
If the TESSCut service times out (i.e. returns HTTP status 504).
SearchError
If any other error occurs.
"""
if len(self.table) == 0:
warnings.warn(
"Cannot download from an empty search result.", LightkurveWarning
)
return None
log.debug("{} files will be downloaded.".format(len(self.table)))
products = []
for idx in range(len(self.table)):
products.append(
self._download_one(
table=self.table[idx : idx + 1],
quality_bitmask=quality_bitmask,
download_dir=download_dir,
cutout_size=cutout_size,
**kwargs,
)
)
if isinstance(products[0], TargetPixelFile):
return TargetPixelFileCollection(products)
else:
return LightCurveCollection(products)
def _default_download_dir(self):
return config.get_cache_dir()
def _fetch_tesscut_path(self, target, sector, download_dir, cutout_size):
"""Downloads TESS FFI cutout and returns path to local file.
Parameters
----------
download_dir : str
Path to location of `.lightkurve-cache` directory where downloaded
cutouts are stored
cutout_size : int, float or tuple
Side length of cutout in pixels. Tuples should have dimensions (y, x).
Default size is (5, 5)
Returns
-------
path : str
Path to locally downloaded cutout file
"""
from astroquery.mast import TesscutClass
# Set cutout_size defaults
if cutout_size is None:
cutout_size = 5
# Check existence of `~/.lightkurve-cache/tesscut`
tesscut_dir = os.path.join(download_dir, "tesscut")
if not os.path.isdir(tesscut_dir):
# if it doesn't exist, make a new cache directory
try:
os.mkdir(tesscut_dir)
# downloads into default cache if OSError occurs
except OSError:
tesscut_dir = download_dir
# Resolve SkyCoord of given target
coords = _resolve_object(target)
# build path string name and check if it exists
# this is necessary to ensure cutouts are not downloaded multiple times
sec = TesscutClass().get_sectors(coordinates=coords)
sector_name = sec[sec["sector"] == sector]["sectorName"][0]
if isinstance(cutout_size, int):
size_str = str(int(cutout_size)) + "x" + str(int(cutout_size))
elif isinstance(cutout_size, tuple) or isinstance(cutout_size, list):
size_str = str(int(cutout_size[1])) + "x" + str(int(cutout_size[0]))
# search cache for file with matching ra, dec, and cutout size
# ra and dec are searched within 0.001 degrees of input target
ra_string = str(coords.ra.value)
dec_string = str(coords.dec.value)
matchstring = r"{}_{}*_{}*_{}_astrocut.fits".format(
sector_name,
ra_string[: ra_string.find(".") + 4],
dec_string[: dec_string.find(".") + 4],
size_str,
)
cached_files = glob.glob(os.path.join(tesscut_dir, matchstring))
# if any files exist, return the path to them instead of downloading
if len(cached_files) > 0:
path = cached_files[0]
log.debug("Cached file found.")
# otherwise the file will be downloaded
else:
cutout_path = TesscutClass().download_cutouts(
coordinates=coords, size=cutout_size, sector=sector, path=tesscut_dir
)
path = cutout_path[0][0]  # the cutout_path already contains tesscut_dir
log.debug("Finished downloading.")
return path
@cached
def search_targetpixelfile(
target,
radius=None,
exptime=None,
cadence=None,
mission=("Kepler", "K2", "TESS"),
author=None,
quarter=None,
month=None,
campaign=None,
sector=None,
limit=None,
):
"""Search the `MAST data archive <https://archive.stsci.edu>`_ for target pixel files.
This function fetches a data table that lists the Target Pixel Files (TPFs)
that fall within a region of sky centered around the position of `target`
and within a cone of a given `radius`. If no value is provided for `radius`,
only a single target will be returned.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
cadence : 'long', 'short', 'fast', or float
Synonym for `exptime`. Will likely be deprecated in the future.
mission : str, tuple of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
author : str, tuple of str, or "any"
Author of the data product (`provenance_name` in the MAST API).
Official Kepler, K2, and TESS pipeline products have author names
'Kepler', 'K2', and 'SPOC'.
By default, all light curves are returned regardless of the author.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if ``exptime='short'`` you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
Examples
--------
This example demonstrates how to use the `search_targetpixelfile()` function
to query and download data. Before instantiating a
`~lightkurve.targetpixelfile.KeplerTargetPixelFile` object or
downloading any science products, we can identify potential desired targets
with `search_targetpixelfile()`::
>>> search_result = search_targetpixelfile('Kepler-10') # doctest: +SKIP
>>> print(search_result) # doctest: +SKIP
The above code will query MAST for Target Pixel Files (TPFs) available for
the known planet system Kepler-10, and display a table containing the
available science products. Because Kepler-10 was observed during 15 Quarters,
the table will have 15 entries. To obtain a
`~lightkurve.collections.TargetPixelFileCollection` object containing all
15 observations, use::
>>> search_result.download_all() # doctest: +SKIP
or we can download a single product by limiting our search::
>>> tpf = search_targetpixelfile('Kepler-10', quarter=2).download() # doctest: +SKIP
The above line of code will only download Quarter 2 and create a single
`~lightkurve.targetpixelfile.KeplerTargetPixelFile` object called `tpf`.
We can also pass a radius into `search_targetpixelfile` to perform a cone search::
>>> search_targetpixelfile('Kepler-10', radius=100).targets # doctest: +SKIP
This will display a table containing all targets within 100 arcseconds of Kepler-10.
We can download a `~lightkurve.collections.TargetPixelFileCollection` object
containing all available products for these targets in Quarter 4 with::
>>> search_targetpixelfile('Kepler-10', radius=100, quarter=4).download_all() # doctest: +SKIP
"""
try:
return _search_products(
target,
radius=radius,
filetype="Target Pixel",
exptime=exptime or cadence,
mission=mission,
provenance_name=author,
quarter=quarter,
month=month,
campaign=campaign,
sector=sector,
limit=limit,
)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
@deprecated(
"2.0", alternative="search_lightcurve()", warning_type=LightkurveDeprecationWarning
)
def search_lightcurvefile(*args, **kwargs):
return search_lightcurve(*args, **kwargs)
@cached
def search_lightcurve(
target,
radius=None,
exptime=None,
cadence=None,
mission=("Kepler", "K2", "TESS"),
author=None,
quarter=None,
month=None,
campaign=None,
sector=None,
limit=None,
):
"""Search the `MAST data archive <https://archive.stsci.edu>`_ for light curves.
This function fetches a data table that lists the Light Curve Files
that fall within a region of sky centered around the position of `target`
and within a cone of a given `radius`. If no value is provided for `radius`,
only a single target will be returned.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
cadence : 'long', 'short', 'fast', or float
Synonym for `exptime`. This keyword will likely be deprecated in the future.
mission : str, tuple of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
author : str, tuple of str, or "any"
Author of the data product (`provenance_name` in the MAST API).
Official Kepler, K2, and TESS pipeline products have author names
'Kepler', 'K2', and 'SPOC'.
Community-provided products that are supported include 'K2SFF', 'EVEREST'.
By default, all light curves are returned regardless of the author.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if ``exptime='short'`` you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
Examples
--------
This example demonstrates how to use the `search_lightcurve()` function to
query and download data. Before instantiating a `LightCurve` object or
downloading any science products, we can identify potential desired targets with
`search_lightcurve`::
>>> from lightkurve import search_lightcurve # doctest: +SKIP
>>> search_result = search_lightcurve("Kepler-10") # doctest: +SKIP
>>> print(search_result) # doctest: +SKIP
The above code will query MAST for lightcurve files available for the known
planet system Kepler-10, and display a table containing the available
data products. Because Kepler-10 was observed in multiple quarters and sectors
by both Kepler and TESS, the search will return many dozen results.
If we want to narrow down the search to only return Kepler light curves
in long cadence, we can use::
>>> search_result = search_lightcurve("Kepler-10", author="Kepler", exptime=1800) # doctest: +SKIP
>>> print(search_result) # doctest: +SKIP
That is better: we now see 15 light curves corresponding to 15 Kepler quarters.
If we want to download a `~lightkurve.collections.LightCurveCollection` object containing all
15 observations, use::
>>> search_result.download_all() # doctest: +SKIP
or we can specify the downloaded products by selecting a specific row using
rectangular brackets, for example::
>>> lc = search_result[2].download() # doctest: +SKIP
The above line of code will only search and download Quarter 2 data and
create a `LightCurve` object called lc.
We can also pass a radius into `search_lightcurve` to perform a cone search::
>>> search_lightcurve('Kepler-10', radius=100, quarter=4, exptime=1800) # doctest: +SKIP
This will display a table containing all targets within 100 arcseconds of
Kepler-10 and in Quarter 4. We can then download a
`~lightkurve.collections.LightCurveFile` containing all these
light curves using::
>>> search_lightcurve('Kepler-10', radius=100, quarter=4, exptime=1800).download_all() # doctest: +SKIP
"""
try:
return _search_products(
target,
radius=radius,
filetype="Lightcurve",
exptime=exptime or cadence,
mission=mission,
provenance_name=author,
quarter=quarter,
month=month,
campaign=campaign,
sector=sector,
limit=limit,
)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
@cached
def search_tesscut(target, sector=None):
"""Search the `MAST TESSCut service <https://mast.stsci.edu/tesscut/>`_ for a region
of sky that is available as a TESS Full Frame Image cutout.
This feature uses the `TESScut service <https://mast.stsci.edu/tesscut/>`_
provided by the TESS data archive at MAST. If you use this service in
your work, please `cite TESScut <https://ascl.net/code/v/2239>`_ in your
publications.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
sector : int or list
TESS Sector number. Default (None) will return all available sectors. A
list of desired sectors can also be provided.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
"""
try:
return _search_products(target, filetype="ffi", mission="TESS", sector=sector)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
def _search_products(
target,
radius=None,
filetype="Lightcurve",
mission=("Kepler", "K2", "TESS"),
provenance_name=None,
exptime=(0, 9999),
quarter=None,
month=None,
campaign=None,
sector=None,
limit=None,
**extra_query_criteria,
):
"""Helper function which returns a SearchResult object containing MAST
products that match several criteria.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
See docstrings above.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
filetype : {'Target pixel', 'Lightcurve', 'FFI'}
Type of files queried at MAST.
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
mission : str, list of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
provenance_name : str, list of str
Provenance of the data product. Defaults to official products, i.e.
('Kepler', 'K2', 'SPOC'). Community-provided products such as 'K2SFF'
are supported as well.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if ``exptime='short'`` you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return
Returns
-------
SearchResult : :class:`SearchResult` object.
"""
if isinstance(target, int):
if (0 < target) and (target < 13161030):
log.warning(
"Warning: {} may refer to a different Kepler or TESS target. "
"Please add the prefix 'KIC' or 'TIC' to disambiguate."
"".format(target)
)
elif (200000000 < target) and (target < 251813739):
log.warning(
"Warning: {} may refer to a different K2 or TESS target. "
"Please add the prefix 'EPIC' or 'TIC' to disambiguate."
"".format(target)
)
# Specifying quarter, campaign, or sector should constrain the mission
if quarter:
mission = "Kepler"
if campaign:
mission = "K2"
if sector:
mission = "TESS"
# Ensure mission is a list
mission = np.atleast_1d(mission).tolist()
# Avoid filtering on `provenance_name` if `author` equals "any" or "all"
if provenance_name in ("any", "all") or provenance_name is None:
provenance_name = None
else:
provenance_name = np.atleast_1d(provenance_name).tolist()
# Speed up by restricting the MAST query if we don't want FFI image data
if filetype in ["Lightcurve", "Target Pixel"]:
# At MAST, non-FFI Kepler pipeline products are known as "cube" products,
# and non-FFI TESS pipeline products are listed as "timeseries".
extra_query_criteria["dataproduct_type"] = ["cube", "timeseries"]
# Make sure `search_tesscut` always performs a cone search (i.e. it is always
# passed a radius value), because strict target name search does not apply.
if filetype.lower() == "ffi" and radius is None:
radius = 0.0001 * u.arcsec
observations = _query_mast(
target,
radius=radius,
project=mission,
provenance_name=provenance_name,
exptime=exptime,
sequence_number=campaign or sector,
**extra_query_criteria,
)
log.debug(
"MAST found {} observations. "
"Now querying MAST for the corresponding data products."
"".format(len(observations))
)
if len(observations) == 0:
raise SearchError('No data found for target "{}".'.format(target))
# Light curves and target pixel files
if filetype.lower() != "ffi":
from astroquery.mast import Observations
products = Observations.get_product_list(observations)
result = join(
observations,
products,
keys="obs_id",
join_type="right",
uniq_col_name="{col_name}{table_name}",
table_names=["", "_products"],
)
result.sort(["distance", "obs_id"])
# Add the user-friendly 'author' column (synonym for 'provenance_name')
result["author"] = result["provenance_name"]
# Add the user-friendly 'mission' column
result["mission"] = None
obs_prefix = {"Kepler": "Quarter", "K2": "Campaign", "TESS": "Sector"}
for idx in range(len(result)):
obs_project = result["project"][idx]
tmp_seqno = result["sequence_number"][idx]
obs_seqno = f"{tmp_seqno:02d}" if tmp_seqno else ""
# Kepler sequence_number values were not populated at the time of
# writing this code, so we parse them from the description field.
if obs_project == "Kepler" and result["sequence_number"].mask[idx]:
try:
tmp_seqno = re.findall(r".*Q(\d+)", result["description"][idx])[0]
obs_seqno = f"{int(tmp_seqno):02d}"
except IndexError:
obs_seqno = ""
# K2 campaigns 9, 10, and 11 were split into two sections, which are
# listed separately in the table with suffixes "a" and "b"
if obs_project == "K2" and result["sequence_number"][idx] in [9, 10, 11]:
for half, letter in zip([1, 2], ["a", "b"]):
if f"c{tmp_seqno}{half}" in result["productFilename"][idx]:
obs_seqno = f"{int(tmp_seqno):02d}{letter}"
result["mission"][idx] = "{} {} {}".format(
obs_project, obs_prefix.get(obs_project, ""), obs_seqno
)
masked_result = _filter_products(
result,
filetype=filetype,
campaign=campaign,
quarter=quarter,
exptime=exptime,
project=mission,
provenance_name=provenance_name,
month=month,
sector=sector,
limit=limit,
)
log.debug("MAST found {} matching data products.".format(len(masked_result)))
masked_result["distance"].info.format = ".1f" # display <0.1 arcsec
return SearchResult(masked_result)
# Full Frame Images
else:
cutouts = []
for idx in np.where(["TESS FFI" in t for t in observations["target_name"]])[0]:
# if target passed in is a SkyCoord object, convert to RA, dec pair
if isinstance(target, SkyCoord):
target = "{}, {}".format(target.ra.deg, target.dec.deg)
# pull sector numbers
s = observations["sequence_number"][idx]
# if the desired sector is available, add a row
if s in np.atleast_1d(sector) or sector is None:
cutouts.append(
{
"description": f"TESS FFI Cutout (sector {s})",
"mission": f"TESS Sector {s:02d}",
"target_name": str(target),
"targetid": str(target),
"t_min": observations["t_min"][idx],
"exptime": observations["exptime"][idx],
"productFilename": "TESScut",
"provenance_name": "TESScut",
"author": "TESScut",
"distance": 0.0,
"sequence_number": s,
"project": "TESS",
"obs_collection": "TESS",
}
)
if len(cutouts) > 0:
log.debug("Found {} matching cutouts.".format(len(cutouts)))
masked_result = Table(cutouts)
masked_result.sort(["distance", "sequence_number"])
else:
masked_result = None
return SearchResult(masked_result)
def _query_mast(
target,
radius=None,
project=("Kepler", "K2", "TESS"),
provenance_name=None,
exptime=(0, 9999),
sequence_number=None,
**extra_query_criteria,
):
"""Helper function which wraps `astroquery.mast.Observations.query_criteria()`
to return a table of all Kepler/K2/TESS observations of a given target.
By default only the official data products are returned, but this can be
adjusted by adding alternative data product names into `provenance_name`.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
See docstrings above.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
project : str, list of str
Mission name. Typically 'Kepler', 'K2', or 'TESS'.
This parameter is case-insensitive.
provenance_name : str, list of str
Provenance of the observation. Common options include 'Kepler', 'K2',
'SPOC', 'K2SFF', 'EVEREST', 'KEPSEISMIC'.
This parameter is case-insensitive.
exptime : (float, float) tuple
Exposure time range in seconds. Common values include `(59, 61)`
for Kepler short cadence and `(1799, 1801)` for Kepler long cadence.
sequence_number : int, list of int
Quarter, Campaign, or Sector number.
**extra_query_criteria : kwargs
Extra criteria to be passed to `astroquery.mast.Observations.query_criteria`.
Returns
-------
obs : astropy.Table
Table detailing the available observations on MAST.
"""
# Local astroquery import because the package is not used elsewhere
from astroquery.exceptions import NoResultsWarning, ResolverError
from astroquery.mast import Observations
# If passed a SkyCoord, convert it to an "ra, dec" string for MAST
if isinstance(target, SkyCoord):
target = "{}, {}".format(target.ra.deg, target.dec.deg)
# We pass the following `query_criteria` to MAST regardless of whether
# we search by position or target name:
query_criteria = {"project": project, **extra_query_criteria}
if provenance_name is not None:
query_criteria["provenance_name"] = provenance_name
if sequence_number is not None:
query_criteria["sequence_number"] = sequence_number
if exptime is not None:
query_criteria["t_exptime"] = exptime
# If an exact KIC ID is passed, we will search by the exact `target_name`
# under which MAST will know the object to prevent source confusion.
# For discussion, see e.g. GitHub issues #148, #718.
exact_target_name = None
target_lower = str(target).lower()
# Was a Kepler target ID passed?
kplr_match = re.match(r"^(kplr|kic) ?(\d+)$", target_lower)
if kplr_match:
exact_target_name = f"kplr{kplr_match.group(2).zfill(9)}"
# Was a K2 target ID passed?
ktwo_match = re.match(r"^(ktwo|epic) ?(\d+)$", target_lower)
if ktwo_match:
exact_target_name = f"ktwo{ktwo_match.group(2).zfill(9)}"
# Was a TESS target ID passed?
tess_match = re.match(r"^(tess|tic) ?(\d+)$", target_lower)
if tess_match:
exact_target_name = f"{tess_match.group(2).zfill(9)}"
if exact_target_name and radius is None:
log.debug(
"Started querying MAST for observations with the exact "
f"target_name='{exact_target_name}'."
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=NoResultsWarning)
warnings.filterwarnings("ignore", message="t_exptime is continuous")
obs = Observations.query_criteria(
target_name=exact_target_name, **query_criteria
)
if len(obs) > 0:
# We use `exptime` as an alias for `t_exptime`
obs["exptime"] = obs["t_exptime"]
# astroquery does not report distance when querying by `target_name`;
# we add it here so that the table returned always has this column.
obs["distance"] = 0.0
return obs
else:
log.debug(f"No observations found. Now performing a cone search instead.")
# If the above did not return a result, then do a cone search using the MAST name resolver
# `radius` defaults to 0.0001 and unit arcsecond
if radius is None:
radius = 0.0001 * u.arcsec
elif not isinstance(radius, u.quantity.Quantity):
radius = radius * u.arcsec
query_criteria["radius"] = str(radius.to(u.deg))
try:
log.debug(
"Started querying MAST for observations within "
f"{radius.to(u.arcsec)} arcsec of objectname='{target}'."
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=NoResultsWarning)
warnings.filterwarnings("ignore", message="t_exptime is continuous")
obs = Observations.query_criteria(objectname=target, **query_criteria)
obs.sort("distance")
# We use `exptime` as an alias for `t_exptime`
obs["exptime"] = obs["t_exptime"]
return obs
except ResolverError as exc:
# MAST failed to resolve the object name to sky coordinates
raise SearchError(exc) from exc
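# A minimal sketch (not part of this library): how the identifier parsing in
# `_query_mast` above maps user input onto exact MAST `target_name` values. The
# helper name is hypothetical and the identifiers are arbitrary examples of the
# KIC / EPIC / TIC formats rather than specific recommended targets.
def _example_exact_target_names():
    return {
        "KIC 11904151": "kplr011904151",    # Kepler prime: zero-padded to 9 digits, 'kplr' prefix
        "EPIC 201563140": "ktwo201563140",  # K2: zero-padded to 9 digits, 'ktwo' prefix
        "TIC 261136679": "261136679",       # TESS: bare zero-padded identifier
    }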
def _filter_products(
products,
campaign=None,
quarter=None,
month=None,
sector=None,
exptime=None,
limit=None,
project=("Kepler", "K2", "TESS"),
provenance_name=None,
filetype="Target Pixel",
):
"""Helper function which filters a SearchResult's products table by one or
more criteria.
Parameters
----------
products : `astropy.table.Table` object
Astropy table containing data products returned by MAST
campaign : int or list
Desired campaign of observation for data products
quarter : int or list
Desired quarter of observation for data products
month : int or list
Desired month of observation for data products
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
filetype : str
Type of files queried at MAST (`Target Pixel` or `Lightcurve`).
Returns
-------
products : `astropy.table.Table` object
Masked astropy table containing desired data products
"""
if provenance_name is None: # apply all filters
provenance_lower = ("kepler", "k2", "spoc")
else:
provenance_lower = [p.lower() for p in np.atleast_1d(provenance_name)]
mask = np.ones(len(products), dtype=bool)
# Kepler data needs a special filter for quarter and month
mask &= ~np.array(
[prov.lower() == "kepler" for prov in products["provenance_name"]]
)
if "kepler" in provenance_lower and campaign is None and sector is None:
mask |= _mask_kepler_products(products, quarter=quarter, month=month)
# HLSP products need to be filtered by extension
if filetype.lower() == "lightcurve":
mask &= np.array(
[uri.lower().endswith("lc.fits") for uri in products["productFilename"]]
)
elif filetype.lower() == "target pixel":
mask &= np.array(
[
uri.lower().endswith(("tp.fits", "targ.fits.gz"))
for uri in products["productFilename"]
]
)
elif filetype.lower() == "ffi":
mask &= np.array(["TESScut" in desc for desc in products["description"]])
# Allow only fits files
mask &= np.array(
[
uri.lower().endswith("fits") or uri.lower().endswith("fits.gz")
for uri in products["productFilename"]
]
)
# Filter by cadence
mask &= _mask_by_exptime(products, exptime)
products = products[mask]
products.sort(["distance", "productFilename"])
if limit is not None:
return products[0:limit]
return products
def _mask_kepler_products(products, quarter=None, month=None):
"""Returns a mask flagging the Kepler products that match the criteria."""
mask = np.array([proj.lower() == "kepler" for proj in products["provenance_name"]])
if mask.sum() == 0:
return mask
# Identify quarter by the description.
# This is necessary because the `sequence_number` field was not populated
# for Kepler prime data at the time of writing this function.
if quarter is not None:
quarter_mask = np.zeros(len(products), dtype=bool)
for q in np.atleast_1d(quarter):
quarter_mask |= np.array(
[
desc.lower().replace("-", "").endswith("q{}".format(q))
for desc in products["description"]
]
)
mask &= quarter_mask
# For Kepler short cadence data the month can be specified
if month is not None:
month = np.atleast_1d(month)
# Get the short cadence date lookup table.
table = ascii.read(
os.path.join(PACKAGEDIR, "data", "short_cadence_month_lookup.csv")
)
# The following line is needed for systems where the default integer type
# is int32 (e.g. Windows/Appveyor), the column will then be interpreted
# as string which makes the test fail.
table["StartTime"] = table["StartTime"].astype(str)
# Grab the dates of each of the short cadence files.
# Make sure every entry has the correct month
is_shortcadence = mask & np.asarray(
["Short" in desc for desc in products["description"]]
)
for idx in np.where(is_shortcadence)[0]:
quarter = int(
products["description"][idx].split(" - ")[-1][1:].replace("-", "")
)
date = products["dataURI"][idx].split("/")[-1].split("-")[1].split("_")[0]
permitted_dates = []
for m in month:
try:
permitted_dates.append(
table["StartTime"][
np.where(
(table["Month"] == m) & (table["Quarter"] == quarter)
)[0][0]
]
)
except IndexError:
pass
if not (date in permitted_dates):
mask[idx] = False
return mask
def _mask_by_exptime(products, exptime):
"""Helper function to filter by exposure time."""
mask = np.ones(len(products), dtype=bool)
if isinstance(exptime, (int, float)):
mask &= products["exptime"] == exptime
elif isinstance(exptime, str):
exptime = exptime.lower()
if exptime in ["fast"]:
mask &= products["exptime"] < 60
elif exptime in ["short"]:
mask &= (products["exptime"] >= 60) & (products["exptime"] < 300)
elif exptime in ["long", "ffi"]:
mask &= products["exptime"] >= 300
return mask
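# A minimal sketch (not part of this library): how the exposure-time categories in
# `_mask_by_exptime` above translate into a boolean mask over a toy table (uses the
# `Table` import at the top of this module). The helper name and values are
# illustrative assumptions.
def _example_exptime_mask():
    demo = Table({"exptime": [20.0, 120.0, 1800.0]})  # 20 s (fast), 2 min (short), 30 min (long)
    return {
        "fast": _mask_by_exptime(demo, "fast"),    # -> [ True, False, False]
        "short": _mask_by_exptime(demo, "short"),  # -> [False,  True, False]
        "long": _mask_by_exptime(demo, "long"),    # -> [False, False,  True]
    }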
def _resolve_object(target):
"""Ask MAST to resolve an object string to a set of coordinates."""
from astroquery.mast import MastClass
# Note: `_resolve_object` was renamed `resolve_object` in astroquery 0.3.10 (2019)
return MastClass().resolve_object(target)
|
1220b9034e76168044ed803ab505bd1406d09f36
|
df1254b56f35b24644e00493c50d4b6eb3c15b7b
|
/colour/io/image.py
|
4e067e4fefcb3609cfba139ccdb6474e1e51ef56
|
[
"BSD-3-Clause"
] |
permissive
|
colour-science/colour
|
908400b227cf81668675e41099256ce50b23ae4b
|
1fdf3b3042922e8d4f86b989b00a06e7e5d81102
|
refs/heads/develop
| 2023-09-01T23:17:07.186869
| 2023-08-26T09:40:45
| 2023-08-26T09:40:45
| 17,114,363
| 1,756
| 301
|
BSD-3-Clause
| 2023-09-14T10:24:37
| 2014-02-23T18:55:40
|
Python
|
UTF-8
|
Python
| false
| false
| 22,058
|
py
|
image.py
|
"""
Image Input / Output Utilities
==============================
Defines the image related input / output utilities objects.
"""
from __future__ import annotations
import numpy as np
from dataclasses import dataclass, field
from colour.hints import (
Any,
ArrayLike,
DTypeReal,
Literal,
NDArrayFloat,
NDArrayReal,
Optional,
Sequence,
TYPE_CHECKING,
Tuple,
Type,
cast,
)
from colour.utilities import (
CanonicalMapping,
as_float_array,
as_int_array,
attest,
is_openimageio_installed,
filter_kwargs,
optional,
required,
tstack,
usage_warning,
validate_method,
)
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "colour-developers@colour-science.org"
__status__ = "Production"
__all__ = [
"BitDepth_Specification",
"ImageAttribute_Specification",
"convert_bit_depth",
"read_image_OpenImageIO",
"read_image_Imageio",
"READ_IMAGE_METHODS",
"read_image",
"write_image_OpenImageIO",
"write_image_Imageio",
"WRITE_IMAGE_METHODS",
"write_image",
"as_3_channels_image",
]
@dataclass(frozen=True)
class BitDepth_Specification:
"""
Define a bit-depth specification.
Parameters
----------
name
Attribute name.
numpy
Object representing the *Numpy* bit-depth.
openimageio
Object representing the *OpenImageIO* bit-depth.
"""
name: str
numpy: Type[DTypeReal]
openimageio: Any
@dataclass
class ImageAttribute_Specification:
"""
Define an image specification attribute.
Parameters
----------
name
Attribute name.
value
Attribute value.
type_
Attribute type as an *OpenImageIO* :class:`TypeDesc` class instance.
"""
name: str
value: Any
type_: Optional[ # noqa: UP007
OpenImageIO.TypeDesc # pyright: ignore # noqa: F821
] = field( # noqa: RUF100
default_factory=lambda: None
)
if is_openimageio_installed(): # pragma: no cover
from OpenImageIO import UINT8, UINT16, HALF, FLOAT, DOUBLE
MAPPING_BIT_DEPTH: CanonicalMapping = CanonicalMapping(
{
"uint8": BitDepth_Specification("uint8", np.uint8, UINT8),
"uint16": BitDepth_Specification("uint16", np.uint16, UINT16),
"float16": BitDepth_Specification("float16", np.float16, HALF),
"float32": BitDepth_Specification("float32", np.float32, FLOAT),
"float64": BitDepth_Specification("float64", np.float64, DOUBLE),
}
)
if not TYPE_CHECKING and hasattr(np, "float128"): # pragma: no cover
MAPPING_BIT_DEPTH["float128"] = BitDepth_Specification(
"float128", np.float128, DOUBLE
)
else: # pragma: no cover
MAPPING_BIT_DEPTH: CanonicalMapping = CanonicalMapping(
{
"uint8": BitDepth_Specification("uint8", np.uint8, None),
"uint16": BitDepth_Specification("uint16", np.uint16, None),
"float16": BitDepth_Specification("float16", np.float16, None),
"float32": BitDepth_Specification("float32", np.float32, None),
"float64": BitDepth_Specification("float64", np.float64, None),
}
)
if not TYPE_CHECKING and hasattr(np, "float128"): # pragma: no cover
MAPPING_BIT_DEPTH["float128"] = BitDepth_Specification(
"float128", np.float128, None
)
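# A minimal sketch (not part of this library): how `convert_bit_depth` below resolves a
# target dtype from MAPPING_BIT_DEPTH. The helper name is an illustrative assumption.
def _example_bit_depth_lookup(bit_depth="float32"):
    specification = MAPPING_BIT_DEPTH[bit_depth]
    # Each entry ties a bit-depth name to its Numpy dtype (and OpenImageIO type when available).
    return specification.name, specification.numpy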
def convert_bit_depth(
a: ArrayLike,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
) -> NDArrayReal:
"""
Convert given array to given bit-depth; the current bit-depth of the array
is used to determine the appropriate conversion path.
Parameters
----------
a
Array to convert to given bit-depth.
bit_depth
Bit-depth.
Returns
-------
:class:`numpy.ndarray`
Converted array.
Examples
--------
>>> a = np.array([0.0, 0.5, 1.0])
>>> convert_bit_depth(a, "uint8")
array([ 0, 128, 255], dtype=uint8)
>>> convert_bit_depth(a, "uint16")
array([ 0, 32768, 65535], dtype=uint16)
>>> convert_bit_depth(a, "float16")
array([ 0. , 0.5, 1. ], dtype=float16)
>>> a = np.array([0, 128, 255], dtype=np.uint8)
>>> convert_bit_depth(a, "uint16")
array([ 0, 32896, 65535], dtype=uint16)
>>> convert_bit_depth(a, "float32") # doctest: +ELLIPSIS
array([ 0. , 0.501960..., 1. ], dtype=float32)
"""
a = np.asarray(a)
bit_depths = ", ".join(sorted(MAPPING_BIT_DEPTH.keys()))
attest(
bit_depth in bit_depths,
f'Incorrect bit-depth was specified, it must be one of: "{bit_depths}"!',
)
attest(
str(a.dtype) in bit_depths,
f'Image bit-depth must be one of: "{bit_depths}"!',
)
source_dtype = str(a.dtype)
target_dtype = MAPPING_BIT_DEPTH[bit_depth].numpy
if source_dtype == "uint8":
if bit_depth == "uint16":
a = (a * 257).astype(target_dtype)
elif bit_depth in ("float16", "float32", "float64", "float128"):
a = (a / 255).astype(target_dtype)
elif source_dtype == "uint16":
if bit_depth == "uint8":
a = (a / 257).astype(target_dtype)
elif bit_depth in ("float16", "float32", "float64", "float128"):
a = (a / 65535).astype(target_dtype)
elif source_dtype in ("float16", "float32", "float64", "float128"):
if bit_depth == "uint8":
a = np.around(a * 255).astype(target_dtype)
elif bit_depth == "uint16":
a = np.around(a * 65535).astype(target_dtype)
elif bit_depth in ("float16", "float32", "float64", "float128"):
a = a.astype(target_dtype)
return a
@required("OpenImageIO")
def read_image_OpenImageIO(
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
attributes: bool = False,
) -> NDArrayReal | Tuple[NDArrayReal, list]:
"""
Read the image data at given path using *OpenImageIO*.
Parameters
----------
path
Image path.
bit_depth
Returned image bit-depth. The bit-depth conversion behaviour is driven
directly by *OpenImageIO*; this definition only converts to the
relevant data type after reading.
attributes
Whether to return the image attributes.
Returns
-------
:class:`numpy.ndarray` or :class:`tuple`
Image data or tuple of image data and list of
:class:`colour.io.ImageAttribute_Specification` class instances.
Notes
-----
- For convenience, single channel images are squeezed to 2D arrays.
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMS_Test_Pattern.exr",
... )
>>> image = read_image_OpenImageIO(path) # doctest: +SKIP
""" # noqa: D405, D407, D410, D411
from OpenImageIO import ImageInput
path = str(path)
bit_depth_specification = MAPPING_BIT_DEPTH[bit_depth]
image_input = ImageInput.open(path)
specification = image_input.spec()
shape = (
specification.height,
specification.width,
specification.nchannels,
)
image = image_input.read_image(bit_depth_specification.openimageio)
image_input.close()
image = np.array(image, dtype=bit_depth_specification.numpy).reshape(shape)
image = cast(NDArrayReal, np.squeeze(image))
if attributes:
extra_attributes = []
for attribute in specification.extra_attribs:
extra_attributes.append(
ImageAttribute_Specification(
attribute.name, attribute.value, attribute.type
)
)
return image, extra_attributes
else:
return image
def read_image_Imageio(
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
**kwargs: Any,
) -> NDArrayReal:
"""
Read the image data at given path using *Imageio*.
Parameters
----------
path
Image path.
bit_depth
Returned image bit-depth, the image data is converted with
:func:`colour.io.convert_bit_depth` definition after reading the
image.
Other Parameters
----------------
kwargs
        Keyword arguments.
Returns
-------
    :class:`numpy.ndarray`
Image data.
Notes
-----
- For convenience, single channel images are squeezed to 2D arrays.
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMS_Test_Pattern.exr",
... )
>>> image = read_image_Imageio(path)
>>> image.shape # doctest: +SKIP
(1267, 1274, 3)
>>> image.dtype
dtype('float32')
"""
from imageio import imread
image = np.squeeze(imread(path, **kwargs))
return convert_bit_depth(image, bit_depth)
READ_IMAGE_METHODS: CanonicalMapping = CanonicalMapping(
{
"Imageio": read_image_Imageio,
"OpenImageIO": read_image_OpenImageIO,
}
)
READ_IMAGE_METHODS.__doc__ = """
Supported image read methods.
"""
def read_image(
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
method: Literal["Imageio", "OpenImageIO"] | str = "OpenImageIO",
**kwargs: Any,
) -> NDArrayReal:
"""
Read the image data at given path using given method.
Parameters
----------
path
Image path.
bit_depth
Returned image bit-depth, for the *Imageio* method, the image data is
converted with :func:`colour.io.convert_bit_depth` definition after
reading the image, for the *OpenImageIO* method, the bit-depth
conversion behaviour is driven directly by the library, this definition
only converts to the relevant data type after reading.
method
Read method, i.e. the image library used for reading images.
Other Parameters
----------------
attributes
{:func:`colour.io.read_image_OpenImageIO`},
Whether to return the image attributes.
Returns
-------
    :class:`numpy.ndarray`
Image data.
Notes
-----
    -   If the given method is *OpenImageIO* but the library is not available,
        reading will be performed by *Imageio*.
- If the given method is *Imageio*, ``kwargs`` is passed directly to the
wrapped definition.
- For convenience, single channel images are squeezed to 2D arrays.
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMS_Test_Pattern.exr",
... )
>>> image = read_image(path)
>>> image.shape # doctest: +SKIP
(1267, 1274, 3)
>>> image.dtype
dtype('float32')
""" # noqa: D405, D407, D410, D411, D414
method = validate_method(method, tuple(READ_IMAGE_METHODS))
if (
method == "openimageio" and not is_openimageio_installed()
): # pragma: no cover
usage_warning(
'"OpenImageIO" related API features are not available, '
'switching to "Imageio"!'
)
method = "Imageio"
function = READ_IMAGE_METHODS[method]
if method == "openimageio": # pragma: no cover
kwargs = filter_kwargs(function, **kwargs)
return function(path, bit_depth, **kwargs)
@required("OpenImageIO")
def write_image_OpenImageIO(
image: ArrayLike,
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
attributes: Sequence | None = None,
) -> bool:
"""
Write given image data at given path using *OpenImageIO*.
Parameters
----------
image
Image data.
path
Image path.
bit_depth
Bit-depth to write the image at, the bit-depth conversion behaviour is
ruled directly by *OpenImageIO*.
attributes
An array of :class:`colour.io.ImageAttribute_Specification` class
instances used to set attributes of the image.
Returns
-------
:class:`bool`
Definition success.
Examples
--------
Basic image writing:
>>> import os
>>> import colour
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMS_Test_Pattern.exr",
... )
>>> image = read_image(path) # doctest: +SKIP
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMSTestPattern.tif",
... )
>>> write_image_OpenImageIO(image, path) # doctest: +SKIP
True
Advanced image writing while setting attributes:
>>> compression = ImageAttribute_Specification("Compression", "none")
>>> write_image_OpenImageIO(image, path, "uint8", [compression])
... # doctest: +SKIP
True
Writing an "ACES" compliant "EXR" file:
>>> if is_openimageio_installed(): # doctest: +SKIP
... from OpenImageIO import TypeDesc
...
... chromaticities = (
... 0.7347,
... 0.2653,
... 0.0,
... 1.0,
... 0.0001,
... -0.077,
... 0.32168,
... 0.33767,
... )
... attributes = [
... ImageAttribute_Specification("acesImageContainerFlag", True),
... ImageAttribute_Specification(
... "chromaticities", chromaticities, TypeDesc("float[8]")
... ),
... ImageAttribute_Specification("compression", "none"),
... ]
... write_image_OpenImageIO(image, path, attributes=attributes)
...
""" # noqa: D405, D407, D410, D411
from OpenImageIO import ImageOutput, ImageSpec
image = as_float_array(image)
path = str(path)
attributes = cast(list, optional(attributes, []))
bit_depth_specification = MAPPING_BIT_DEPTH[bit_depth]
if bit_depth_specification.numpy in [np.uint8, np.uint16]:
minimum, maximum = (
np.iinfo(bit_depth_specification.numpy).min,
np.iinfo(bit_depth_specification.numpy).max,
)
image = np.clip(image * maximum, minimum, maximum)
image = as_int_array(image, bit_depth_specification.numpy)
image = image.astype(bit_depth_specification.numpy)
if image.ndim == 2:
height, width = image.shape
channels = 1
else:
height, width, channels = image.shape
specification = ImageSpec(
width, height, channels, bit_depth_specification.openimageio
)
for attribute in attributes:
name = str(attribute.name)
value = (
str(attribute.value)
if isinstance(attribute.value, str)
else attribute.value
)
type_ = attribute.type_
if attribute.type_ is None:
specification.attribute(name, value)
else:
specification.attribute(name, type_, value)
image_output = ImageOutput.create(path)
image_output.open(path, specification)
image_output.write_image(image)
image_output.close()
return True
def write_image_Imageio(
image: ArrayLike,
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
**kwargs: Any,
) -> bytes | None:
"""
Write given image data at given path using *Imageio*.
Parameters
----------
image
Image data.
path
Image path.
bit_depth
Bit-depth to write the image at, the image data is converted with
:func:`colour.io.convert_bit_depth` definition prior to writing the
image.
Other Parameters
----------------
kwargs
        Keyword arguments.
Returns
-------
:class:`bool`
Definition success.
Notes
-----
    -   It is possible to control how the images are saved by the *Freeimage*
backend by using the ``flags`` keyword argument and passing a desired
value. See the *Load / Save flag constants* section in
https://sourceforge.net/p/freeimage/svn/HEAD/tree/FreeImage/trunk/\
Source/FreeImage.h
Examples
--------
>>> import os
>>> import colour
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMS_Test_Pattern.exr",
... )
>>> image = read_image(path) # doctest: +SKIP
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMSTestPattern.tif",
... )
>>> write_image_Imageio(image, path) # doctest: +SKIP
True
"""
from imageio import imwrite
if all(
[
path.lower().endswith(".exr"),
bit_depth in ("float32", "float64", "float128"),
]
):
# Ensures that "OpenEXR" images are saved as "Float32" according to the
# image bit-depth.
kwargs["flags"] = 0x0001
image = convert_bit_depth(image, bit_depth)
return imwrite(path, image, **kwargs)
WRITE_IMAGE_METHODS: CanonicalMapping = CanonicalMapping(
{
"Imageio": write_image_Imageio,
"OpenImageIO": write_image_OpenImageIO,
}
)
WRITE_IMAGE_METHODS.__doc__ = """
Supported image write methods.
"""
def write_image(
image: ArrayLike,
path: str,
bit_depth: Literal[
"uint8", "uint16", "float16", "float32", "float64", "float128"
] = "float32",
method: Literal["Imageio", "OpenImageIO"] | str = "OpenImageIO",
**kwargs: Any,
) -> bool:
"""
Write given image data at given path using given method.
Parameters
----------
image
Image data.
path
Image path.
bit_depth
Bit-depth to write the image at, for the *Imageio* method, the image
data is converted with :func:`colour.io.convert_bit_depth` definition
prior to writing the image.
method
Write method, i.e. the image library used for writing images.
Other Parameters
----------------
attributes
{:func:`colour.io.write_image_OpenImageIO`},
An array of :class:`colour.io.ImageAttribute_Specification` class
instances used to set attributes of the image.
Returns
-------
:class:`bool`
Definition success.
Notes
-----
- If the given method is *OpenImageIO* but the library is not available
writing will be performed by *Imageio*.
- If the given method is *Imageio*, ``kwargs`` is passed directly to the
wrapped definition.
    -   It is possible to control how the images are saved by the *Freeimage*
backend by using the ``flags`` keyword argument and passing a desired
value. See the *Load / Save flag constants* section in
https://sourceforge.net/p/freeimage/svn/HEAD/tree/FreeImage/trunk/\
Source/FreeImage.h
Examples
--------
Basic image writing:
>>> import os
>>> import colour
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMS_Test_Pattern.exr",
... )
>>> image = read_image(path) # doctest: +SKIP
>>> path = os.path.join(
... colour.__path__[0],
... "io",
... "tests",
... "resources",
... "CMSTestPattern.tif",
... )
>>> write_image(image, path) # doctest: +SKIP
True
Advanced image writing while setting attributes using *OpenImageIO*:
>>> compression = ImageAttribute_Specification("Compression", "none")
>>> write_image(image, path, bit_depth="uint8", attributes=[compression])
... # doctest: +SKIP
True
""" # noqa: D405, D407, D410, D411, D414
method = validate_method(method, tuple(WRITE_IMAGE_METHODS))
if (
method == "openimageio" and not is_openimageio_installed()
): # pragma: no cover
usage_warning(
'"OpenImageIO" related API features are not available, '
'switching to "Imageio"!'
)
method = "Imageio"
function = WRITE_IMAGE_METHODS[method]
if method == "openimageio": # pragma: no cover
kwargs = filter_kwargs(function, **kwargs)
return function(image, path, bit_depth, **kwargs)
def as_3_channels_image(a: ArrayLike) -> NDArrayFloat:
"""
Convert given array :math:`a` to a 3-channels image-like representation.
Parameters
----------
a
Array :math:`a` to convert to a 3-channels image-like representation.
Returns
-------
    :class:`numpy.ndarray`
3-channels image-like representation of array :math:`a`.
Examples
--------
>>> as_3_channels_image(0.18)
array([[[ 0.18, 0.18, 0.18]]])
>>> as_3_channels_image([0.18])
array([[[ 0.18, 0.18, 0.18]]])
>>> as_3_channels_image([0.18, 0.18, 0.18])
array([[[ 0.18, 0.18, 0.18]]])
>>> as_3_channels_image([[0.18, 0.18, 0.18]])
array([[[ 0.18, 0.18, 0.18]]])
>>> as_3_channels_image([[[0.18, 0.18, 0.18]]])
array([[[ 0.18, 0.18, 0.18]]])
"""
a = as_float_array(a)
if len(a.shape) == 0:
a = tstack([a, a, a])
if a.shape[-1] == 1:
a = tstack([a, a, a])
if len(a.shape) == 1:
a = a[None, None, ...]
elif len(a.shape) == 2:
a = a[None, ...]
return a
|
85a89fc1142a517eadff8cb46260fa0cf350f798
|
f791462fb1286607d16459c1602d133f8d8c8b59
|
/test/ops/test_indexing.py
|
cf8b65f9aeebd27d60b5741e0f3ab1fe46a28494
|
[
"Apache-2.0"
] |
permissive
|
pyro-ppl/numpyro
|
b071ed2bd93be41bafc3da8764c9f5617f996d92
|
ca96eca8e8e1531e71ba559ef7a8ad3b4b68cbc2
|
refs/heads/master
| 2023-09-03T15:56:13.252692
| 2023-08-28T14:32:25
| 2023-08-28T14:32:25
| 170,580,540
| 1,941
| 219
|
Apache-2.0
| 2023-09-04T11:26:11
| 2019-02-13T21:13:59
|
Python
|
UTF-8
|
Python
| false
| false
| 5,498
|
py
|
test_indexing.py
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import itertools
import numpy as np
import pytest
import jax.lax as lax
import jax.numpy as jnp
import jax.random as random
import numpyro.distributions as dist
from numpyro.ops.indexing import Vindex
def z(*shape):
return jnp.zeros(shape, dtype=jnp.int32)
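# Each entry pairs a Vindex indexing expression (kept as a string so the test
# can eval() it) with the shape its result is expected to have; test_shape
# below evaluates the expression and asserts the resulting shape.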
SHAPE_EXAMPLES = [
("Vindex(z())[...]", ()),
("Vindex(z(2))[...]", (2,)),
("Vindex(z(2))[...,0]", ()),
("Vindex(z(2))[...,:]", (2,)),
("Vindex(z(2))[...,z(3)]", (3,)),
("Vindex(z(2))[0]", ()),
("Vindex(z(2))[:]", (2,)),
("Vindex(z(2))[z(3)]", (3,)),
("Vindex(z(2,3))[...]", (2, 3)),
("Vindex(z(2,3))[...,0]", (2,)),
("Vindex(z(2,3))[...,:]", (2, 3)),
("Vindex(z(2,3))[...,z(2)]", (2,)),
("Vindex(z(2,3))[...,z(4,1)]", (4, 2)),
("Vindex(z(2,3))[...,0,0]", ()),
("Vindex(z(2,3))[...,0,:]", (3,)),
("Vindex(z(2,3))[...,0,z(4)]", (4,)),
("Vindex(z(2,3))[...,:,0]", (2,)),
("Vindex(z(2,3))[...,:,:]", (2, 3)),
("Vindex(z(2,3))[...,:,z(4)]", (4, 2)),
("Vindex(z(2,3))[...,z(4),0]", (4,)),
("Vindex(z(2,3))[...,z(4),:]", (4, 3)),
("Vindex(z(2,3))[...,z(4),z(4)]", (4,)),
("Vindex(z(2,3))[...,z(5,1),z(4)]", (5, 4)),
("Vindex(z(2,3))[...,z(4),z(5,1)]", (5, 4)),
("Vindex(z(2,3))[0,0]", ()),
("Vindex(z(2,3))[0,:]", (3,)),
("Vindex(z(2,3))[0,z(4)]", (4,)),
("Vindex(z(2,3))[:,0]", (2,)),
("Vindex(z(2,3))[:,:]", (2, 3)),
("Vindex(z(2,3))[:,z(4)]", (4, 2)),
("Vindex(z(2,3))[z(4),0]", (4,)),
("Vindex(z(2,3))[z(4),:]", (4, 3)),
("Vindex(z(2,3))[z(4)]", (4, 3)),
("Vindex(z(2,3))[z(4),z(4)]", (4,)),
("Vindex(z(2,3))[z(5,1),z(4)]", (5, 4)),
("Vindex(z(2,3))[z(4),z(5,1)]", (5, 4)),
("Vindex(z(2,3,4))[...]", (2, 3, 4)),
("Vindex(z(2,3,4))[...,z(3)]", (2, 3)),
("Vindex(z(2,3,4))[...,z(2,1)]", (2, 3)),
("Vindex(z(2,3,4))[...,z(2,3)]", (2, 3)),
("Vindex(z(2,3,4))[...,z(5,1,1)]", (5, 2, 3)),
("Vindex(z(2,3,4))[...,z(2),0]", (2,)),
("Vindex(z(2,3,4))[...,z(5,1),0]", (5, 2)),
("Vindex(z(2,3,4))[...,z(2),:]", (2, 4)),
("Vindex(z(2,3,4))[...,z(5,1),:]", (5, 2, 4)),
("Vindex(z(2,3,4))[...,z(5),0,0]", (5,)),
("Vindex(z(2,3,4))[...,z(5),0,:]", (5, 4)),
("Vindex(z(2,3,4))[...,z(5),:,0]", (5, 3)),
("Vindex(z(2,3,4))[...,z(5),:,:]", (5, 3, 4)),
("Vindex(z(2,3,4))[0,0,z(5)]", (5,)),
("Vindex(z(2,3,4))[0,:,z(5)]", (5, 3)),
("Vindex(z(2,3,4))[0,z(5),0]", (5,)),
("Vindex(z(2,3,4))[0,z(5),:]", (5, 4)),
("Vindex(z(2,3,4))[0,z(5),z(5)]", (5,)),
("Vindex(z(2,3,4))[0,z(5,1),z(6)]", (5, 6)),
("Vindex(z(2,3,4))[0,z(6),z(5,1)]", (5, 6)),
("Vindex(z(2,3,4))[:,0,z(5)]", (5, 2)),
("Vindex(z(2,3,4))[:,:,z(5)]", (5, 2, 3)),
("Vindex(z(2,3,4))[:,z(5),0]", (5, 2)),
("Vindex(z(2,3,4))[:,z(5),:]", (5, 2, 4)),
("Vindex(z(2,3,4))[:,z(5),z(5)]", (5, 2)),
("Vindex(z(2,3,4))[:,z(5,1),z(6)]", (5, 6, 2)),
("Vindex(z(2,3,4))[:,z(6),z(5,1)]", (5, 6, 2)),
("Vindex(z(2,3,4))[z(5),0,0]", (5,)),
("Vindex(z(2,3,4))[z(5),0,:]", (5, 4)),
("Vindex(z(2,3,4))[z(5),:,0]", (5, 3)),
("Vindex(z(2,3,4))[z(5),:,:]", (5, 3, 4)),
("Vindex(z(2,3,4))[z(5),0,z(5)]", (5,)),
("Vindex(z(2,3,4))[z(5,1),0,z(6)]", (5, 6)),
("Vindex(z(2,3,4))[z(6),0,z(5,1)]", (5, 6)),
("Vindex(z(2,3,4))[z(5),:,z(5)]", (5, 3)),
("Vindex(z(2,3,4))[z(5,1),:,z(6)]", (5, 6, 3)),
("Vindex(z(2,3,4))[z(6),:,z(5,1)]", (5, 6, 3)),
]
@pytest.mark.parametrize("expression,expected_shape", SHAPE_EXAMPLES, ids=str)
def test_shape(expression, expected_shape):
result = eval(expression)
assert result.shape == expected_shape
@pytest.mark.parametrize("event_shape", [(), (7,)], ids=str)
@pytest.mark.parametrize("j_shape", [(), (2,), (3, 1), (4, 1, 1), (4, 3, 2)], ids=str)
@pytest.mark.parametrize("i_shape", [(), (2,), (3, 1), (4, 1, 1), (4, 3, 2)], ids=str)
@pytest.mark.parametrize("x_shape", [(), (2,), (3, 1), (4, 1, 1), (4, 3, 2)], ids=str)
def test_value(x_shape, i_shape, j_shape, event_shape):
x = jnp.array(np.random.rand(*(x_shape + (5, 6) + event_shape)))
i = dist.Categorical(jnp.ones((5,))).sample(random.PRNGKey(1), i_shape)
j = dist.Categorical(jnp.ones((6,))).sample(random.PRNGKey(2), j_shape)
if event_shape:
actual = Vindex(x)[..., i, j, :]
else:
actual = Vindex(x)[..., i, j]
shape = lax.broadcast_shapes(x_shape, i_shape, j_shape)
x = jnp.broadcast_to(x, shape + (5, 6) + event_shape)
i = jnp.broadcast_to(i, shape)
j = jnp.broadcast_to(j, shape)
expected = np.empty(shape + event_shape, dtype=x.dtype)
for ind in itertools.product(*map(range, shape)) if shape else [()]:
expected[ind] = x[ind + (i[ind].item(), j[ind].item())]
assert jnp.all(actual == jnp.array(expected, dtype=x.dtype))
@pytest.mark.parametrize("prev_enum_dim,curr_enum_dim", [(-3, -4), (-4, -5), (-5, -3)])
def test_hmm_example(prev_enum_dim, curr_enum_dim):
hidden_dim = 8
probs_x = jnp.array(np.random.rand(hidden_dim, hidden_dim, hidden_dim))
x_prev = jnp.arange(hidden_dim).reshape((-1,) + (1,) * (-1 - prev_enum_dim))
x_curr = jnp.arange(hidden_dim).reshape((-1,) + (1,) * (-1 - curr_enum_dim))
expected = probs_x[
x_prev.reshape(x_prev.shape + (1,)),
x_curr.reshape(x_curr.shape + (1,)),
jnp.arange(hidden_dim),
]
actual = Vindex(probs_x)[x_prev, x_curr, :]
assert jnp.all(actual == expected)
|
539d063d55c642290cb8ff94d97c0f7a9c055192
|
a54f78f026c937b5a8a31180024496748169db91
|
/nibabel/tests/test_onetime.py
|
426702fa4303361718e7dce3310adcfd7feba92c
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"PDDL-1.0"
] |
permissive
|
nipy/nibabel
|
7017e29ee9e3e93d1085d9032c32f6d922b0e43d
|
8fea2a8e50aaf4d8b0d4bfff7a21b132914120ee
|
refs/heads/master
| 2023-08-22T07:12:46.167323
| 2023-08-06T23:46:30
| 2023-08-06T23:46:30
| 791,352
| 544
| 239
|
NOASSERTION
| 2023-09-08T19:10:32
| 2010-07-22T16:28:30
|
Python
|
UTF-8
|
Python
| false
| false
| 838
|
py
|
test_onetime.py
|
import pytest
from nibabel.onetime import auto_attr, setattr_on_read
from nibabel.testing import expires
@expires('5.0.0')
def test_setattr_on_read():
with pytest.deprecated_call():
class MagicProp:
@setattr_on_read
def a(self):
return object()
x = MagicProp()
assert 'a' not in x.__dict__
obj = x.a
assert 'a' in x.__dict__
# Each call to object() produces a unique object. Verify we get the same one every time.
assert x.a is obj
def test_auto_attr():
class MagicProp:
@auto_attr
def a(self):
return object()
x = MagicProp()
assert 'a' not in x.__dict__
obj = x.a
assert 'a' in x.__dict__
# Each call to object() produces a unique object. Verify we get the same one every time.
assert x.a is obj
|
55bec42e9dcf0c29b5bd64d8dd20fc600886683c
|
90d8435ae5908fae54dff1c83d9d9a1cc26fab7d
|
/quantecon/_ce_util.py
|
314e311b36bf2fe83d1a834ece3d22fd04eb5701
|
[
"MIT"
] |
permissive
|
QuantEcon/QuantEcon.py
|
7537a1c9615ed9415e4fef07ff149c3ba65e3c36
|
d1e4d22c873d6e84bb196e5600f555916cc3b86f
|
refs/heads/main
| 2023-09-02T08:34:35.563150
| 2023-08-09T01:37:43
| 2023-08-09T01:37:43
| 8,941,695
| 1,767
| 2,290
|
MIT
| 2023-07-12T05:56:15
| 2013-03-22T00:27:36
|
Python
|
UTF-8
|
Python
| false
| false
| 3,142
|
py
|
_ce_util.py
|
"""
Utility functions used in CompEcon
Based on routines found in the CompEcon toolbox by Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
from functools import reduce
import numpy as np
def ckron(*arrays):
"""
Repeatedly applies the np.kron function to an arbitrary number of
input arrays
Parameters
----------
*arrays : tuple/list of np.ndarray
Returns
-------
out : np.ndarray
The result of repeated kronecker products.
Notes
-----
    Based on the original function `ckron` in the CompEcon toolbox by Miranda
and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational
Economics and Finance, MIT Press, 2002.
"""
return reduce(np.kron, arrays)
def gridmake(*arrays):
"""
Expands one or more vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
*arrays : tuple/list of np.ndarray
Tuple/list of vectors to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
    Based on the original function ``gridmake`` in the CompEcon toolbox by
    Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
if all([i.ndim == 1 for i in arrays]):
d = len(arrays)
if d == 2:
out = _gridmake2(*arrays)
else:
out = _gridmake2(arrays[0], arrays[1])
for arr in arrays[2:]:
out = _gridmake2(out, arr)
return out
else:
raise NotImplementedError("Come back here")
def _gridmake2(x1, x2):
"""
Expands two vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
x1 : np.ndarray
First vector to be expanded.
x2 : np.ndarray
Second vector to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
    Based on the original function ``gridmake2`` in the CompEcon toolbox by
Miranda and Fackler.
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
"""
if x1.ndim == 1 and x2.ndim == 1:
return np.column_stack([np.tile(x1, x2.shape[0]),
np.repeat(x2, x1.shape[0])])
elif x1.ndim > 1 and x2.ndim == 1:
first = np.tile(x1, (x2.shape[0], 1))
second = np.repeat(x2, x1.shape[0])
return np.column_stack([first, second])
else:
raise NotImplementedError("Come back here")
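# Illustrative usage of ``gridmake`` (not part of the original module; 1-d
# integer inputs assumed):
# >>> gridmake(np.array([1, 2]), np.array([3, 4]))
# array([[1, 3],
#        [2, 3],
#        [1, 4],
#        [2, 4]])
# The first column cycles fastest, matching the CompEcon convention.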
|
55c07c751ba2f44b82a56afe1d5aae54983b7b8f
|
a70cb68b4a1abcdad75ce8840254fa5881816901
|
/bin/alistadhoc
|
11eba44112c5ecca42c323d3b7b7a1bf7e69d4d4
|
[] |
no_license
|
sao-eht/eat
|
8b0841bcd247b40b438559dba412f061db24fdff
|
6c06e4904ba4c87e35e5f5ef490e93a862d11a80
|
refs/heads/master
| 2023-08-17T07:55:43.966646
| 2023-08-15T04:13:00
| 2023-08-15T04:13:00
| 39,165,032
| 129
| 31
| null | 2020-09-05T21:04:08
| 2015-07-15T22:53:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,736
|
alistadhoc
|
#!/usr/bin/env python
# create fourfit ad-hoc phase file from single baseline
# 2017-03-15 Lindy Blackburn
from eat.io import hops, util
import numpy as np
import pandas as pd
import argparse
fmt = {
'days':'{:010.6f}'.format,
'phase_unwrap':'{:6.1f}'.format
}
parser = argparse.ArgumentParser()
parser.add_argument('filename', help='alist txt file')
parser.add_argument('nchan', help='number of channels', nargs='?', type=int, default=60)
parser.add_argument('-f', '--flip', help='flip phases, for use with the REM site (default is REF)', action="store_true"),
args = parser.parse_args()
# read alist file (autodetect version 5 or 6)
a = hops.read_alist(args.filename)
util.add_days(a)
"""
Note: according to doc/fourfit/file_based_pcal.tex,
The time points are considered to be either instantaneous values, or the
result of a linear average of the phase, symmetric about the specified epoch.
The algorithm within fourfit performs a linear interpolation between the
specified points, and then finds the average value of the piece-wise linear
function so derived over each accumulation period in a fringe fit. If the ap
data extends beyond the range of the piecewise linear pcal function, then the
function is extrapolated linearly (if possible) to cover the ap.
so an effort should be made to accurately timestamp the middle of the segment.
add_days converts straight from timetag, which is halfway into scan for fringex
output, but rounded (?) to the nearest second boundary
"""
a['phase_unwrap'] = (-1. if args.flip else 1.) * np.unwrap(a.resid_phas, 180.)
# fmt = ['{:10.6f}'.format,] + ['{:6.1f}'.format,]*args.nchan
print(a[['days',] + ['phase_unwrap',]*args.nchan].to_string(formatters=fmt, header=False, index=False))
|
|
a4fc2c5e7cfb81c8e68a9a9a1d7c643739d6bdee
|
90e76adae07c81392d64fdfcb95f659e8a0c3f11
|
/tests/functional/docs/test_streaming_body.py
|
c910d3b3fcd74989d62ed4c47832eb069121a983
|
[
"Apache-2.0",
"MPL-2.0",
"MIT"
] |
permissive
|
boto/botocore
|
b9468d08c83372cf6930643a15f87801b79ffddd
|
7275c5d6e9273caf3804e0ce9491af080518798c
|
refs/heads/develop
| 2023-09-01T18:11:40.617674
| 2023-08-31T18:58:50
| 2023-08-31T18:58:50
| 6,670,942
| 1,289
| 1,234
|
Apache-2.0
| 2023-09-13T17:23:42
| 2012-11-13T13:25:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,908
|
py
|
test_streaming_body.py
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore import xform_name
from botocore.docs.service import ServiceDocumenter
from tests.functional.docs import BaseDocsFunctionalTest
class TestStreamingBodyDocumentation(BaseDocsFunctionalTest):
def test_all_streaming_body_are_properly_documented(self):
for service in self._session.get_available_services():
client = self._session.create_client(
service,
region_name='us-east-1',
aws_access_key_id='foo',
aws_secret_access_key='bar',
)
service_model = client.meta.service_model
for operation in service_model.operation_names:
operation_model = service_model.operation_model(operation)
if operation_model.has_streaming_output:
self.assert_streaming_body_is_properly_documented(
service, xform_name(operation)
)
def assert_streaming_body_is_properly_documented(self, service, operation):
ServiceDocumenter(
service, self._session, self.root_services_path
).document_service()
contents = self.get_client_method_contents(service, operation)
method_docs = self.get_method_document_block(operation, contents)
self.assert_contains_line('StreamingBody', method_docs)
|
cf89691204d338ce72a144a62171e73f59dfbf84
|
24db6985a016c3e4767c95ca51190e659d0847cd
|
/tjctf2020/difficult_decryption/sol.py
|
00707e8ba21441021e5b3f9efba6040b43ed91ae
|
[
"MIT"
] |
permissive
|
datajerk/ctf-write-ups
|
463f53db224410a51df481b9e41b7777a09f3e2c
|
c33815911de3f4a66cbafbf5f12d7b57239250d9
|
refs/heads/master
| 2022-09-30T02:29:44.097435
| 2022-09-05T02:16:19
| 2022-09-05T02:16:19
| 204,361,251
| 136
| 36
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
sol.py
|
#!/usr/bin/python3
from sympy.ntheory.residue_ntheory import discrete_log
M=491988559103692092263984889813697016406
P=232042342203461569340683568996607232345
B=5
A=discrete_log(M,P,B)
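# discrete_log(M, P, B) returns A such that B**A % M == P, i.e. it recovers the
# secret exponent from the public value P; the shared Diffie-Hellman key
# pow(bobkey, A, M) is then XORed with the ciphertext to recover the plaintext.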
message = 12259991521844666821961395299843462461536060465691388049371797540470
bobkey = 76405255723702450233149901853450417505
text = bytes.fromhex(hex(pow(bobkey, A, M) ^ message)[2:]).decode('ASCII')
print(text)
|
999749943ca9c6a8dcfe6dc52488ce21c3b36f45
|
9d0228f3f7ee9cee0794319d4affc161b0a7adc2
|
/qmpy/analysis/griddata.py
|
e6e3defc0ab6e4ca5a9252a8edf9068cbbc89211
|
[
"MIT"
] |
permissive
|
wolverton-research-group/qmpy
|
db8a450a5708aac63aa39e104745b5cb0a4fa930
|
dede5bdf4aa3ea1187a7bc273e86336c24aadb25
|
refs/heads/master
| 2023-01-24T17:18:48.335699
| 2022-08-23T01:12:29
| 2022-08-23T01:12:29
| 18,248,720
| 124
| 65
|
MIT
| 2023-01-11T02:04:51
| 2014-03-29T19:18:53
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,196
|
py
|
griddata.py
|
# qmpy/analysis/griddata
import numpy as np
import numpy.linalg as la
import qmpy.utils as utils
import itertools
class GridData:
"""
Container for 3d data, e.g. charge density or electron localization
function.
"""
def __init__(self, data, lattice=None):
"""
Arguments:
data: M x N x O sequence of data.
            lattice: 3 x 3 array of lattice vectors (defaults to the
                identity matrix).
"""
self.data = np.array(data)
self.grads = np.gradient(data)
self.mesh = np.array(self.data.shape)
self.spacing = 1.0 / self.mesh
if lattice is None:
lattice = np.eye(3)
self.lattice = lattice
self.inv = la.inv(lattice)
def ind_to_cart(self, ind):
"""
Converts an [i,j,k] index to [X,Y,Z] cartesian coordinate.
"""
return np.dot(self.spacing * ind, self.lattice)
def ind_to_coord(self, ind):
"""
        Converts an [i,j,k] index to [x,y,z] fractional coordinate.
"""
return utils.wrap(self.spacing * ind)
def cart_to_coord(self, cart):
return utils.wrap(np.dot(self.inv.T, cart))
def interpolate(self, point, cart=False):
"""
Calculates the value at `point` using trilinear interpolation.
Arguments:
point: point to evaluate the value at.
Keyword Arguments:
cart: If True, the point is taken as a cartesian coordinate. If
not, it is assumed to be in fractional coordinates. default=False.
"""
if cart:
point = self.cart_to_coord(point)
point = utils.wrap(point)
x0, y0, z0 = (
int(np.floor(point[0] * self.mesh[0])),
int(np.floor(point[1] * self.mesh[1])),
int(np.floor(point[2] * self.mesh[2])),
)
x, y, z = (
(point[0] * self.mesh[0]) % 1,
(point[1] * self.mesh[1]) % 1,
(point[2] * self.mesh[2]) % 1,
)
x1, y1, z1 = (
(x0 + 1) % self.mesh[0],
(y0 + 1) % self.mesh[1],
(z0 + 1) % self.mesh[2],
)
interp_val = (
self.data[x0, y0, z0] * (1 - x) * (1 - y) * (1 - z)
+ self.data[x1, y0, z0] * x * (1 - y) * (1 - z)
+ self.data[x0, y1, z0] * (1 - x) * y * (1 - z)
+ self.data[x0, y0, z1] * (1 - x) * (1 - y) * z
+ self.data[x1, y1, z0] * x * y * (1 - z)
+ self.data[x1, y0, z1] * x * (1 - y) * z
+ self.data[x0, y1, z1] * (1 - x) * y * z
+ self.data[x1, y1, z1] * x * y * z
)
return interp_val
def local_min(self, index):
"""
Starting from `index` find the local value minimum.
Returns:
index: shape (3,) index of local minimum.
value: Value of grid at the local minimum.
"""
## TODO: support inputs of 'coord' or 'cart'
neighbors = list(itertools.permutations([-1, 0, 1], r=3))
neighbors = [np.array(index) + n for n in neighbors]
neighbors = [n % self.mesh for n in neighbors]
values = [self.data[tuple(n)] for n in neighbors]
print(values)
lowest = np.argsort(values)[0]
print(lowest)
if values[lowest] < self.data[tuple(index)]:
return self.local_min(neighbors[lowest])
return index, self.data[tuple(index)]
def find_min_coord(self, N=1):
"""
Find the `N` lowest valued indices.
"""
coords = []
coord_vector = self.data.flatten()
sorted_inds = list(coord_vector.argsort())
count = 0
while N > 0:
            min_ind = np.unravel_index(sorted_inds[count], self.mesh)
count += 1
if (
self.local_min(min_ind)
and self.data[min_ind[0], min_ind[1], min_ind[2]] > 0
):
coords.append(np.array(min_ind) * self.spacing)
N -= 1
return coords
def path(self, origin, end):
"""
Gets a 1D array of values for a line connecting `origin` and `end`.
"""
path_dens = []
origin = np.array([float(i) for i in origin])
end = np.array([float(i) for i in end])
result = []
for i in np.mgrid[0:1:50j]:
point = (1 - i) * origin + i * end
result.append((i, self.interpolate(point)))
return result
def slice(self, point, orientation):
"""
Return a 2D array of values for a slice through the GridData passing
through `point` with normal vector `orientation`.
"""
res = int(max(self.mesh) / 3.0)
orientation = [float(x) for x in orientation]
point = [float(x) for x in point]
slice_vals = np.zeros((res, res))
slice_coords = np.zeros((res, res))
a, b, c = orientation
x0, y0, z0 = point
if c != 0:
for i in range(res):
for j in range(res):
x = float(i) / float(res)
y = float(j) / float(res)
slice_coords[i, j] = (-(a * (x - x0) + b * (y - y0)) / c + z0) % 1
slice_vals[i, j] = self.interpolate(
np.array([x, y, slice_coords[i, j]])
)
elif b != 0:
for i in range(res):
for k in range(res):
x = float(i) / float(res)
z = float(k) / float(res)
slice_coords[i, k] = (-(a * (x - x0) + c * (z - z0)) / b + y0) % 1
slice_vals[i, k] = self.interpolate(
np.array([x, slice_coords[i, k], z])
)
elif a != 0:
for j in range(res):
for k in range(res):
y = float(j) / float(res)
z = float(k) / float(res)
slice_coords[j, k] = (-(b * (y - y0) + c * (z - z0)) / a + x0) % 1
slice_vals[j, k] = self.interpolate(
np.array([slice_coords[j, k], y, z])
)
return slice_vals, slice_coords
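# Illustrative usage (hypothetical data; a cubic unit cell is assumed):
# >>> rho = np.random.rand(32, 32, 32)
# >>> grid = GridData(rho, lattice=np.eye(3))
# >>> grid.interpolate([0.5, 0.5, 0.5])  # value at the cell centre
# >>> grid.slice([0.5, 0.5, 0.5], [0, 0, 1])  # slice through the centre, normal to z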
|
5f44373856a231f2e835db5b18f283868002d468
|
b8a25cba3c725bda12e78454910bfac6658283cd
|
/nyaggle/feature/base.py
|
2c55cc510603e3041fb22b8a48536c2cc6e18886
|
[
"MIT"
] |
permissive
|
nyanp/nyaggle
|
e3f125fbba816c77aefb21fef5e220bd7ee36949
|
86a9db4375d4d4974a71692a756d1c4818e15122
|
refs/heads/master
| 2023-08-16T13:01:43.565349
| 2023-07-22T14:15:41
| 2023-07-22T14:15:41
| 228,955,139
| 286
| 41
|
MIT
| 2023-09-10T04:09:54
| 2019-12-19T02:01:19
|
Python
|
UTF-8
|
Python
| false
| false
| 123
|
py
|
base.py
|
from sklearn.base import BaseEstimator, TransformerMixin
class BaseFeaturizer(BaseEstimator, TransformerMixin):
pass
|
8592a1d2af8fc5cd3ea13c8b51c6d070499b8d0e
|
83da171ef6c1f8201a88534737c9e753247afee1
|
/oasislmf/pytools/pla/manager.py
|
6d43934a76953dcdef59be39d4a91e184de74fbf
|
[
"BSD-3-Clause"
] |
permissive
|
OasisLMF/OasisLMF
|
7bb691eb08f11641887257fe154d5356509707c5
|
23e704c335629ccd010969b1090446cfa3f384d5
|
refs/heads/main
| 2023-08-25T12:14:44.449756
| 2023-08-23T10:08:17
| 2023-08-23T10:08:17
| 117,560,568
| 122
| 53
|
BSD-3-Clause
| 2023-09-14T09:37:17
| 2018-01-15T15:12:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,410
|
py
|
manager.py
|
from contextlib import ExitStack
import os
import sys
from .streams import read_and_write_streams
from .structure import (
get_items_amplifications,
get_post_loss_amplification_factors
)
from oasislmf.pytools.utils import redirect_logging
@redirect_logging(exec_name='plapy')
def run(run_dir, file_in, file_out, input_path, static_path):
"""
    Execute the main Post Loss Amplification workflow.
Args:
run_dir (str): the directory of where the process is running
file_in (str): file name of input stream
        file_out (str): file name of output stream
        input_path (str): path to amplifications.bin
        static_path (str): path to lossfactors.bin
Returns:
0 (int): if no errors occurred
"""
input_path = os.path.join(run_dir, input_path)
static_path = os.path.join(run_dir, static_path)
items_amps = get_items_amplifications(input_path)
plafactors = get_post_loss_amplification_factors(static_path)
with ExitStack() as stack:
if file_in is None:
stream_in = sys.stdin.buffer
else:
stream_in = stack.enter_context(open(file_in, 'rb'))
if file_out is None:
stream_out = sys.stdout.buffer
else:
stream_out = stack.enter_context(open(file_out, 'wb'))
read_and_write_streams(stream_in, stream_out, items_amps, plafactors)
return 0
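# Illustrative invocation (paths are hypothetical; with file_in/file_out left as
# None the input stream is read from stdin and the output written to stdout):
# run(run_dir=".", file_in="gul_out.bin", file_out="pla_out.bin",
#     input_path="input", static_path="static")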
|
1fbd2ae345d341b619c8914bef860e93fd7eba45
|
2d9a3ce2a04190d0032e8a298829022260b1d76b
|
/indra/sources/biofactoid/api.py
|
b0c46755a17ff09227192a9d2e78937e1c37b1d1
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
sorgerlab/indra
|
f127a0f9bdd2d3f48df14575883fd31e2f4de4bf
|
6d6ca1174792b6c5a05cbf3afcb9f138fabcec6a
|
refs/heads/master
| 2023-08-21T13:25:54.654995
| 2023-06-11T16:46:41
| 2023-06-11T16:46:41
| 22,848,436
| 158
| 61
|
BSD-2-Clause
| 2023-08-30T21:47:59
| 2014-08-11T17:44:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,130
|
py
|
api.py
|
import requests
from .processor import BioFactoidProcessor
biofactoid_url = 'https://biofactoid.org/api/document'
biofactoid_unstable_url = 'https://unstable.factoid.baderlab.org/api/document'
def process_from_web(url=None):
"""Process BioFactoid documents from the web.
Parameters
----------
url : Optional[str]
The URL for the web service endpoint which contains all the
document data.
Returns
-------
BioFactoidProcessor
A processor which contains extracted INDRA Statements in its
statements attribute.
"""
url = url if url else biofactoid_url
res = requests.get(url)
res.raise_for_status()
return process_json(res.json())
def process_json(biofactoid_json):
"""Process BioFactoid JSON.
Parameters
----------
biofactoid_json : json
The BioFactoid JSON object to process.
Returns
-------
BioFactoidProcessor
A processor which contains extracted INDRA Statements in its
statements attribute.
"""
bp = BioFactoidProcessor(biofactoid_json)
bp.extract_statements()
return bp
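# Example usage (requires network access; the default URL is the public
# BioFactoid document endpoint defined above):
# bp = process_from_web()
# statements = bp.statements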
|
baf48cdceaaf4dc48660780d84372a35c6e9d39e
|
549275146dc8ecdba9144a6aed2796baa1639eb3
|
/Codes/Liam/172_factorial_trailing_zeroes.py
|
11fea511162b7f3aca3cb069ffab430ad37dbd02
|
[
"Apache-2.0"
] |
permissive
|
asdf2014/algorithm
|
fdb07986746a3e5c36bfc66f4b6b7cb60850ff84
|
b0ed7a36f47b66c04b908eb67f2146843a9c71a3
|
refs/heads/master
| 2023-09-05T22:35:12.922729
| 2023-09-01T12:04:03
| 2023-09-01T12:04:03
| 108,250,452
| 270
| 87
|
Apache-2.0
| 2021-09-24T16:12:08
| 2017-10-25T09:45:27
|
Java
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
172_factorial_trailing_zeroes.py
|
# Runtime: 96 ms
# Memory usage: 29 MB
# Approach: count the number of factors of 5 among the multiplicands
class Solution:
def trailingZeroes(self, n: int) -> int:
        # Count the number of factors of 5 among the multiplicands
count = 0
while n:
count += n // 5
n //= 5
return count
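# Example: Solution().trailingZeroes(25) == 6, since 25 // 5 + 25 // 25 = 5 + 1
# and each factor of 5 pairs with a factor of 2 to add one trailing zero.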
|
98418edde8f12e8f584eeb6976bb1691aed63805
|
0dddc0508138396c740901be4a0f9eebefb8fded
|
/ax/models/torch/botorch.py
|
dca5cc5d1ba4c3f3224c691c69b0d45fb1409b4e
|
[
"MIT"
] |
permissive
|
facebook/Ax
|
473beb143016f95f4ec381ed1bd95b32c1ca31f8
|
6443cee30cbf8cec290200a7420a3db08e4b5445
|
refs/heads/main
| 2023-09-01T09:29:13.684709
| 2023-08-31T21:49:30
| 2023-08-31T21:49:30
| 169,880,381
| 2,207
| 315
|
MIT
| 2023-09-14T21:26:51
| 2019-02-09T15:23:44
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 23,571
|
py
|
botorch.py
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from logging import Logger
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata
from ax.exceptions.core import DataRequiredError
from ax.models.torch.botorch_defaults import (
get_and_fit_model,
get_qLogNEI,
recommend_best_observed_point,
scipy_optimizer,
TAcqfConstructor,
)
from ax.models.torch.utils import (
_datasets_to_legacy_inputs,
_get_X_pending_and_observed,
_to_inequality_constraints,
normalize_indices,
predict_from_model,
subset_model,
)
from ax.models.torch_base import TorchGenResults, TorchModel, TorchOptConfig
from ax.models.types import TConfig
from ax.utils.common.constants import Keys
from ax.utils.common.docutils import copy_doc
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.models import ModelList
from botorch.models.model import Model
from botorch.utils.datasets import SupervisedDataset
from botorch.utils.transforms import is_fully_bayesian
from torch import Tensor
from torch.nn import ModuleList # @manual
logger: Logger = get_logger(__name__)
# pyre-fixme[33]: Aliased annotation cannot contain `Any`.
TModelConstructor = Callable[
[
List[Tensor],
List[Tensor],
List[Tensor],
List[int],
List[int],
List[str],
Optional[Dict[str, Tensor]],
Any,
],
Model,
]
TModelPredictor = Callable[[Model, Tensor], Tuple[Tensor, Tensor]]
# pyre-fixme[33]: Aliased annotation cannot contain `Any`.
TOptimizer = Callable[
[
AcquisitionFunction,
Tensor,
int,
Optional[List[Tuple[Tensor, Tensor, float]]],
Optional[List[Tuple[Tensor, Tensor, float]]],
Optional[Dict[int, float]],
Optional[Callable[[Tensor], Tensor]],
Any,
],
Tuple[Tensor, Tensor],
]
TBestPointRecommender = Callable[
[
TorchModel,
List[Tuple[float, float]],
Tensor,
Optional[Tuple[Tensor, Tensor]],
Optional[Tuple[Tensor, Tensor]],
Optional[Dict[int, float]],
Optional[TConfig],
Optional[Dict[int, float]],
],
Optional[Tensor],
]
class BotorchModel(TorchModel):
r"""
Customizable botorch model.
By default, this uses a noisy Log Expected Improvement (qLogNEI) acquisition
function on top of a model made up of separate GPs, one for each outcome. This
behavior can be modified by providing custom implementations of the following
components:
- a `model_constructor` that instantiates and fits a model on data
- a `model_predictor` that predicts outcomes using the fitted model
    - an `acqf_constructor` that creates an acquisition function from a fitted model
    - an `acqf_optimizer` that optimizes the acquisition function
- a `best_point_recommender` that recommends a current "best" point (i.e.,
what the model recommends if the learning process ended now)
Args:
model_constructor: A callable that instantiates and fits a model on data,
with signature as described below.
model_predictor: A callable that predicts using the fitted model, with
signature as described below.
acqf_constructor: A callable that creates an acquisition function from a
fitted model, with signature as described below.
acqf_optimizer: A callable that optimizes the acquisition function, with
signature as described below.
best_point_recommender: A callable that recommends the best point, with
signature as described below.
refit_on_cv: If True, refit the model for each fold when performing
cross-validation.
refit_on_update: If True, refit the model after updating the training
data using the `update` method.
warm_start_refitting: If True, start model refitting from previous
model parameters in order to speed up the fitting process.
        prior: An optional dictionary that contains the specification of the GP model prior.
Currently, the keys include:
- covar_module_prior: prior on covariance matrix e.g.
{"lengthscale_prior": GammaPrior(3.0, 6.0)}.
- type: type of prior on task covariance matrix e.g.`LKJCovariancePrior`.
- sd_prior: A scalar prior over nonnegative numbers, which is used for the
default LKJCovariancePrior task_covar_prior.
- eta: The eta parameter on the default LKJ task_covar_prior.
Call signatures:
::
model_constructor(
Xs,
Ys,
Yvars,
task_features,
fidelity_features,
metric_names,
state_dict,
**kwargs,
) -> model
Here `Xs`, `Ys`, `Yvars` are lists of tensors (one element per outcome),
`task_features` identifies columns of Xs that should be modeled as a task,
`fidelity_features` is a list of ints that specify the positions of fidelity
parameters in 'Xs', `metric_names` provides the names of each `Y` in `Ys`,
`state_dict` is a pytorch module state dict, and `model` is a BoTorch `Model`.
Optional kwargs are being passed through from the `BotorchModel` constructor.
This callable is assumed to return a fitted BoTorch model that has the same
dtype and lives on the same device as the input tensors.
::
model_predictor(model, X) -> [mean, cov]
Here `model` is a fitted botorch model, `X` is a tensor of candidate points,
and `mean` and `cov` are the posterior mean and covariance, respectively.
::
acqf_constructor(
model,
objective_weights,
outcome_constraints,
X_observed,
X_pending,
**kwargs,
) -> acq_function
Here `model` is a botorch `Model`, `objective_weights` is a tensor of weights
for the model outputs, `outcome_constraints` is a tuple of tensors describing
the (linear) outcome constraints, `X_observed` are previously observed points,
and `X_pending` are points whose evaluation is pending. `acq_function` is a
BoTorch acquisition function crafted from these inputs. For additional
details on the arguments, see `get_qLogNEI`.
::
acqf_optimizer(
acq_function,
bounds,
n,
inequality_constraints,
equality_constraints,
fixed_features,
rounding_func,
**kwargs,
) -> candidates
Here `acq_function` is a BoTorch `AcquisitionFunction`, `bounds` is a tensor
containing bounds on the parameters, `n` is the number of candidates to be
generated, `inequality_constraints` are inequality constraints on parameter
values, `fixed_features` specifies features that should be fixed during
generation, and `rounding_func` is a callback that rounds an optimization
result appropriately. `candidates` is a tensor of generated candidates.
For additional details on the arguments, see `scipy_optimizer`.
::
best_point_recommender(
model,
bounds,
objective_weights,
outcome_constraints,
linear_constraints,
fixed_features,
model_gen_options,
target_fidelities,
) -> candidates
Here `model` is a TorchModel, `bounds` is a list of tuples containing bounds
on the parameters, `objective_weights` is a tensor of weights for the model outputs,
`outcome_constraints` is a tuple of tensors describing the (linear) outcome
constraints, `linear_constraints` is a tuple of tensors describing constraints
on the design, `fixed_features` specifies features that should be fixed during
generation, `model_gen_options` is a config dictionary that can contain
model-specific options, and `target_fidelities` is a map from fidelity feature
column indices to their respective target fidelities, used for multi-fidelity
optimization problems. % TODO: refer to an example.
"""
dtype: Optional[torch.dtype]
device: Optional[torch.device]
Xs: List[Tensor]
Ys: List[Tensor]
Yvars: List[Tensor]
_model: Optional[Model]
_search_space_digest: Optional[SearchSpaceDigest] = None
def __init__(
self,
model_constructor: TModelConstructor = get_and_fit_model,
model_predictor: TModelPredictor = predict_from_model,
acqf_constructor: TAcqfConstructor = get_qLogNEI,
# pyre-fixme[9]: acqf_optimizer declared/used type mismatch
acqf_optimizer: TOptimizer = scipy_optimizer,
best_point_recommender: TBestPointRecommender = recommend_best_observed_point,
refit_on_cv: bool = False,
refit_on_update: bool = True,
warm_start_refitting: bool = True,
use_input_warping: bool = False,
use_loocv_pseudo_likelihood: bool = False,
prior: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> None:
self.model_constructor = model_constructor
self.model_predictor = model_predictor
self.acqf_constructor = acqf_constructor
self.acqf_optimizer = acqf_optimizer
self.best_point_recommender = best_point_recommender
# pyre-fixme[4]: Attribute must be annotated.
self._kwargs = kwargs
self.refit_on_cv = refit_on_cv
self.refit_on_update = refit_on_update
self.warm_start_refitting = warm_start_refitting
self.use_input_warping = use_input_warping
self.use_loocv_pseudo_likelihood = use_loocv_pseudo_likelihood
self.prior = prior
self._model: Optional[Model] = None
self.Xs = []
self.Ys = []
self.Yvars = []
self.dtype = None
self.device = None
self.task_features: List[int] = []
self.fidelity_features: List[int] = []
self.metric_names: List[str] = []
@copy_doc(TorchModel.fit)
def fit(
self,
datasets: List[SupervisedDataset],
metric_names: List[str],
search_space_digest: SearchSpaceDigest,
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
) -> None:
if len(datasets) == 0:
raise DataRequiredError("BotorchModel.fit requires non-empty data sets.")
self.Xs, self.Ys, self.Yvars = _datasets_to_legacy_inputs(datasets=datasets)
self.metric_names = metric_names
# Store search space info for later use (e.g. during generation)
self._search_space_digest = search_space_digest
self.dtype = self.Xs[0].dtype
self.device = self.Xs[0].device
self.task_features = normalize_indices(
search_space_digest.task_features, d=self.Xs[0].size(-1)
)
self.fidelity_features = normalize_indices(
search_space_digest.fidelity_features, d=self.Xs[0].size(-1)
)
self._model = self.model_constructor( # pyre-ignore [28]
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
task_features=self.task_features,
fidelity_features=self.fidelity_features,
metric_names=self.metric_names,
use_input_warping=self.use_input_warping,
use_loocv_pseudo_likelihood=self.use_loocv_pseudo_likelihood,
prior=self.prior,
**self._kwargs,
)
@copy_doc(TorchModel.predict)
def predict(self, X: Tensor) -> Tuple[Tensor, Tensor]:
return self.model_predictor(model=self.model, X=X) # pyre-ignore [28]
@copy_doc(TorchModel.gen)
def gen(
self,
n: int,
search_space_digest: SearchSpaceDigest,
torch_opt_config: TorchOptConfig,
) -> TorchGenResults:
options = torch_opt_config.model_gen_options or {}
acf_options = options.get(Keys.ACQF_KWARGS, {})
optimizer_options = options.get(Keys.OPTIMIZER_KWARGS, {})
if search_space_digest.target_fidelities:
raise NotImplementedError(
"target_fidelities not implemented for base BotorchModel"
)
X_pending, X_observed = _get_X_pending_and_observed(
Xs=self.Xs,
objective_weights=torch_opt_config.objective_weights,
bounds=search_space_digest.bounds,
pending_observations=torch_opt_config.pending_observations,
outcome_constraints=torch_opt_config.outcome_constraints,
linear_constraints=torch_opt_config.linear_constraints,
fixed_features=torch_opt_config.fixed_features,
)
model = self.model
        # subset model only to the outcomes we need for the optimization
if options.get(Keys.SUBSET_MODEL, True):
subset_model_results = subset_model(
model=model,
objective_weights=torch_opt_config.objective_weights,
outcome_constraints=torch_opt_config.outcome_constraints,
)
model = subset_model_results.model
objective_weights = subset_model_results.objective_weights
outcome_constraints = subset_model_results.outcome_constraints
else:
objective_weights = torch_opt_config.objective_weights
outcome_constraints = torch_opt_config.outcome_constraints
bounds_ = torch.tensor(
search_space_digest.bounds, dtype=self.dtype, device=self.device
)
bounds_ = bounds_.transpose(0, 1)
botorch_rounding_func = get_rounding_func(torch_opt_config.rounding_func)
from botorch.exceptions.errors import UnsupportedError
# pyre-fixme[53]: Captured variable `X_observed` is not annotated.
# pyre-fixme[53]: Captured variable `X_pending` is not annotated.
# pyre-fixme[53]: Captured variable `acf_options` is not annotated.
# pyre-fixme[53]: Captured variable `botorch_rounding_func` is not annotated.
# pyre-fixme[53]: Captured variable `bounds_` is not annotated.
# pyre-fixme[53]: Captured variable `model` is not annotated.
# pyre-fixme[53]: Captured variable `objective_weights` is not annotated.
# pyre-fixme[53]: Captured variable `optimizer_options` is not annotated.
# pyre-fixme[53]: Captured variable `outcome_constraints` is not annotated.
def make_and_optimize_acqf(override_qmc: bool = False) -> Tuple[Tensor, Tensor]:
add_kwargs = {"qmc": False} if override_qmc else {}
acquisition_function = self.acqf_constructor(
model=model,
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
X_observed=X_observed,
X_pending=X_pending,
**acf_options,
**add_kwargs,
)
acquisition_function = checked_cast(
AcquisitionFunction, acquisition_function
)
# pyre-ignore: [28]
candidates, expected_acquisition_value = self.acqf_optimizer(
acq_function=checked_cast(AcquisitionFunction, acquisition_function),
bounds=bounds_,
n=n,
inequality_constraints=_to_inequality_constraints(
linear_constraints=torch_opt_config.linear_constraints
),
fixed_features=torch_opt_config.fixed_features,
rounding_func=botorch_rounding_func,
**optimizer_options,
)
return candidates, expected_acquisition_value
try:
candidates, expected_acquisition_value = make_and_optimize_acqf()
except UnsupportedError as e: # untested
if "SobolQMCSampler only supports dimensions" in str(e):
# dimension too large for Sobol, let's use IID
candidates, expected_acquisition_value = make_and_optimize_acqf(
override_qmc=True
)
else:
raise e
gen_metadata = {}
if expected_acquisition_value.numel() > 0:
gen_metadata[
"expected_acquisition_value"
] = expected_acquisition_value.tolist()
return TorchGenResults(
points=candidates.detach().cpu(),
weights=torch.ones(n, dtype=self.dtype),
gen_metadata=gen_metadata,
)
@copy_doc(TorchModel.best_point)
def best_point(
self,
search_space_digest: SearchSpaceDigest,
torch_opt_config: TorchOptConfig,
) -> Optional[Tensor]:
if torch_opt_config.is_moo:
raise NotImplementedError(
"Best observed point is incompatible with MOO problems."
)
return self.best_point_recommender( # pyre-ignore [28]
model=self,
bounds=search_space_digest.bounds,
objective_weights=torch_opt_config.objective_weights,
outcome_constraints=torch_opt_config.outcome_constraints,
linear_constraints=torch_opt_config.linear_constraints,
fixed_features=torch_opt_config.fixed_features,
model_gen_options=torch_opt_config.model_gen_options,
target_fidelities=search_space_digest.target_fidelities,
)
@copy_doc(TorchModel.cross_validate)
def cross_validate( # pyre-ignore [14]: `search_space_digest` arg not needed here
self,
datasets: List[SupervisedDataset],
X_test: Tensor,
**kwargs: Any,
) -> Tuple[Tensor, Tensor]:
if self._model is None:
raise RuntimeError("Cannot cross-validate model that has not been fitted.")
if self.refit_on_cv:
state_dict = None
else:
state_dict = deepcopy(self.model.state_dict())
Xs, Ys, Yvars = _datasets_to_legacy_inputs(datasets=datasets)
model = self.model_constructor( # pyre-ignore: [28]
Xs=Xs,
Ys=Ys,
Yvars=Yvars,
task_features=self.task_features,
state_dict=state_dict,
fidelity_features=self.fidelity_features,
metric_names=self.metric_names,
refit_model=self.refit_on_cv,
use_input_warping=self.use_input_warping,
use_loocv_pseudo_likelihood=self.use_loocv_pseudo_likelihood,
**self._kwargs,
)
return self.model_predictor(model=model, X=X_test) # pyre-ignore: [28]
@copy_doc(TorchModel.update)
def update( # pyre-ignore [14]: `search_space_digest` arg not needed here
self,
datasets: List[SupervisedDataset],
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
**kwargs: Any,
) -> None:
if self._model is None:
raise RuntimeError("Cannot update model that has not been fitted.")
Xs, Ys, Yvars = _datasets_to_legacy_inputs(datasets=datasets)
self.Xs = Xs
self.Ys = Ys
self.Yvars = Yvars
if self.refit_on_update and not self.warm_start_refitting:
state_dict = None
else:
state_dict = deepcopy(self.model.state_dict())
self._model = self.model_constructor( # pyre-ignore: [28]
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
task_features=self.task_features,
state_dict=state_dict,
fidelity_features=self.fidelity_features,
metric_names=self.metric_names,
refit_model=self.refit_on_update,
use_input_warping=self.use_input_warping,
use_loocv_pseudo_likelihood=self.use_loocv_pseudo_likelihood,
**self._kwargs,
)
def feature_importances(self) -> np.ndarray:
return get_feature_importances_from_botorch_model(model=self._model)
@property
def search_space_digest(self) -> SearchSpaceDigest:
if self._search_space_digest is None:
raise RuntimeError(
"`search_space_digest` is not initialized. Please fit the model first."
)
return self._search_space_digest
@search_space_digest.setter
def search_space_digest(self, value: SearchSpaceDigest) -> None:
raise RuntimeError("Setting search_space_digest manually is disallowed.")
@property
def model(self) -> Model:
if self._model is None:
raise RuntimeError(
"`model` is not initialized. Please fit the model first."
)
return self._model
@model.setter
def model(self, model: Model) -> None:
self._model = model # there are a few places that set model directly
def get_rounding_func(
rounding_func: Optional[Callable[[Tensor], Tensor]]
) -> Optional[Callable[[Tensor], Tensor]]:
if rounding_func is None:
botorch_rounding_func = rounding_func
else:
# make sure rounding_func is properly applied to q- and t-batches
def botorch_rounding_func(X: Tensor) -> Tensor:
batch_shape, d = X.shape[:-1], X.shape[-1]
X_round = torch.stack(
[rounding_func(x) for x in X.view(-1, d)] # pyre-ignore: [16]
)
return X_round.view(*batch_shape, d)
return botorch_rounding_func
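# Minimal usage sketch for ``get_rounding_func`` (hypothetical rounding
# function; ``torch`` is imported at module level):
# round_batched = get_rounding_func(lambda x: x.round())
# X = torch.rand(5, 3, 4)  # (batch, q, d) candidates
# X_rounded = round_batched(X)  # rounding applied row by row, shape preserved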
def get_feature_importances_from_botorch_model(
model: Union[Model, ModuleList, None],
) -> np.ndarray:
"""Get feature importances from a list of BoTorch models.
Args:
        model: BoTorch model (or ``ModelList``) to get feature importances from.
Returns:
The feature importances as a numpy array where each row sums to 1.
"""
if model is None:
        raise RuntimeError(
            "Cannot calculate feature_importances without a fitted model. "
            "Call `fit` first."
        )
elif isinstance(model, ModelList):
models = model.models
else:
models = [model]
lengthscales = []
for m in models:
try:
ls = m.covar_module.base_kernel.lengthscale
except AttributeError:
ls = None
if ls is None or ls.shape[-1] != m.train_inputs[0].shape[-1]:
# TODO: We could potentially set the feature importances to NaN in this
# case, but this require knowing the batch dimension of this model.
# Consider supporting in the future.
raise NotImplementedError(
"Failed to extract lengthscales from `m.covar_module.base_kernel`"
)
if ls.ndim == 2:
ls = ls.unsqueeze(0)
if is_fully_bayesian(m): # Take the median over the MCMC samples
ls = torch.quantile(ls, q=0.5, dim=0, keepdim=True)
lengthscales.append(ls)
lengthscales = torch.cat(lengthscales, dim=0)
feature_importances = (1 / lengthscales).detach().cpu() # pyre-ignore
# Make sure the sum of feature importances is 1.0 for each metric
feature_importances /= feature_importances.sum(dim=-1, keepdim=True)
return feature_importances.numpy()
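# Illustrative sketch (not part of the original module): builds a small SingleTaskGP on
# synthetic data and extracts normalized inverse-lengthscale feature importances with
# get_feature_importances_from_botorch_model. It assumes the default SingleTaskGP kernel
# exposes `covar_module.base_kernel.lengthscale`, which holds for the BoTorch versions
# this legacy module targets.
def _demo_feature_importances() -> None:
    import torch
    from botorch.models import SingleTaskGP
    train_X = torch.rand(20, 3, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True)
    gp = SingleTaskGP(train_X, train_Y)  # default ARD kernel, one lengthscale per feature
    importances = get_feature_importances_from_botorch_model(model=gp)
    assert importances.shape[-1] == 3
    # Each row sums to 1 because the inverse lengthscales are normalized per metric.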
|
144362ed77e105831aebc63e694d2f14f267eda4
|
ca593f5a272ce0478ba6f52d2670cb9dd8564b00
|
/mycroft/util/time.py
|
0c86e0ad8db6d7e8b8511b9cc1ce8c05742d5234
|
[
"Apache-2.0"
] |
permissive
|
MycroftAI/mycroft-core
|
d41ce0fccfe4c29d8d802dcc6bcf583dc356d9ce
|
8051e4e1f89d5ed1f63f06db5d3570371ae92e5d
|
refs/heads/master
| 2023-08-23T17:45:10.569985
| 2021-12-10T04:51:59
| 2021-12-10T04:51:59
| 59,299,524
| 6,838
| 1,719
|
Apache-2.0
| 2023-08-15T10:25:32
| 2016-05-20T14:11:07
|
Python
|
UTF-8
|
Python
| false
| false
| 3,213
|
py
|
time.py
|
#
# Copyright 2018 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Time utils for getting and converting datetime objects for the Mycroft
system. This time is based on the setting in the Mycroft config and may or
may not match the system locale.
"""
from datetime import datetime
from dateutil.tz import gettz, tzlocal
def default_timezone():
"""Get the default timezone
Based on user location settings location.timezone.code or
the default system value if no setting exists.
Returns:
(datetime.tzinfo): Definition of the default timezone
"""
try:
        # Obtain from user's configured settings
# location.timezone.code (e.g. "America/Chicago")
# location.timezone.name (e.g. "Central Standard Time")
# location.timezone.offset (e.g. -21600000)
from mycroft.configuration import Configuration
config = Configuration.get()
code = config["location"]["timezone"]["code"]
return gettz(code)
except Exception:
# Just go with system default timezone
return tzlocal()
def now_utc():
"""Retrieve the current time in UTC
Returns:
(datetime): The current time in Universal Time, aka GMT
"""
return to_utc(datetime.utcnow())
def now_local(tz=None):
"""Retrieve the current time
Args:
tz (datetime.tzinfo, optional): Timezone, default to user's settings
Returns:
(datetime): The current time
"""
if not tz:
tz = default_timezone()
return datetime.now(tz)
def to_utc(dt):
"""Convert a datetime with timezone info to a UTC datetime
Args:
dt (datetime): A datetime (presumably in some local zone)
Returns:
(datetime): time converted to UTC
"""
tzUTC = gettz("UTC")
if dt.tzinfo:
return dt.astimezone(tzUTC)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tzUTC)
def to_local(dt):
"""Convert a datetime to the user's local timezone
Args:
dt (datetime): A datetime (if no timezone, defaults to UTC)
Returns:
(datetime): time converted to the local timezone
"""
tz = default_timezone()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
def to_system(dt):
"""Convert a datetime to the system's local timezone
Args:
dt (datetime): A datetime (if no timezone, assumed to be UTC)
Returns:
        (datetime): time converted to the operating system's timezone
"""
tz = tzlocal()
if dt.tzinfo:
return dt.astimezone(tz)
else:
return dt.replace(tzinfo=gettz("UTC")).astimezone(tz)
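# Illustrative usage sketch (not part of the original module): demonstrates the intended
# round trip between UTC and the user's configured timezone using only the helpers
# defined above.
def _demo_time_helpers():
    utc_now = now_utc()                   # timezone-aware UTC datetime
    local_now = to_local(utc_now)         # same instant in the configured timezone
    assert to_utc(local_now) == utc_now   # conversion is lossless for aware datetimes
    return local_now.isoformat()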
|
94088166d587e14421796211b5a1f7db35323466
|
23652304566b1869ca65b95b116ee43d16e134f3
|
/h/streamer/views.py
|
efd96220b0874d215d78d8e4b722adb74a91e004
|
[
"BSD-2-Clause",
"BSD-3-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
hypothesis/h
|
29399a26990856c336b05022e827541dd8aeedab
|
232446d776fdb906d2fb253cf0a409c6813a08d6
|
refs/heads/main
| 2023-08-30T16:21:33.754658
| 2023-08-30T09:26:50
| 2023-08-30T09:40:48
| 3,910,945
| 2,558
| 452
|
BSD-2-Clause
| 2023-09-14T11:25:06
| 2012-04-02T19:56:59
|
Python
|
UTF-8
|
Python
| false
| false
| 1,687
|
py
|
views.py
|
from pyramid.view import forbidden_view_config, notfound_view_config, view_config
from ws4py.exc import HandshakeError
from ws4py.server.wsgiutils import WebSocketWSGIApplication
from h.streamer import streamer, websocket
@view_config(route_name="ws")
def websocket_view(request):
# Provide environment which the WebSocket handler can use...
request.environ.update(
{
"h.ws.streamer_work_queue": streamer.WORK_QUEUE,
"h.ws.identity": request.identity,
}
)
app = WebSocketWSGIApplication(handler_cls=websocket.WebSocket)
return request.get_response(app)
@notfound_view_config(renderer="json")
def notfound(_exc, request):
request.response.status_code = 404
return {
"ok": False,
"error": "not_found",
"reason": "These are not the droids you are looking for.",
}
@forbidden_view_config(renderer="json")
def forbidden(_exc, request):
request.response.status_code = 403
return {
"ok": False,
"error": "forbidden",
"reason": "You are not allowed here. Are you connecting from an "
"allowed origin?",
}
@view_config(context=HandshakeError, renderer="json")
def error_badhandshake(_exc, request):
request.response.status_code = 400
return {
"ok": False,
"error": "bad_handshake",
"reason": "Handshake failed. Are you a WebSocket client?",
}
@view_config(context=Exception, renderer="json")
def error(_context, request):
request.response.status_code = 500
return {
"ok": False,
"error": "server_error",
"reason": "An unexpected error occurred and has been reported.",
}
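# Hypothetical wiring sketch (not part of the original module): shows how these
# declarative views could be hooked up in a standalone Pyramid app. The "/ws" route
# pattern is an assumption; the real application configures its routes elsewhere.
def make_example_app():
    from pyramid.config import Configurator
    with Configurator() as config:
        config.add_route("ws", "/ws")    # route name must match the @view_config above
        config.scan("h.streamer.views")  # registers the decorated views
        return config.make_wsgi_app()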
|
7cbbafc0e8ee1da7b567ca85e002978876e0999e
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/Programming_for_Computations/test_diffeq2.py
|
910145e70ebf136c36caf8380a01337f615b0ad8
|
[] |
no_license
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,809
|
py
|
test_diffeq2.py
|
"""Verify the implementation of the diffusion equation."""
from ode_system_FE import ode_FE
from numpy import linspace, zeros, abs
def s(t):
return u_exact(0, t)
def dsdt(t):
return 1
def f(x, t):
return 1 - 2*beta
def rhs(u, t):
N = len(u) - 1
rhs = zeros(N+1)
rhs[0] = dsdt(t)
for i in range(1, N):
rhs[i] = (beta/dx**2)*(u[i+1] - 2*u[i] + u[i-1]) + f(x[i], t)
    rhs[N] = (2*beta/dx**2)*(u[N-1] - u[N]) + f(x[N], t)
return rhs
def u_exact(x, t):
return t + (x - L)**2
def verify_sympy():
import sympy as sp
beta, x, t, dx, dt, L = sp.symbols('beta x t dx dt L')
u = lambda x, t: 3*t + 2*x
f = lambda x, t, beta, L: 3
s = lambda t: u(0, t)
N = 4
rhs = [None]*(N+1)
rhs[0] = sp.diff(s(t), t)
for i in range(1, N):
rhs[i] = (beta/dx**2)*(u(x+dx,t) - 2*u(x,t) + u(x-dx,t)) + \
f(x, t, beta, L)
rhs[N] = (beta/dx**2)*(u(x-dx,t) + 2*dx*2 - 2*u(x,t) + u(x-dx,t)) + \
f(x, t, beta, L)
#rhs[N] = (2*beta/dx**2)*(u(x-dx,t) - u(x,t)) + f(x, t, beta, L)
for i in range(len(rhs)):
rhs[i] = sp.simplify(sp.expand(rhs[i])).subs(x, i*dx)
        print(rhs[i])
lhs = (u(x, t+dt) - u(x,t))/dt
lhs = sp.simplify(sp.expand(lhs.subs(x, i*dx)))
        print(lhs)
        print(sp.simplify(lhs - rhs[i]))
        print('---')
L = 1.5
beta = 0.5
N = 40
x = linspace(0, L, N+1)
dx = x[1] - x[0]
u = zeros(N+1)
U_0 = zeros(N+1)
U_0[0] = s(0)
U_0[1:] = u_exact(x[1:], 0)
dt = dx**2/(2*beta)
print('stability limit:', dt)
u, t = ode_FE(rhs, U_0, dt, T=1.2)
for i in range(0, u.shape[0]):
diff = abs(u_exact(x, t[i]) - u[i,:]).max()
    #print(u[i,:])
    #print(u_exact(x, t[i]))
    print('diff=%g at t=%g' % (diff, t[i]))
print('---')
verify_sympy()
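# Note added for clarity (not part of the original script): rhs() above is the
# method-of-lines discretization of u_t = beta*u_xx + f on [0, L], with the left
# boundary advanced as du_0/dt = s'(t) and a reflecting (Neumann) condition at x = L:
#
#     du_i/dt = beta/dx**2 * (u[i+1] - 2*u[i] + u[i-1]) + f(x[i], t),  i = 1..N-1,
#     du_N/dt = 2*beta/dx**2 * (u[N-1] - u[N]) + f(x[N], t).
#
# ode_FE then integrates this system with the Forward Euler method, which is stable
# for dt <= dx**2/(2*beta), the limit printed above.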
|
c8ad9438c815fe22dde66ff9e1b56e1cf35a18f3
|
d32819267d481c3224936bb17149939d836ae654
|
/examples/hb_example.py
|
5a815a4a42e925f4e2728fdff0e6412dcf230922
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
seba-1511/randopt
|
18cf039b9e539986df5215f5277f23a942569f23
|
74cefcc734c6a38418151025b0a4d8b6cb41eb14
|
refs/heads/master
| 2021-05-01T19:46:00.117966
| 2020-04-27T20:47:43
| 2020-04-27T20:47:43
| 71,821,410
| 116
| 9
|
Apache-2.0
| 2018-09-29T08:39:14
| 2016-10-24T18:52:04
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,051
|
py
|
hb_example.py
|
#!/usr/bin/env python
import randopt as ro
def loss(x):
return x**2
def dloss(x):
return 2.0*x
def run_exp():
param = 10.0
num_epochs = 10
e = ro.HyperBand('hb_example', {
'alpha': ro.Uniform(low=0.0, high=0.01)
}, num_iter=num_epochs)
e.sample_all_params()
for epoch in range(num_epochs):
param = param - e.alpha * dloss(param)
if e.stop(loss(param)):
return e
e.add_result(loss(param))
return e
if __name__ == '__main__':
num_runs = 100
for run in range(num_runs):
e = run_exp()
print('optimal value: ', e.minimum())
import os
import json
max_lr = 0.0
for fname in os.listdir(e.hyperband_path):
base, ext = os.path.splitext(fname)
if 'json' in ext:
fname = os.path.join(e.hyperband_path, fname)
with open(fname, 'r') as f:
res = json.load(f)
if res['alpha'] > max_lr:
max_lr = res['alpha']
print('Max LR tried: ', max_lr)
|
a39d0630873a3b4b12b6a3139eaa8f27e3c29eb2
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/hackerrank/input.py
|
10c6d9a90ea8403625b6606fb1134cc0a2bee55e
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
Python
| false
| false
| 68
|
py
|
input.py
|
#!/usr/bin/python3
# Reads x and k, then checks whether the polynomial P (entered as a Python
# expression in x on the next line) evaluates to k at the given x.
x, k = map(int, input().split())
print(eval(input()) == k)
|
0fa95231bc333007f10a209a6bacfc91c1247524
|
8b93c6fe926241f6c95f981f7dae32414d820148
|
/tools/building_script/py_prebuild.py
|
2f23d14e368782eac8e24e9563682da2b508c462
|
[
"MIT"
] |
permissive
|
lailongwei/llbc
|
e0873c7a34eea6a3fff260f78f1bdbf06520363b
|
2b4ccb3387549f612a6df001f091680bfd9b3adb
|
refs/heads/master
| 2023-08-15T08:22:03.602550
| 2023-08-04T07:31:58
| 2023-08-04T07:31:58
| 45,900,425
| 126
| 48
|
MIT
| 2023-09-11T13:59:49
| 2015-11-10T09:08:46
|
C++
|
UTF-8
|
Python
| false
| false
| 840
|
py
|
py_prebuild.py
|
# -*- coding: utf-8 -*-
"""
Pre-build script for the pyllbc project: consolidates the list of methods that C++ exposes to Lua and bundles the Python code into the C++ dll (as strings).
"""
from time import sleep
from os import path as op
from py_integrator_builder import PyIntegratorBuilder
from native_method_collector import PyNativeMethodCollector as PyNMC
from c import Cfg
def main():
print('Build methods...')
code_path = Cfg.getcodepath()
PyNMC(op.join(code_path, 'common')).build()
PyNMC(op.join(code_path, 'testcase')).build()
PyNMC(op.join(code_path, 'core')).build()
PyNMC(op.join(code_path, 'comm')).build()
PyNMC(op.join(code_path, 'app')).build()
print('Done')
print('Build script integrator...')
PyIntegratorBuilder.build()
print('Done')
sleep(1.618)
if __name__ == '__main__':
main()
|
f22356bfaf10fe1e82b6a9a55e8809f755743d89
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/terraform/checks/resource/azure/AKSIsPaidSku.py
|
13029b9217e0088c18a589d7d071472a643b7d84
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 891
|
py
|
AKSIsPaidSku.py
|
from typing import Any
from checkov.common.models.enums import CheckCategories
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
class AKSIsPaidSku(BaseResourceValueCheck):
def __init__(self) -> None:
name = "Ensure that AKS use the Paid Sku for its SLA"
# AKS clusters should have Uptime
# SLA enabled to ensure availability
# of control plane components
# for production workloads.
id = "CKV_AZURE_170"
supported_resources = ("azurerm_kubernetes_cluster",)
categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
return "sku_tier"
def get_expected_value(self) -> Any:
return "Standard"
check = AKSIsPaidSku()
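# Illustrative sketch (not part of the original check): evaluates a resource block the
# way this value check conceptually does, by comparing the inspected key against the
# expected value. The configuration dict below mimics the parsed form checkov uses
# (attribute values wrapped in lists) and is purely hypothetical.
def _demo_check_logic() -> None:
    conf = {"name": ["example-aks"], "sku_tier": ["Standard"]}
    inspected = check.get_inspected_key()   # "sku_tier"
    expected = check.get_expected_value()   # "Standard"
    assert conf.get(inspected, [None])[0] == expected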
|
6719ea274eb53fa88c0d5e7edcf51253965eef8d
|
7b7c570b30d6d7a0e9b904c7cb378cfb0d0f0e07
|
/examples/xgboost/xgboost_sklearn/train.py
|
97fb33c85265f3f2df116f51196663a31a4e3a08
|
[
"Apache-2.0"
] |
permissive
|
mlflow/mlflow
|
ca97bfbbf32f8e59f454e428f5e46eb3d34d062f
|
37298ffafcd34002352d01d579d4524790544267
|
refs/heads/master
| 2023-09-01T13:15:53.902815
| 2023-09-01T09:00:42
| 2023-09-01T09:00:42
| 136,202,695
| 14,102
| 3,748
|
Apache-2.0
| 2023-09-14T21:52:42
| 2018-06-05T16:05:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,059
|
py
|
train.py
|
from pprint import pprint
import xgboost as xgb
from sklearn.datasets import load_diabetes
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from utils import fetch_logged_data
import mlflow
import mlflow.xgboost
def main():
# prepare example dataset
X, y = load_diabetes(return_X_y=True, as_frame=True)
X_train, X_test, y_train, y_test = train_test_split(X, y)
# enable auto logging
# this includes xgboost.sklearn estimators
mlflow.xgboost.autolog()
regressor = xgb.XGBRegressor(n_estimators=20, reg_lambda=1, gamma=0, max_depth=3)
regressor.fit(X_train, y_train, eval_set=[(X_test, y_test)])
y_pred = regressor.predict(X_test)
mean_squared_error(y_test, y_pred)
run_id = mlflow.last_active_run().info.run_id
print(f"Logged data and model in run {run_id}")
# show logged data
for key, data in fetch_logged_data(run_id).items():
print(f"\n---------- logged {key} ----------")
pprint(data)
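# Hypothetical sketch (not part of the original example): one way the imported
# fetch_logged_data helper could collect the autologged params, metrics, tags and model
# artifacts for a run through the public MlflowClient API. The real helper lives in the
# sibling utils module and may differ in detail.
def fetch_logged_data_sketch(run_id):
    client = mlflow.tracking.MlflowClient()
    data = client.get_run(run_id).data
    return {
        "params": data.params,
        "metrics": data.metrics,
        "tags": {k: v for k, v in data.tags.items() if not k.startswith("mlflow.")},
        "artifacts": [f.path for f in client.list_artifacts(run_id, "model")],
    }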
if __name__ == "__main__":
main()
|
2f65355bd87921ce62030ac9509118bc7a4a6f26
|
48d6a692666f85a353cd2c7a89581e2207fdd6a0
|
/tests/instruments/mksinst/test_mks937b.py
|
385b625dfaf11a42924e359f1384c8bb0e60cab8
|
[
"MIT"
] |
permissive
|
pymeasure/pymeasure
|
21fa07592adfb63944fd35723d82853133e103be
|
c04cfc05dc48fa5f3c4ff1e0f223751da6e7d8b5
|
refs/heads/master
| 2023-09-02T02:07:05.118773
| 2023-09-01T18:27:43
| 2023-09-01T18:27:43
| 18,864,038
| 271
| 172
|
MIT
| 2023-09-11T16:42:56
| 2014-04-17T02:31:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,801
|
py
|
test_mks937b.py
|
#
# This file is part of the PyMeasure package.
#
# Copyright (c) 2013-2023 PyMeasure Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import pytest
from pymeasure.test import expected_protocol
from pymeasure.instruments.mksinst.mks937b import MKS937B
def test_pressure():
"""Verify the communication of the pressure getter."""
with expected_protocol(
MKS937B,
[("@253PR1?", "@253ACK1.10e-9"),
(None, b"FF")],
) as inst:
assert inst.ch_1.pressure == pytest.approx(1.1e-9)
def test_ion_gauge_status():
"""Verify the communication of the ion gauge status getter."""
with expected_protocol(
MKS937B,
[("@253T1?", "@253ACKG"),
(None, b"FF")],
) as inst:
assert inst.ch_1.ion_gauge_status == "Good"
def test_ion_gauge_status_invalid_channel():
"""Ion gauge status does not exist on all channels."""
with expected_protocol(
MKS937B,
[],
) as inst:
with pytest.raises(AttributeError):
inst.ch_2.ion_gauge_status
def test_unit_setter():
"""Verify the communication of the unit setter."""
with expected_protocol(
MKS937B,
[("@253U!TORR", "@253ACKTORR"),
(None, b"FF")],
) as inst:
inst.unit = "Torr"
def test_unit_getter():
"""Verify the communication of the unit getter."""
with expected_protocol(
MKS937B,
[("@253U?", "@253ACKTORR"),
(None, b"FF")],
) as inst:
assert inst.unit == "Torr"
def test_power_enabled():
"""Verify the communication of the channel power getter."""
with expected_protocol(
MKS937B,
[("@253CP1?", "@253ACKON"),
(None, b"FF")],
) as inst:
assert inst.ch_1.power_enabled is True
|
3741c03c5074f95223e6ec8f1d1ffcb35dfa84a0
|
56a77194fc0cd6087b0c2ca1fb6dc0de64b8a58a
|
/applications/FemToDemApplication/python_scripts/MainDEM_for_coupling.py
|
f1dc35a2f01ecc55a09a9f9fb7c92f1b39231eb5
|
[
"BSD-3-Clause"
] |
permissive
|
KratosMultiphysics/Kratos
|
82b902a2266625b25f17239b42da958611a4b9c5
|
366949ec4e3651702edc6ac3061d2988f10dd271
|
refs/heads/master
| 2023-08-30T20:31:37.818693
| 2023-08-30T18:01:01
| 2023-08-30T18:01:01
| 81,815,495
| 994
| 285
|
NOASSERTION
| 2023-09-14T13:22:43
| 2017-02-13T10:58:24
|
C++
|
UTF-8
|
Python
| false
| false
| 758
|
py
|
MainDEM_for_coupling.py
|
import KratosMultiphysics.DEMApplication as DEM
import KratosMultiphysics.DEMApplication.DEM_analysis_stage as MainDEM
class DEM_for_coupling_Solution(MainDEM.DEMAnalysisStage):
def SetAnalyticParticleWatcher(self):
pass
def AddVariables(self):
super(DEM_for_coupling_Solution, self).AddVariables()
# For averaging forces when substepping
self.spheres_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_IMPULSE)
def GraphicalOutputInitialize(self):
pass
def PrintResultsForGid(self, time):
pass
def GraphicalOutputFinalize(self):
pass
def PrintResults(self):
pass
def RunAnalytics(self, time, is_time_to_print=True):
pass
|
015919b6337871c48075b368b7c7d512373d3820
|
13800b7827598e76428a335559b7bf11867ec2f0
|
/python/ccxt/tidex.py
|
7b9761bb28d1602fff1b6401c60845fd1c8b3ff2
|
[
"MIT"
] |
permissive
|
ccxt/ccxt
|
b40a0466f5c430a3c0c6026552ae697aa80ba6c6
|
e4065f6a490e6fc4dd7a72b375428b2faa570668
|
refs/heads/master
| 2023-09-04T03:41:29.787733
| 2023-09-03T19:25:57
| 2023-09-03T19:25:57
| 91,253,698
| 30,798
| 8,190
|
MIT
| 2023-09-14T21:59:09
| 2017-05-14T15:41:56
|
Python
|
UTF-8
|
Python
| false
| false
| 42,557
|
py
|
tidex.py
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.abstract.tidex import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from ccxt.base.types import OrderType
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class tidex(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(tidex, self).describe(), {
'id': 'tidex',
'name': 'Tidex',
'countries': ['UK'],
'rateLimit': 2000,
'version': '3',
'userAgent': self.userAgents['chrome'],
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'addMargin': False,
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'createReduceOnlyOrder': False,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchCurrencies': True,
'fetchFundingHistory': False,
'fetchFundingRate': False,
'fetchFundingRateHistory': False,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchLeverage': False,
'fetchLeverageTiers': False,
'fetchMarginMode': False,
'fetchMarkets': True,
'fetchMarkOHLCV': False,
'fetchMyTrades': True,
'fetchOpenInterestHistory': False,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderBooks': True,
'fetchPosition': False,
'fetchPositionMode': False,
'fetchPositions': False,
'fetchPositionsRisk': False,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'reduceMargin': False,
'setLeverage': False,
'setMarginMode': False,
'setPositionMode': False,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/30781780-03149dc4-a12e-11e7-82bb-313b269d24d4.jpg',
'api': {
'web': 'https://gate.tidex.com/api',
'public': 'https://api.tidex.com/api/3',
'private': 'https://api.tidex.com/tapi',
},
'www': 'https://tidex.com',
'doc': 'https://tidex.com/exchange/public-api',
'referral': 'https://tidex.com/exchange',
'fees': [
'https://tidex.com/exchange/assets-spec',
'https://tidex.com/exchange/pairs-spec',
],
},
'api': {
'web': {
'get': [
'currency',
'pairs',
'tickers',
'orders',
'ordershistory',
'trade-data',
'trade-data/{id}',
],
},
'public': {
'get': [
'info',
'ticker/{pair}',
'depth/{pair}',
'trades/{pair}',
],
},
'private': {
'post': [
'getInfoExt',
'getInfo',
'Trade',
'ActiveOrders',
'OrderInfo',
'CancelOrder',
'TradeHistory',
'getDepositAddress',
'createWithdraw',
'getWithdraw',
],
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'taker': self.parse_number('0.001'),
'maker': self.parse_number('0.001'),
},
},
'commonCurrencies': {
'DSH': 'DASH',
'EMGO': 'MGO',
'MGO': 'WMGO',
},
'precisionMode': TICK_SIZE,
'exceptions': {
'exact': {
'803': InvalidOrder, # "Count could not be less than 0.001."(selling below minAmount)
'804': InvalidOrder, # "Count could not be more than 10000."(buying above maxAmount)
'805': InvalidOrder, # "price could not be less than X."(minPrice violation on buy & sell)
'806': InvalidOrder, # "price could not be more than X."(maxPrice violation on buy & sell)
'807': InvalidOrder, # "cost could not be less than X."(minCost violation on buy & sell)
'831': InsufficientFunds, # "Not enougth X to create buy order."(buying with balance.quote < order.cost)
'832': InsufficientFunds, # "Not enougth X to create sell order."(selling with balance.base < order.amount)
'833': OrderNotFound, # "Order with id X was not found."(cancelling non-existent, closed and cancelled order)
},
'broad': {
'Invalid pair name': ExchangeError, # {"success":0,"error":"Invalid pair name: btc_eth"}
'invalid api key': AuthenticationError,
'invalid sign': AuthenticationError,
'api key dont have trade permission': AuthenticationError,
'invalid parameter': InvalidOrder,
'invalid order': InvalidOrder,
'Requests too often': DDoSProtection,
'not available': ExchangeNotAvailable,
'data unavailable': ExchangeNotAvailable,
'external service unavailable': ExchangeNotAvailable,
'IP restricted': PermissionDenied, # {"success":0,"code":0,"error":"IP restricted(223.xxx.xxx.xxx)"}
},
},
'options': {
'fetchTickersMaxLength': 2048,
},
'orders': {}, # orders cache / emulation
})
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: an associative dictionary of currencies
"""
response = self.webGetCurrency(params)
#
# [
# {
# "id":2,
# "symbol":"BTC",
# "type":2,
# "name":"Bitcoin",
# "amountPoint":8,
# "depositEnable":true,
# "depositMinAmount":0.0005,
# "withdrawEnable":true,
# "withdrawFee":0.0004,
# "withdrawMinAmount":0.0005,
# "settings":{
# "Blockchain":"https://blockchair.com/bitcoin/",
# "TxUrl":"https://blockchair.com/bitcoin/transaction/{0}",
# "AddrUrl":"https://blockchair.com/bitcoin/address/{0}",
# "ConfirmationCount":3,
# "NeedMemo":false
# },
# "visible":true,
# "isDelisted":false
# }
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
code = self.safe_currency_code(id)
visible = self.safe_value(currency, 'visible')
active = visible is True
withdrawEnable = self.safe_value(currency, 'withdrawEnable', True)
depositEnable = self.safe_value(currency, 'depositEnable', True)
if not withdrawEnable or not depositEnable:
active = False
name = self.safe_string(currency, 'name')
fee = self.safe_number(currency, 'withdrawFee')
result[code] = {
'id': id,
'code': code,
'name': name,
'active': active,
'deposit': depositEnable,
'withdraw': withdrawEnable,
'precision': self.parse_number(self.parse_precision(self.safe_string(currency, 'amountPoint'))),
'funding': {
'withdraw': {
'active': withdrawEnable,
'fee': fee,
},
'deposit': {
'active': depositEnable,
'fee': self.parse_number('0'),
},
},
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': self.safe_number(currency, 'withdrawMinAmount'),
'max': None,
},
'deposit': {
'min': self.safe_number(currency, 'depositMinAmount'),
'max': None,
},
},
'info': currency,
}
return result
def fetch_markets(self, params={}):
"""
retrieves data on all markets for tidex
:param dict [params]: extra parameters specific to the exchange api endpoint
:returns dict[]: an array of objects representing market data
"""
response = self.publicGetInfo(params)
#
# {
# "server_time":1615861869,
# "pairs":{
# "ltc_btc":{
# "decimal_places":8,
# "min_price":0.00000001,
# "max_price":3.0,
# "min_amount":0.001,
# "max_amount":1000000.0,
# "min_total":0.0001,
# "hidden":0,
# "fee":0.1,
# },
# },
# }
#
markets = response['pairs']
keys = list(markets.keys())
result = []
for i in range(0, len(keys)):
id = keys[i]
market = markets[id]
baseId, quoteId = id.split('_')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
hidden = self.safe_integer(market, 'hidden')
takerFeeString = self.safe_string(market, 'fee')
takerFeeString = Precise.string_div(takerFeeString, '100')
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (hidden == 0),
'contract': False,
'linear': None,
'inverse': None,
'taker': self.parse_number(takerFeeString),
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'decimal_places'))),
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'decimal_places'))),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': self.safe_number(market, 'min_amount'),
'max': self.safe_number(market, 'max_amount'),
},
'price': {
'min': self.safe_number(market, 'min_price'),
'max': self.safe_number(market, 'max_price'),
},
'cost': {
'min': self.safe_number(market, 'min_total'),
'max': None,
},
},
'info': market,
})
return result
def parse_balance(self, response):
balances = self.safe_value(response, 'return')
timestamp = self.safe_timestamp(balances, 'server_time')
result = {
'info': response,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
funds = self.safe_value(balances, 'funds', {})
currencyIds = list(funds.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
code = self.safe_currency_code(currencyId)
balance = self.safe_value(funds, currencyId, {})
account = self.account()
account['free'] = self.safe_string(balance, 'value')
account['used'] = self.safe_string(balance, 'inOrders')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: a `balance structure <https://github.com/ccxt/ccxt/wiki/Manual#balance-structure>`
"""
self.load_markets()
response = self.privatePostGetInfoExt(params)
#
# {
# "success":1,
# "return":{
# "funds":{
# "btc":{"value":0.0000499885629956,"inOrders":0.0},
# "eth":{"value":0.000000030741708,"inOrders":0.0},
# "tdx":{"value":0.0000000155385356,"inOrders":0.0}
# },
# "rights":{
# "info":true,
# "trade":true,
# "withdraw":false
# },
# "transaction_count":0,
# "open_orders":0,
# "server_time":1619436907
# },
# "stat":{
# "isSuccess":true,
# "serverTime":"00:00:00.0001157",
# "time":"00:00:00.0101364",
# "errors":null
# }
# }
#
return self.parse_balance(response)
def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int [limit]: the maximum amount of order book entries to return
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: A dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit # default = 150, max = 2000
response = self.publicGetDepthPair(self.extend(request, params))
        market_id_in_response = (market['id'] in response)
        if not market_id_in_response:
raise ExchangeError(self.id + ' ' + market['symbol'] + ' order book is empty or not available')
orderbook = response[market['id']]
return self.parse_order_book(orderbook, symbol)
def fetch_order_books(self, symbols: Optional[List[str]] = None, limit: Optional[int] = None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data for multiple markets
:param str[]|None symbols: list of unified market symbols, all symbols fetched if None, default is None
:param int [limit]: max number of entries per orderbook to return, default is None
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: a dictionary of `order book structures <https://github.com/ccxt/ccxt/wiki/Manual#order-book-structure>` indexed by market symbol
"""
self.load_markets()
ids = None
if symbols is None:
ids = '-'.join(self.ids)
# max URL length is 2083 symbols, including http schema, hostname, tld, etc...
if len(ids) > 2048:
numIds = len(self.ids)
raise ExchangeError(self.id + ' fetchOrderBooks() has ' + str(numIds) + ' symbols exceeding max URL length, you are required to specify a list of symbols in the first argument to fetchOrderBooks')
else:
ids = self.market_ids(symbols)
ids = '-'.join(ids)
request = {
'pair': ids,
}
if limit is not None:
request['limit'] = limit # default = 150, max = 2000
response = self.publicGetDepthPair(self.extend(request, params))
result = {}
ids = list(response.keys())
for i in range(0, len(ids)):
id = ids[i]
symbol = self.safe_symbol(id)
result[symbol] = self.parse_order_book(response[id], symbol)
return result
def parse_ticker(self, ticker, market=None):
#
# {
# high: 0.03497582,
# low: 0.03248474,
# avg: 0.03373028,
# vol: 120.11485715062999,
# vol_cur: 3572.24914074,
# last: 0.0337611,
# buy: 0.0337442,
# sell: 0.03377798,
# updated: 1537522009
# }
#
timestamp = self.safe_timestamp(ticker, 'updated')
market = self.safe_market(None, market)
last = self.safe_string(ticker, 'last')
return self.safe_ticker({
'symbol': market['symbol'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high'),
'low': self.safe_string(ticker, 'low'),
'bid': self.safe_string(ticker, 'buy'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'sell'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': self.safe_string(ticker, 'avg'),
'baseVolume': self.safe_string(ticker, 'vol_cur'),
'quoteVolume': self.safe_string(ticker, 'vol'),
'info': ticker,
}, market)
def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
:param str[]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: a dictionary of `ticker structures <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
self.load_markets()
symbols = self.market_symbols(symbols)
ids = None
if symbols is None:
numIds = len(self.ids)
ids = '-'.join(self.ids)
# max URL length is 2048 symbols, including http schema, hostname, tld, etc...
if len(ids) > self.options['fetchTickersMaxLength']:
maxLength = self.safe_integer(self.options, 'fetchTickersMaxLength', 2048)
raise ArgumentsRequired(self.id + ' fetchTickers() has ' + str(numIds) + ' markets exceeding max URL length for self endpoint(' + str(maxLength) + ' characters), please, specify a list of symbols of interest in the first argument to fetchTickers')
else:
newIds = self.market_ids(symbols)
ids = '-'.join(newIds)
request = {
'pair': ids,
}
response = self.publicGetTickerPair(self.extend(request, params))
result = {}
keys = list(response.keys())
for i in range(0, len(keys)):
id = keys[i]
market = self.safe_market(id)
symbol = market['symbol']
result[symbol] = self.parse_ticker(response[id], market)
return self.filter_by_array(result, 'symbol', symbols)
def fetch_ticker(self, symbol: str, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: a `ticker structure <https://github.com/ccxt/ccxt/wiki/Manual#ticker-structure>`
"""
tickers = self.fetch_tickers([symbol], params)
return tickers[symbol]
def parse_trade(self, trade, market=None):
timestamp = self.safe_timestamp(trade, 'timestamp')
side = self.safe_string(trade, 'type')
if side == 'ask':
side = 'sell'
elif side == 'bid':
side = 'buy'
priceString = self.safe_string_2(trade, 'rate', 'price')
id = self.safe_string_2(trade, 'trade_id', 'tid')
orderId = self.safe_string(trade, 'order_id')
marketId = self.safe_string(trade, 'pair')
symbol = self.safe_symbol(marketId, market)
amountString = self.safe_string(trade, 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
type = 'limit' # all trades are still limit trades
takerOrMaker = None
fee = None
feeCost = self.safe_number(trade, 'commission')
if feeCost is not None:
feeCurrencyId = self.safe_string(trade, 'commissionCurrency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrencyCode,
}
isYourOrder = self.safe_value(trade, 'is_your_order')
if isYourOrder is not None:
takerOrMaker = 'taker'
if isYourOrder:
takerOrMaker = 'maker'
if fee is None:
fee = self.calculate_fee(symbol, type, side, amount, price, takerOrMaker)
return {
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
'info': trade,
}
def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int [since]: timestamp in ms of the earliest trade to fetch
:param int [limit]: the maximum amount of trades to fetch
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
}
if limit is not None:
request['limit'] = limit
response = self.publicGetTradesPair(self.extend(request, params))
if isinstance(response, list):
numElements = len(response)
if numElements == 0:
return []
return self.parse_trades(response[market['id']], market, since, limit)
def create_order(self, symbol: str, type: OrderType, side: OrderSide, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float [price]: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: an `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
if type == 'market':
raise ExchangeError(self.id + ' createOrder() allows limit orders only')
amountString = str(amount)
priceString = str(price)
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'type': side,
'amount': self.amount_to_precision(symbol, amount),
'rate': self.price_to_precision(symbol, price),
}
response = self.privatePostTrade(self.extend(request, params))
id = None
status = 'open'
filledString = '0.0'
remainingString = amountString
returnResult = self.safe_value(response, 'return')
if returnResult is not None:
id = self.safe_string(returnResult, 'order_id')
if id == '0':
id = self.safe_string(returnResult, 'init_order_id')
status = 'closed'
filledString = self.safe_string(returnResult, 'received', filledString)
remainingString = self.safe_string(returnResult, 'remains', amountString)
timestamp = self.milliseconds()
return self.safe_order({
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'side': side,
'price': priceString,
'cost': None,
'amount': amountString,
'remaining': remainingString,
'filled': filledString,
'fee': None,
# 'trades': self.parse_trades(order['trades'], market),
'info': response,
'clientOrderId': None,
'average': None,
'trades': None,
}, market)
def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: not used by tidex cancelOrder()
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
request = {
'order_id': int(id),
}
return self.privatePostCancelOrder(self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'0': 'open',
'1': 'closed',
'2': 'canceled',
'3': 'canceled', # or partially-filled and still open? https://github.com/ccxt/ccxt/issues/1594
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
id = self.safe_string(order, 'id')
status = self.parse_order_status(self.safe_string(order, 'status'))
timestamp = self.safe_timestamp(order, 'timestamp_created')
marketId = self.safe_string(order, 'pair')
symbol = self.safe_symbol(marketId, market)
remaining = None
amount = None
price = self.safe_string(order, 'rate')
if 'start_amount' in order:
amount = self.safe_string(order, 'start_amount')
remaining = self.safe_string(order, 'amount')
else:
remaining = self.safe_string(order, 'amount')
fee = None
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'type': 'limit',
'timeInForce': None,
'postOnly': None,
'side': self.safe_string(order, 'type'),
'price': price,
'stopPrice': None,
'triggerPrice': None,
'cost': None,
'amount': amount,
'remaining': remaining,
'filled': None,
'status': status,
'fee': fee,
'average': None,
'trades': None,
}, market)
def fetch_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
fetches information on an order made by the user
:param str symbol: not used by tidex fetchOrder
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: An `order structure <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
request = {
'order_id': int(id),
}
response = self.privatePostOrderInfo(self.extend(request, params))
id = str(id)
result = self.safe_value(response, 'return', {})
order = self.safe_value(result, id)
return self.parse_order(self.extend({'id': id}, order))
def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch open orders for
:param int [limit]: the maximum number of open orders structures to retrieve
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns Order[]: a list of `order structures <https://github.com/ccxt/ccxt/wiki/Manual#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
response = self.privatePostActiveOrders(self.extend(request, params))
#
# {
# "success":1,
# "return":{
# "1255468911":{
# "status":0,
# "pair":"spike_usdt",
# "type":"sell",
# "amount":35028.44256388,
# "rate":0.00199989,
# "timestamp_created":1602684432
# }
# },
# "stat":{
# "isSuccess":true,
# "serverTime":"00:00:00.0000826",
# "time":"00:00:00.0091423",
# "errors":null
# }
# }
#
# it can only return 'open' orders(i.e. no way to fetch 'closed' orders)
orders = self.safe_value(response, 'return', [])
return self.parse_orders(orders, market, since, limit)
def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int [since]: the earliest time in ms to fetch trades for
:param int [limit]: the maximum number of trades structures to retrieve
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns Trade[]: a list of `trade structures <https://github.com/ccxt/ccxt/wiki/Manual#trade-structure>`
"""
self.load_markets()
market = None
# some derived classes use camelcase notation for request fields
request = {
# 'from': 123456789, # trade ID, from which the display starts numerical 0(test result: liqui ignores self field)
# 'count': 1000, # the number of trades for display numerical, default = 1000
# 'from_id': trade ID, from which the display starts numerical 0
# 'end_id': trade ID on which the display ends numerical ∞
# 'order': 'ASC', # sorting, default = DESC(test result: liqui ignores self field, most recent trade always goes last)
# 'since': 1234567890, # UTC start time, default = 0(test result: liqui ignores self field)
# 'end': 1234567890, # UTC end time, default = ∞(test result: liqui ignores self field)
# 'pair': 'eth_btc', # default = all markets
}
if symbol is not None:
market = self.market(symbol)
request['pair'] = market['id']
if limit is not None:
request['count'] = limit
if since is not None:
request['since'] = self.parse_to_int(since / 1000)
response = self.privatePostTradeHistory(self.extend(request, params))
trades = self.safe_value(response, 'return', [])
return self.parse_trades(trades, market, since, limit)
def withdraw(self, code: str, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str tag:
:param dict [params]: extra parameters specific to the tidex api endpoint
:returns dict: a `transaction structure <https://github.com/ccxt/ccxt/wiki/Manual#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
'amount': float(amount),
'address': address,
}
if tag is not None:
request['memo'] = tag
response = self.privatePostCreateWithdraw(self.extend(request, params))
#
# {
# "success":1,
# "return":{
# "withdraw_id":1111,
# "withdraw_info":{
# "id":1111,
# "asset_id":1,
# "asset":"BTC",
# "amount":0.0093,
# "fee":0.0007,
# "create_time":1575128018,
# "status":"Created",
# "data":{
# "address":"1KFHE7w8BhaENAswwryaoccDb6qcT6DbYY",
# "memo":"memo",
# "tx":null,
# "error":null
# },
# "in_blockchain":false
# }
# }
# }
#
result = self.safe_value(response, 'return', {})
withdrawInfo = self.safe_value(result, 'withdraw_info', {})
return self.parse_transaction(withdrawInfo, currency)
def parse_transaction(self, transaction, currency=None):
#
# {
# "id":1111,
# "asset_id":1,
# "asset":"BTC",
# "amount":0.0093,
# "fee":0.0007,
# "create_time":1575128018,
# "status":"Created",
# "data":{
# "address":"1KFHE7w8BhaENAswwryaoccDb6qcT6DbYY",
# "memo":"memo",
# "tx":null,
# "error":null
# },
# "in_blockchain":false
# }
#
currency = self.safe_currency(None, currency)
return {
'id': self.safe_string(transaction, 'id'),
'txid': None,
'timestamp': None,
'datetime': None,
'network': None,
'addressFrom': None,
'address': None,
'addressTo': None,
'amount': None,
'type': None,
'currency': currency['code'],
'status': None,
'updated': None,
'tagFrom': None,
'tag': None,
'tagTo': None,
'comment': None,
'fee': None,
'info': transaction,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
query = self.omit(params, self.extract_params(path))
if api == 'private':
self.check_required_credentials()
nonce = self.nonce()
body = self.urlencode(self.extend({
'nonce': nonce,
'method': path,
}, query))
signature = self.hmac(self.encode(body), self.encode(self.secret), hashlib.sha512)
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Key': self.apiKey,
'Sign': signature,
}
elif api == 'public':
url += '/' + self.implode_params(path, params)
if query:
url += '?' + self.urlencode(query)
else:
url += '/' + self.implode_params(path, params)
if method == 'GET':
if query:
url += '?' + self.urlencode(query)
else:
if query:
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None # fallback to default error handler
if 'success' in response:
#
# 1 - The exchange only returns the integer 'success' key from their private API
#
# {"success": 1, ...} httpCode == 200
# {"success": 0, ...} httpCode == 200
#
# 2 - However, derived exchanges can return non-integers
#
        # It can be a numeric string
        # {"success": "1", ...}
        # {"success": "0", ...}, httpCode >= 200 (can be 403, 502, etc)
#
# Or just a string
# {"success": "true", ...}
# {"success": "false", ...}, httpCode >= 200
#
# Or a boolean
# {"success": True, ...}
# {"success": False, ...}, httpCode >= 200
#
# 3 - Oversimplified, Python PEP8 forbids comparison operator(==) of different types
#
# 4 - We do not want to copy-paste and duplicate the code of self handler to other exchanges derived from Liqui
#
# To cover points 1, 2, 3 and 4 combined self handler should work like self:
#
success = self.safe_value(response, 'success', False)
if isinstance(success, str):
if (success == 'true') or (success == '1'):
success = True
else:
success = False
if not success:
code = self.safe_string(response, 'code')
message = self.safe_string(response, 'error')
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
raise ExchangeError(feedback) # unknown message
return None
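# Illustrative sketch appended for clarity (not part of the generated exchange class):
# the private-API request built in tidex.sign() above amounts to an HMAC-SHA512 of the
# urlencoded POST body keyed with the API secret, returned to the server through the
# 'Key'/'Sign' headers. The helper below reproduces that scheme with the standard
# library only; the credential values are placeholders.
def _demo_tidex_signature(api_secret: str, nonce: int, method: str, **params) -> dict:
    import hashlib as _hashlib
    import hmac
    from urllib.parse import urlencode
    body = urlencode({'nonce': nonce, 'method': method, **params})
    signature = hmac.new(api_secret.encode(), body.encode(), _hashlib.sha512).hexdigest()
    return {'body': body, 'Sign': signature}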
|
8cb0661ab1481d051adc8869621f12d0d9604c27
|
4506d81df5ae98078e5cbe79f613514ad12b1c83
|
/nipype/interfaces/spm/tests/test_auto_CreateWarped.py
|
f62694641c91a7a9c0bd9b82752bea927d13eaff
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
nipy/nipype
|
d52eba1b98fda68e24d006ac0d5701fc8a531b9c
|
03a236320fa229299d637ff9af97865a6ae76aca
|
refs/heads/master
| 2023-08-28T10:36:07.020541
| 2023-08-25T13:40:09
| 2023-08-25T13:40:09
| 791,477
| 692
| 569
|
NOASSERTION
| 2023-09-11T06:04:51
| 2010-07-22T17:06:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,387
|
py
|
test_auto_CreateWarped.py
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..preprocess import CreateWarped
def test_CreateWarped_inputs():
input_map = dict(
flowfield_files=dict(
copyfile=False,
field="crt_warped.flowfields",
mandatory=True,
),
image_files=dict(
copyfile=False,
field="crt_warped.images",
mandatory=True,
),
interp=dict(
field="crt_warped.interp",
),
iterations=dict(
field="crt_warped.K",
),
matlab_cmd=dict(),
mfile=dict(
usedefault=True,
),
modulate=dict(
field="crt_warped.jactransf",
),
paths=dict(),
use_mcr=dict(),
use_v8struct=dict(
min_ver="8",
usedefault=True,
),
)
inputs = CreateWarped.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_CreateWarped_outputs():
output_map = dict(
warped_files=dict(),
)
outputs = CreateWarped.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
740c66edcbb6350c2d916d74feb5009ec105f38a
|
a69294c7d5ee75441759b66bc20cce727350bd59
|
/ndb/blobstore.py
|
6bf08ee89c166b467c2aebd82484f9cbf10c7b40
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/datastore-ndb-python
|
58b57437e11104bfe924fca3de2ee28319f2976f
|
59cb209ed95480025d26531fc91397575438d2fe
|
refs/heads/master
| 2023-08-21T01:16:54.021744
| 2022-10-20T23:12:51
| 2022-10-20T23:12:51
| 37,215,291
| 127
| 56
|
Apache-2.0
| 2022-10-20T23:12:53
| 2015-06-10T18:34:30
|
Python
|
UTF-8
|
Python
| false
| false
| 15,489
|
py
|
blobstore.py
|
#
# Copyright 2008 The ndb Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NDB interface for Blobstore.
This currently builds on google.appengine.ext.blobstore and provides a
similar API. The main API differences:
- BlobInfo is an actual Model subclass rather than a pseudo-model class.
To query, use BlobInfo.query() and its documented properties. Other
changes:
- The kind is '__BlobInfo__' (BLOB_INFO_KIND).
- key() is a method returning a BlobKey instance.
- put() and friends are disabled.
- Added class methods get() and friends.
- Added instance methods delete() and friends, and open().
- Instead of BlobReferenceProperty, there's BlobKeyProperty.
- There is no create_rpc() function. Instead, functions and methods
take keyword arguments to specify deadline, callback, and (in some
  cases) datastore options.
- APIs (get() and delete()) that in ext.blobstore take either a blob
key or a list of blob keys are split into two: one that takes a blob
key and one that takes a list of blob keys, the latter having a name
ending in _multi.
- The following APIs have a synchronous and an async version:
- BlobInfo.get()
- BlobInfo.delete()
- create_upload_url()
- get()
- get_multi()
- delete()
- delete_multi()
- fetch_data()
"""
# TODO: Should delete() and friends accept BlobInfos too?
# TODO: Don't have redundant function/method versions of APIs?
# TODO: Refactor ext.blobstore to reduce duplication of code.
import base64
import email
from .google_imports import api_blobstore as blobstore
from .google_imports import ext_blobstore
from . import model
from . import tasklets
__all__ = ['BLOB_INFO_KIND',
'BLOB_KEY_HEADER',
'BLOB_MIGRATION_KIND',
'BLOB_RANGE_HEADER',
'BlobFetchSizeTooLargeError',
'BlobInfo',
'BlobInfoParseError',
'BlobKey',
'BlobNotFoundError',
'BlobKeyProperty',
'BlobReader',
'DataIndexOutOfRangeError',
'PermissionDeniedError',
'Error',
'InternalError',
'MAX_BLOB_FETCH_SIZE',
'UPLOAD_INFO_CREATION_HEADER',
'create_upload_url',
'create_upload_url_async',
'delete',
'delete_async',
'delete_multi',
'delete_multi_async',
'fetch_data',
'fetch_data_async',
'get',
'get_async',
'get_multi',
'get_multi_async',
'parse_blob_info']
# Exceptions are all imported.
Error = blobstore.Error
InternalError = blobstore.InternalError
BlobFetchSizeTooLargeError = blobstore.BlobFetchSizeTooLargeError
BlobNotFoundError = blobstore.BlobNotFoundError
_CreationFormatError = blobstore._CreationFormatError
DataIndexOutOfRangeError = blobstore.DataIndexOutOfRangeError
PermissionDeniedError = blobstore.PermissionDeniedError
BlobInfoParseError = ext_blobstore.BlobInfoParseError
# So is BlobKey.
BlobKey = blobstore.BlobKey
# And the constants.
BLOB_INFO_KIND = blobstore.BLOB_INFO_KIND
BLOB_MIGRATION_KIND = blobstore.BLOB_MIGRATION_KIND
BLOB_KEY_HEADER = blobstore.BLOB_KEY_HEADER
BLOB_RANGE_HEADER = blobstore.BLOB_RANGE_HEADER
MAX_BLOB_FETCH_SIZE = blobstore.MAX_BLOB_FETCH_SIZE
UPLOAD_INFO_CREATION_HEADER = blobstore.UPLOAD_INFO_CREATION_HEADER
# Re-export BlobKeyProperty from ndb.model for completeness.
BlobKeyProperty = model.BlobKeyProperty
class BlobInfo(model.Model):
"""Information about blobs in Blobstore.
This is a Model subclass that has been doctored to be unwritable.
Properties:
- content_type: Content type of blob.
- creation: Creation date of blob, when it was uploaded.
- filename: Filename user selected from their machine.
- size: Size of uncompressed blob.
- md5_hash: The md5 hash value of the uploaded blob (in hex).
Additional API:
Class methods:
- get(): retrieve a BlobInfo by key
- get_multi(): retrieve a list of BlobInfos by keys
- get_async(), get_multi_async(): async version of get() and get_multi()
Instance methods:
- delete(): delete this blob
- delete_async(): async version of delete()
- key(): return the BlobKey for this blob
- open(): return a BlobReader instance for this blob
Because BlobInfo instances are synchronized with Blobstore, the class
cache policies are off.
Do not subclass this class.
"""
_use_cache = False
_use_memcache = False
content_type = model.StringProperty()
creation = model.DateTimeProperty()
filename = model.StringProperty()
size = model.IntegerProperty()
md5_hash = model.StringProperty()
@classmethod
def _get_kind(cls):
"""Override this to match the datastore entities written by Blobstore."""
return BLOB_INFO_KIND # __BlobInfo__
@classmethod
def get(cls, blob_key, **ctx_options):
"""Retrieve a BlobInfo by key.
Args:
blob_key: A blob key. This may be a str, unicode or BlobKey instance.
**ctx_options: Context options for Model().get_by_id().
Returns:
      A BlobInfo entity associated with the provided key, or None if there
      was no such entity.
"""
fut = cls.get_async(blob_key, **ctx_options)
return fut.get_result()
@classmethod
def get_async(cls, blob_key, **ctx_options):
"""Async version of get()."""
if not isinstance(blob_key, (BlobKey, basestring)):
raise TypeError('Expected blob key, got %r' % (blob_key,))
if 'parent' in ctx_options:
raise TypeError('Parent is not supported')
return cls.get_by_id_async(str(blob_key), **ctx_options)
@classmethod
def get_multi(cls, blob_keys, **ctx_options):
"""Multi-key version of get().
Args:
blob_keys: A list of blob keys.
**ctx_options: Context options for Model().get_by_id().
Returns:
A list whose items are each either a BlobInfo entity or None.
"""
futs = cls.get_multi_async(blob_keys, **ctx_options)
return [fut.get_result() for fut in futs]
@classmethod
def get_multi_async(cls, blob_keys, **ctx_options):
"""Async version of get_multi()."""
for blob_key in blob_keys:
if not isinstance(blob_key, (BlobKey, basestring)):
raise TypeError('Expected blob key, got %r' % (blob_key,))
if 'parent' in ctx_options:
raise TypeError('Parent is not supported')
blob_key_strs = map(str, blob_keys)
keys = [model.Key(BLOB_INFO_KIND, id) for id in blob_key_strs]
return model.get_multi_async(keys, **ctx_options)
def _put_async(self, **ctx_options):
"""Cheap way to make BlobInfo entities read-only."""
raise TypeError('BlobInfo is read-only')
put_async = _put_async
def key(self):
"""Get key for blob.
Returns:
BlobKey instance that identifies this blob.
"""
# TODO: Cache this?
return BlobKey(self._key.id())
def delete(self, **options):
"""Permanently delete this blob from Blobstore.
Args:
**options: Options for create_rpc().
"""
fut = delete_async(self.key(), **options)
fut.get_result()
def delete_async(self, **options):
"""Async version of delete()."""
return delete_async(self.key(), **options) # A Future!
def open(self, *args, **kwds):
"""Returns a BlobReader for this blob.
Args:
*args, **kwargs: Passed to BlobReader constructor.
Returns:
A BlobReader instance.
"""
return BlobReader(self, *args, **kwds)
get = BlobInfo.get
get_async = BlobInfo.get_async
get_multi = BlobInfo.get_multi
get_multi_async = BlobInfo.get_multi_async
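# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A minimal, hedged example of the retrieval helpers above. The function and
# its argument names are hypothetical and it is never called; it only shows
# the calling conventions of get(), get_multi() and get_multi_async().
def _example_get_blob_infos(blob_key, blob_keys):
  """Sketch: fetch one BlobInfo and a batch of BlobInfos."""
  info = BlobInfo.get(blob_key)  # Returns None if no such entity exists.
  infos = BlobInfo.get_multi(blob_keys)  # List of BlobInfo entities or None.
  futures = BlobInfo.get_multi_async(blob_keys)  # Async variant returns Futures.
  return info, infos, [fut.get_result() for fut in futures]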
def delete(blob_key, **options):
"""Delete a blob from Blobstore.
Args:
blob_key: A blob key. This may be a str, unicode or BlobKey instance.
**options: Options for create_rpc().
"""
fut = delete_async(blob_key, **options)
return fut.get_result()
@tasklets.tasklet
def delete_async(blob_key, **options):
"""Async version of delete()."""
if not isinstance(blob_key, (basestring, BlobKey)):
raise TypeError('Expected blob key, got %r' % (blob_key,))
rpc = blobstore.create_rpc(**options)
yield blobstore.delete_async(blob_key, rpc=rpc)
def delete_multi(blob_keys, **options):
"""Delete blobs from Blobstore.
Args:
blob_keys: A list of blob keys.
**options: Options for create_rpc().
"""
fut = delete_multi_async(blob_keys, **options)
fut.get_result()
@tasklets.tasklet
def delete_multi_async(blob_keys, **options):
"""Async version of delete_multi()."""
if isinstance(blob_keys, (basestring, BlobKey)):
    raise TypeError('Expected a list, got %r' % (blob_keys,))
rpc = blobstore.create_rpc(**options)
yield blobstore.delete_async(blob_keys, rpc=rpc)
def create_upload_url(success_path,
max_bytes_per_blob=None,
max_bytes_total=None,
**options):
"""Create upload URL for POST form.
Args:
success_path: Path within application to call when POST is successful
and upload is complete.
max_bytes_per_blob: The maximum size in bytes that any one blob in the
upload can be or None for no maximum size.
max_bytes_total: The maximum size in bytes that the aggregate sizes of all
of the blobs in the upload can be or None for no maximum size.
**options: Options for create_rpc().
Returns:
The upload URL.
Raises:
TypeError: If max_bytes_per_blob or max_bytes_total are not integral types.
ValueError: If max_bytes_per_blob or max_bytes_total are not
positive values.
"""
fut = create_upload_url_async(success_path,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
**options)
return fut.get_result()
@tasklets.tasklet
def create_upload_url_async(success_path,
max_bytes_per_blob=None,
max_bytes_total=None,
**options):
"""Async version of create_upload_url()."""
rpc = blobstore.create_rpc(**options)
rpc = blobstore.create_upload_url_async(success_path,
max_bytes_per_blob=max_bytes_per_blob,
max_bytes_total=max_bytes_total,
rpc=rpc)
result = yield rpc
raise tasklets.Return(result)
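# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A hedged example of building an upload URL for an HTML upload form. The
# success path and size limits are hypothetical values; the returned URL is
# what the form would POST to.
def _example_build_upload_url():
  """Sketch: create an upload URL with per-blob and total size limits."""
  return create_upload_url('/upload_complete',
                           max_bytes_per_blob=1024 * 1024,    # 1 MB per blob.
                           max_bytes_total=10 * 1024 * 1024)  # 10 MB in total.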
def parse_blob_info(field_storage):
"""Parse a BlobInfo record from file upload field_storage.
Args:
field_storage: cgi.FieldStorage that represents uploaded blob.
Returns:
BlobInfo record as parsed from the field-storage instance.
None if there was no field_storage.
Raises:
BlobInfoParseError when provided field_storage does not contain enough
information to construct a BlobInfo object.
"""
if field_storage is None:
return None
field_name = field_storage.name
def get_value(dct, name):
value = dct.get(name, None)
if value is None:
raise BlobInfoParseError(
'Field %s has no %s.' % (field_name, name))
return value
filename = get_value(field_storage.disposition_options, 'filename')
blob_key_str = get_value(field_storage.type_options, 'blob-key')
blob_key = BlobKey(blob_key_str)
upload_content = email.message_from_file(field_storage.file)
content_type = get_value(upload_content, 'content-type')
size = get_value(upload_content, 'content-length')
creation_string = get_value(upload_content, UPLOAD_INFO_CREATION_HEADER)
md5_hash_encoded = get_value(upload_content, 'content-md5')
md5_hash = base64.urlsafe_b64decode(md5_hash_encoded)
try:
size = int(size)
except (TypeError, ValueError):
raise BlobInfoParseError(
'%s is not a valid value for %s size.' % (size, field_name))
try:
creation = blobstore._parse_creation(creation_string, field_name)
except blobstore._CreationFormatError, err:
raise BlobInfoParseError(str(err))
return BlobInfo(id=blob_key_str,
content_type=content_type,
creation=creation,
filename=filename,
size=size,
md5_hash=md5_hash,
)
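# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A hedged example of handling a single uploaded file field. The field_storage
# argument is assumed to be the cgi.FieldStorage of the uploaded blob, as it
# would arrive in an upload handler.
def _example_handle_upload(field_storage):
  """Sketch: parse upload metadata and return the blob's key, or None."""
  blob_info = parse_blob_info(field_storage)  # None when no field was posted.
  if blob_info is None:
    return None
  return blob_info.key()  # BlobKey identifying the stored blob.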
def fetch_data(blob, start_index, end_index, **options):
"""Fetch data for blob.
Fetches a fragment of a blob up to MAX_BLOB_FETCH_SIZE in length. Attempting
to fetch a fragment that extends beyond the boundaries of the blob will return
the amount of data from start_index until the end of the blob, which will be
a smaller size than requested. Requesting a fragment which is entirely
outside the boundaries of the blob will return empty string. Attempting
to fetch a negative index will raise an exception.
Args:
blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
blob to fetch data from.
start_index: Start index of blob data to fetch. May not be negative.
end_index: End index (inclusive) of blob data to fetch. Must be
>= start_index.
**options: Options for create_rpc().
Returns:
str containing partial data of blob. If the indexes are legal but outside
the boundaries of the blob, will return empty string.
Raises:
TypeError if start_index or end_index are not indexes. Also when blob
is not a string, BlobKey or BlobInfo.
DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
BlobFetchSizeTooLargeError when request blob fragment is larger than
MAX_BLOB_FETCH_SIZE.
BlobNotFoundError when blob does not exist.
"""
fut = fetch_data_async(blob, start_index, end_index, **options)
return fut.get_result()
@tasklets.tasklet
def fetch_data_async(blob, start_index, end_index, **options):
"""Async version of fetch_data()."""
if isinstance(blob, BlobInfo):
blob = blob.key()
rpc = blobstore.create_rpc(**options)
rpc = blobstore.fetch_data_async(blob, start_index, end_index, rpc=rpc)
result = yield rpc
raise tasklets.Return(result)
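# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A hedged example of reading the first kilobyte of a blob. Note that
# end_index is inclusive and a fragment may not exceed MAX_BLOB_FETCH_SIZE.
def _example_read_blob_prefix(blob_key):
  """Sketch: fetch bytes [0, 1023] of the blob, or fewer if it is shorter."""
  return fetch_data(blob_key, 0, 1023)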
class BlobReader(ext_blobstore.BlobReader):
"""Provides a read-only file-like interface to a blobstore blob."""
# This just overrides two methods to use the proper versions.
# Hack alert: this can access private attributes of the parent class
# because it has the same class name. (This is a Python feature.)
def __fill_buffer(self, size=0):
"""Fills the internal buffer.
Args:
size: Number of bytes to read. Will be clamped to
[self.__buffer_size, MAX_BLOB_FETCH_SIZE].
"""
read_size = min(max(size, self.__buffer_size), MAX_BLOB_FETCH_SIZE)
self.__buffer = fetch_data(self.__blob_key, self.__position,
self.__position + read_size - 1)
self.__buffer_position = 0
self.__eof = len(self.__buffer) < read_size
@property
def blob_info(self):
"""Returns the BlobInfo for this file."""
if not self.__blob_info:
self.__blob_info = BlobInfo.get(self.__blob_key)
return self.__blob_info
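# --- Illustrative usage sketch (editor addition, not part of the original API) ---
# A hedged example of streaming a blob through the file-like BlobReader
# defined above; the chunk size is arbitrary and the generator is never
# invoked here.
def _example_stream_blob(blob_key, chunk_size=MAX_BLOB_FETCH_SIZE):
  """Sketch: yield successive chunks of the blob until EOF."""
  reader = BlobReader(blob_key)
  while True:
    chunk = reader.read(chunk_size)
    if not chunk:
      break
    yield chunk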
|
1ccbfc2ceac6974673f45d9801da30aa833042e9
|
2bb7bc07df02a17735c2cacc7b2ba0c6de77b63c
|
/tests/utils/collections_test.py
|
b7d9e4ce71af6809bbe15de987cf895ab9cda813
|
[
"Apache-2.0"
] |
permissive
|
Yelp/Tron
|
2c30a301055a732c3b33a39e05dbdcfc84ac8e02
|
958a2e22a6ac733cba043bc4238f3bf2b8048f4b
|
refs/heads/master
| 2023-08-29T11:35:11.716532
| 2023-08-21T19:27:45
| 2023-08-21T19:27:45
| 899,771
| 226
| 53
|
NOASSERTION
| 2023-08-21T19:26:45
| 2010-09-09T20:54:04
|
Python
|
UTF-8
|
Python
| false
| false
| 2,746
|
py
|
collections_test.py
|
from unittest import mock
from testifycompat import assert_equal
from testifycompat import assert_in
from testifycompat import assert_not_in
from testifycompat import assert_raises
from testifycompat import setup
from testifycompat import TestCase
from tests.assertions import assert_mock_calls
from tests.testingutils import autospec_method
from tron.utils import collections
class TestMappingCollections(TestCase):
@setup
def setup_collection(self):
self.name = "some_name"
self.collection = collections.MappingCollection(self.name)
def test_filter_by_name(self):
autospec_method(self.collection.remove)
self.collection.update(dict.fromkeys(["c", "d", "e"]))
self.collection.filter_by_name(["a", "c"])
expected = [mock.call(name) for name in ["d", "e"]]
assert_mock_calls(expected, self.collection.remove.mock_calls)
def test_remove_missing(self):
assert_raises(ValueError, self.collection.remove, "name")
def test_remove(self):
name = "the_name"
self.collection[name] = item = mock.Mock()
self.collection.remove(name)
assert_not_in(name, self.collection)
item.disable.assert_called_with()
def test_contains_item_false(self):
mock_item, mock_func = mock.Mock(), mock.Mock()
assert not self.collection.contains_item(mock_item, mock_func)
assert not mock_func.mock_calls
def test_contains_item_not_equal(self):
mock_item, mock_func = mock.Mock(), mock.Mock()
self.collection[mock_item.get_name()] = "other item"
result = self.collection.contains_item(mock_item, mock_func)
assert_equal(result, mock_func.return_value)
mock_func.assert_called_with(mock_item)
def test_contains_item_true(self):
mock_item, mock_func = mock.Mock(), mock.Mock()
self.collection[mock_item.get_name()] = mock_item
assert self.collection.contains_item(mock_item, mock_func)
def test_add_contains(self):
autospec_method(self.collection.contains_item)
item, update_func = mock.Mock(), mock.Mock()
assert not self.collection.add(item, update_func)
assert_not_in(item.get_name(), self.collection)
def test_add_new(self):
autospec_method(self.collection.contains_item, return_value=False)
item, update_func = mock.Mock(), mock.Mock()
assert self.collection.add(item, update_func)
assert_in(item.get_name(), self.collection)
def test_replace(self):
autospec_method(self.collection.add)
item = mock.Mock()
self.collection.replace(item)
self.collection.add.assert_called_with(
item, self.collection.remove_item,
)
|
0bb9316a840008a744744eea29f2b44fc49a46d2
|
57adfd30d44dcec446e55306265b68ee08b51655
|
/dropbox/file_requests.py
|
ab931bb797999fb1b6660a50e30f7b4be2f26431
|
[
"MIT"
] |
permissive
|
dropbox/dropbox-sdk-python
|
610c0cbbfcc7bdacda6da859a8247b56005bbc44
|
487793dff3c5a8a3a76010799dc4803cabdb70f3
|
refs/heads/main
| 2023-09-04T23:06:41.483053
| 2023-05-25T17:17:10
| 2023-05-25T17:17:10
| 37,347,427
| 1,029
| 453
|
MIT
| 2023-08-16T19:51:59
| 2015-06-12T22:23:27
|
Python
|
UTF-8
|
Python
| false
| false
| 48,672
|
py
|
file_requests.py
|
# -*- coding: utf-8 -*-
# Auto-generated by Stone, do not modify.
# @generated
# flake8: noqa
# pylint: skip-file
"""
This namespace contains endpoints and data types for file request operations.
"""
from __future__ import unicode_literals
from stone.backends.python_rsrc import stone_base as bb
from stone.backends.python_rsrc import stone_validators as bv
from dropbox import common
from dropbox import files
class GeneralFileRequestsError(bb.Union):
"""
There is an error accessing the file requests functionality.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar file_requests.GeneralFileRequestsError.disabled_for_team: This user's
Dropbox Business team doesn't allow file requests.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
disabled_for_team = None
# Attribute is overwritten below the class definition
other = None
def is_disabled_for_team(self):
"""
Check if the union tag is ``disabled_for_team``.
:rtype: bool
"""
return self._tag == 'disabled_for_team'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(GeneralFileRequestsError, self)._process_custom_annotations(annotation_type, field_path, processor)
GeneralFileRequestsError_validator = bv.Union(GeneralFileRequestsError)
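# --- Illustrative usage sketch (editor addition, not generated by Stone) ---
# A hedged example of the tagged-union convention described in the class
# docstring above: exactly one ``is_*`` predicate is true for a given value.
def _example_describe_general_error(error):
    """Sketch: map a GeneralFileRequestsError to a human-readable string."""
    if error.is_disabled_for_team():
        return "File requests are disabled for this user's team."
    if error.is_other():
        return "An unspecified file requests error occurred."
    return "Unknown error tag."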
class CountFileRequestsError(GeneralFileRequestsError):
"""
There was an error counting the file requests.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(CountFileRequestsError, self)._process_custom_annotations(annotation_type, field_path, processor)
CountFileRequestsError_validator = bv.Union(CountFileRequestsError)
class CountFileRequestsResult(bb.Struct):
"""
Result for :meth:`dropbox.dropbox_client.Dropbox.file_requests_count`.
    :ivar file_requests.CountFileRequestsResult.file_request_count: The number
        of file requests owned by this user.
"""
__slots__ = [
'_file_request_count_value',
]
_has_required_fields = True
def __init__(self,
file_request_count=None):
self._file_request_count_value = bb.NOT_SET
if file_request_count is not None:
self.file_request_count = file_request_count
# Instance attribute type: int (validator is set below)
file_request_count = bb.Attribute("file_request_count")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(CountFileRequestsResult, self)._process_custom_annotations(annotation_type, field_path, processor)
CountFileRequestsResult_validator = bv.Struct(CountFileRequestsResult)
class CreateFileRequestArgs(bb.Struct):
"""
Arguments for :meth:`dropbox.dropbox_client.Dropbox.file_requests_create`.
:ivar file_requests.CreateFileRequestArgs.title: The title of the file
request. Must not be empty.
:ivar file_requests.CreateFileRequestArgs.destination: The path of the
folder in the Dropbox where uploaded files will be sent. For apps with
the app folder permission, this will be relative to the app folder.
:ivar file_requests.CreateFileRequestArgs.deadline: The deadline for the
file request. Deadlines can only be set by Professional and Business
accounts.
:ivar file_requests.CreateFileRequestArgs.open: Whether or not the file
request should be open. If the file request is closed, it will not
accept any file submissions, but it can be opened later.
:ivar file_requests.CreateFileRequestArgs.description: A description of the
file request.
"""
__slots__ = [
'_title_value',
'_destination_value',
'_deadline_value',
'_open_value',
'_description_value',
]
_has_required_fields = True
def __init__(self,
title=None,
destination=None,
deadline=None,
open=None,
description=None):
self._title_value = bb.NOT_SET
self._destination_value = bb.NOT_SET
self._deadline_value = bb.NOT_SET
self._open_value = bb.NOT_SET
self._description_value = bb.NOT_SET
if title is not None:
self.title = title
if destination is not None:
self.destination = destination
if deadline is not None:
self.deadline = deadline
if open is not None:
self.open = open
if description is not None:
self.description = description
# Instance attribute type: str (validator is set below)
title = bb.Attribute("title")
# Instance attribute type: str (validator is set below)
destination = bb.Attribute("destination")
# Instance attribute type: FileRequestDeadline (validator is set below)
deadline = bb.Attribute("deadline", nullable=True, user_defined=True)
# Instance attribute type: bool (validator is set below)
open = bb.Attribute("open")
# Instance attribute type: str (validator is set below)
description = bb.Attribute("description", nullable=True)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(CreateFileRequestArgs, self)._process_custom_annotations(annotation_type, field_path, processor)
CreateFileRequestArgs_validator = bv.Struct(CreateFileRequestArgs)
class FileRequestError(GeneralFileRequestsError):
"""
There is an error with the file request.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar file_requests.FileRequestError.not_found: This file request ID was not
found.
:ivar file_requests.FileRequestError.not_a_folder: The specified path is not
a folder.
:ivar file_requests.FileRequestError.app_lacks_access: This file request is
not accessible to this app. Apps with the app folder permission can only
access file requests in their app folder.
:ivar file_requests.FileRequestError.no_permission: This user doesn't have
permission to access or modify this file request.
:ivar file_requests.FileRequestError.email_unverified: This user's email
address is not verified. File requests are only available on accounts
with a verified email address. Users can verify their email address
`here <https://www.dropbox.com/help/317>`_.
:ivar file_requests.FileRequestError.validation_error: There was an error
validating the request. For example, the title was invalid, or there
were disallowed characters in the destination path.
"""
# Attribute is overwritten below the class definition
not_found = None
# Attribute is overwritten below the class definition
not_a_folder = None
# Attribute is overwritten below the class definition
app_lacks_access = None
# Attribute is overwritten below the class definition
no_permission = None
# Attribute is overwritten below the class definition
email_unverified = None
# Attribute is overwritten below the class definition
validation_error = None
def is_not_found(self):
"""
Check if the union tag is ``not_found``.
:rtype: bool
"""
return self._tag == 'not_found'
def is_not_a_folder(self):
"""
Check if the union tag is ``not_a_folder``.
:rtype: bool
"""
return self._tag == 'not_a_folder'
def is_app_lacks_access(self):
"""
Check if the union tag is ``app_lacks_access``.
:rtype: bool
"""
return self._tag == 'app_lacks_access'
def is_no_permission(self):
"""
Check if the union tag is ``no_permission``.
:rtype: bool
"""
return self._tag == 'no_permission'
def is_email_unverified(self):
"""
Check if the union tag is ``email_unverified``.
:rtype: bool
"""
return self._tag == 'email_unverified'
def is_validation_error(self):
"""
Check if the union tag is ``validation_error``.
:rtype: bool
"""
return self._tag == 'validation_error'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(FileRequestError, self)._process_custom_annotations(annotation_type, field_path, processor)
FileRequestError_validator = bv.Union(FileRequestError)
class CreateFileRequestError(FileRequestError):
"""
There was an error creating the file request.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar file_requests.CreateFileRequestError.invalid_location: File requests
are not available on the specified folder.
:ivar file_requests.CreateFileRequestError.rate_limit: The user has reached
the rate limit for creating file requests. The limit is currently 4000
file requests total.
"""
# Attribute is overwritten below the class definition
invalid_location = None
# Attribute is overwritten below the class definition
rate_limit = None
def is_invalid_location(self):
"""
Check if the union tag is ``invalid_location``.
:rtype: bool
"""
return self._tag == 'invalid_location'
def is_rate_limit(self):
"""
Check if the union tag is ``rate_limit``.
:rtype: bool
"""
return self._tag == 'rate_limit'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(CreateFileRequestError, self)._process_custom_annotations(annotation_type, field_path, processor)
CreateFileRequestError_validator = bv.Union(CreateFileRequestError)
class DeleteAllClosedFileRequestsError(FileRequestError):
"""
There was an error deleting all closed file requests.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(DeleteAllClosedFileRequestsError, self)._process_custom_annotations(annotation_type, field_path, processor)
DeleteAllClosedFileRequestsError_validator = bv.Union(DeleteAllClosedFileRequestsError)
class DeleteAllClosedFileRequestsResult(bb.Struct):
"""
Result for
:meth:`dropbox.dropbox_client.Dropbox.file_requests_delete_all_closed`.
:ivar file_requests.DeleteAllClosedFileRequestsResult.file_requests: The
file requests deleted for this user.
"""
__slots__ = [
'_file_requests_value',
]
_has_required_fields = True
def __init__(self,
file_requests=None):
self._file_requests_value = bb.NOT_SET
if file_requests is not None:
self.file_requests = file_requests
# Instance attribute type: list of [FileRequest] (validator is set below)
file_requests = bb.Attribute("file_requests")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(DeleteAllClosedFileRequestsResult, self)._process_custom_annotations(annotation_type, field_path, processor)
DeleteAllClosedFileRequestsResult_validator = bv.Struct(DeleteAllClosedFileRequestsResult)
class DeleteFileRequestArgs(bb.Struct):
"""
Arguments for :meth:`dropbox.dropbox_client.Dropbox.file_requests_delete`.
:ivar file_requests.DeleteFileRequestArgs.ids: List IDs of the file requests
to delete.
"""
__slots__ = [
'_ids_value',
]
_has_required_fields = True
def __init__(self,
ids=None):
self._ids_value = bb.NOT_SET
if ids is not None:
self.ids = ids
# Instance attribute type: list of [str] (validator is set below)
ids = bb.Attribute("ids")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(DeleteFileRequestArgs, self)._process_custom_annotations(annotation_type, field_path, processor)
DeleteFileRequestArgs_validator = bv.Struct(DeleteFileRequestArgs)
class DeleteFileRequestError(FileRequestError):
"""
There was an error deleting these file requests.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
    :ivar file_requests.DeleteFileRequestError.file_request_open: One or more
        file requests are currently open.
"""
# Attribute is overwritten below the class definition
file_request_open = None
def is_file_request_open(self):
"""
Check if the union tag is ``file_request_open``.
:rtype: bool
"""
return self._tag == 'file_request_open'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(DeleteFileRequestError, self)._process_custom_annotations(annotation_type, field_path, processor)
DeleteFileRequestError_validator = bv.Union(DeleteFileRequestError)
class DeleteFileRequestsResult(bb.Struct):
"""
Result for :meth:`dropbox.dropbox_client.Dropbox.file_requests_delete`.
:ivar file_requests.DeleteFileRequestsResult.file_requests: The file
requests deleted by the request.
"""
__slots__ = [
'_file_requests_value',
]
_has_required_fields = True
def __init__(self,
file_requests=None):
self._file_requests_value = bb.NOT_SET
if file_requests is not None:
self.file_requests = file_requests
# Instance attribute type: list of [FileRequest] (validator is set below)
file_requests = bb.Attribute("file_requests")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(DeleteFileRequestsResult, self)._process_custom_annotations(annotation_type, field_path, processor)
DeleteFileRequestsResult_validator = bv.Struct(DeleteFileRequestsResult)
class FileRequest(bb.Struct):
"""
A `file request <https://www.dropbox.com/help/9090>`_ for receiving files
into the user's Dropbox account.
:ivar file_requests.FileRequest.id: The ID of the file request.
:ivar file_requests.FileRequest.url: The URL of the file request.
:ivar file_requests.FileRequest.title: The title of the file request.
:ivar file_requests.FileRequest.destination: The path of the folder in the
Dropbox where uploaded files will be sent. This can be None if the
destination was removed. For apps with the app folder permission, this
will be relative to the app folder.
:ivar file_requests.FileRequest.created: When this file request was created.
:ivar file_requests.FileRequest.deadline: The deadline for this file
request. Only set if the request has a deadline.
:ivar file_requests.FileRequest.is_open: Whether or not the file request is
open. If the file request is closed, it will not accept any more file
submissions.
:ivar file_requests.FileRequest.file_count: The number of files this file
request has received.
:ivar file_requests.FileRequest.description: A description of the file
request.
"""
__slots__ = [
'_id_value',
'_url_value',
'_title_value',
'_destination_value',
'_created_value',
'_deadline_value',
'_is_open_value',
'_file_count_value',
'_description_value',
]
_has_required_fields = True
def __init__(self,
id=None,
url=None,
title=None,
created=None,
is_open=None,
file_count=None,
destination=None,
deadline=None,
description=None):
self._id_value = bb.NOT_SET
self._url_value = bb.NOT_SET
self._title_value = bb.NOT_SET
self._destination_value = bb.NOT_SET
self._created_value = bb.NOT_SET
self._deadline_value = bb.NOT_SET
self._is_open_value = bb.NOT_SET
self._file_count_value = bb.NOT_SET
self._description_value = bb.NOT_SET
if id is not None:
self.id = id
if url is not None:
self.url = url
if title is not None:
self.title = title
if destination is not None:
self.destination = destination
if created is not None:
self.created = created
if deadline is not None:
self.deadline = deadline
if is_open is not None:
self.is_open = is_open
if file_count is not None:
self.file_count = file_count
if description is not None:
self.description = description
# Instance attribute type: str (validator is set below)
id = bb.Attribute("id")
# Instance attribute type: str (validator is set below)
url = bb.Attribute("url")
# Instance attribute type: str (validator is set below)
title = bb.Attribute("title")
# Instance attribute type: str (validator is set below)
destination = bb.Attribute("destination", nullable=True)
# Instance attribute type: datetime.datetime (validator is set below)
created = bb.Attribute("created")
# Instance attribute type: FileRequestDeadline (validator is set below)
deadline = bb.Attribute("deadline", nullable=True, user_defined=True)
# Instance attribute type: bool (validator is set below)
is_open = bb.Attribute("is_open")
# Instance attribute type: int (validator is set below)
file_count = bb.Attribute("file_count")
# Instance attribute type: str (validator is set below)
description = bb.Attribute("description", nullable=True)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(FileRequest, self)._process_custom_annotations(annotation_type, field_path, processor)
FileRequest_validator = bv.Struct(FileRequest)
class FileRequestDeadline(bb.Struct):
"""
:ivar file_requests.FileRequestDeadline.deadline: The deadline for this file
request.
:ivar file_requests.FileRequestDeadline.allow_late_uploads: If set, allow
uploads after the deadline has passed. These uploads will be marked
overdue.
"""
__slots__ = [
'_deadline_value',
'_allow_late_uploads_value',
]
_has_required_fields = True
def __init__(self,
deadline=None,
allow_late_uploads=None):
self._deadline_value = bb.NOT_SET
self._allow_late_uploads_value = bb.NOT_SET
if deadline is not None:
self.deadline = deadline
if allow_late_uploads is not None:
self.allow_late_uploads = allow_late_uploads
# Instance attribute type: datetime.datetime (validator is set below)
deadline = bb.Attribute("deadline")
# Instance attribute type: GracePeriod (validator is set below)
allow_late_uploads = bb.Attribute("allow_late_uploads", nullable=True, user_defined=True)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(FileRequestDeadline, self)._process_custom_annotations(annotation_type, field_path, processor)
FileRequestDeadline_validator = bv.Struct(FileRequestDeadline)
class GetFileRequestArgs(bb.Struct):
"""
Arguments for :meth:`dropbox.dropbox_client.Dropbox.file_requests_get`.
:ivar file_requests.GetFileRequestArgs.id: The ID of the file request to
retrieve.
"""
__slots__ = [
'_id_value',
]
_has_required_fields = True
def __init__(self,
id=None):
self._id_value = bb.NOT_SET
if id is not None:
self.id = id
# Instance attribute type: str (validator is set below)
id = bb.Attribute("id")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(GetFileRequestArgs, self)._process_custom_annotations(annotation_type, field_path, processor)
GetFileRequestArgs_validator = bv.Struct(GetFileRequestArgs)
class GetFileRequestError(FileRequestError):
"""
There was an error retrieving the specified file request.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(GetFileRequestError, self)._process_custom_annotations(annotation_type, field_path, processor)
GetFileRequestError_validator = bv.Union(GetFileRequestError)
class GracePeriod(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
one_day = None
# Attribute is overwritten below the class definition
two_days = None
# Attribute is overwritten below the class definition
seven_days = None
# Attribute is overwritten below the class definition
thirty_days = None
# Attribute is overwritten below the class definition
always = None
# Attribute is overwritten below the class definition
other = None
def is_one_day(self):
"""
Check if the union tag is ``one_day``.
:rtype: bool
"""
return self._tag == 'one_day'
def is_two_days(self):
"""
Check if the union tag is ``two_days``.
:rtype: bool
"""
return self._tag == 'two_days'
def is_seven_days(self):
"""
Check if the union tag is ``seven_days``.
:rtype: bool
"""
return self._tag == 'seven_days'
def is_thirty_days(self):
"""
Check if the union tag is ``thirty_days``.
:rtype: bool
"""
return self._tag == 'thirty_days'
def is_always(self):
"""
Check if the union tag is ``always``.
:rtype: bool
"""
return self._tag == 'always'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(GracePeriod, self)._process_custom_annotations(annotation_type, field_path, processor)
GracePeriod_validator = bv.Union(GracePeriod)
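# --- Illustrative usage sketch (editor addition, not generated by Stone) ---
# A hedged example of combining FileRequestDeadline with a GracePeriod tag to
# allow late uploads for seven days past the cut-off; the timestamp is an
# arbitrary placeholder.
def _example_deadline_with_grace_period():
    """Sketch: construct a FileRequestDeadline that tolerates late uploads."""
    import datetime
    return FileRequestDeadline(
        deadline=datetime.datetime(2024, 1, 31, 12, 0, 0),
        allow_late_uploads=GracePeriod.seven_days,
    )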
class ListFileRequestsArg(bb.Struct):
"""
Arguments for :meth:`dropbox.dropbox_client.Dropbox.file_requests_list`.
:ivar file_requests.ListFileRequestsArg.limit: The maximum number of file
requests that should be returned per request.
"""
__slots__ = [
'_limit_value',
]
_has_required_fields = False
def __init__(self,
limit=None):
self._limit_value = bb.NOT_SET
if limit is not None:
self.limit = limit
# Instance attribute type: int (validator is set below)
limit = bb.Attribute("limit")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(ListFileRequestsArg, self)._process_custom_annotations(annotation_type, field_path, processor)
ListFileRequestsArg_validator = bv.Struct(ListFileRequestsArg)
class ListFileRequestsContinueArg(bb.Struct):
"""
:ivar file_requests.ListFileRequestsContinueArg.cursor: The cursor returned
by the previous API call specified in the endpoint description.
"""
__slots__ = [
'_cursor_value',
]
_has_required_fields = True
def __init__(self,
cursor=None):
self._cursor_value = bb.NOT_SET
if cursor is not None:
self.cursor = cursor
# Instance attribute type: str (validator is set below)
cursor = bb.Attribute("cursor")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(ListFileRequestsContinueArg, self)._process_custom_annotations(annotation_type, field_path, processor)
ListFileRequestsContinueArg_validator = bv.Struct(ListFileRequestsContinueArg)
class ListFileRequestsContinueError(GeneralFileRequestsError):
"""
There was an error retrieving the file requests.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar file_requests.ListFileRequestsContinueError.invalid_cursor: The cursor
is invalid.
"""
# Attribute is overwritten below the class definition
invalid_cursor = None
def is_invalid_cursor(self):
"""
Check if the union tag is ``invalid_cursor``.
:rtype: bool
"""
return self._tag == 'invalid_cursor'
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(ListFileRequestsContinueError, self)._process_custom_annotations(annotation_type, field_path, processor)
ListFileRequestsContinueError_validator = bv.Union(ListFileRequestsContinueError)
class ListFileRequestsError(GeneralFileRequestsError):
"""
There was an error retrieving the file requests.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(ListFileRequestsError, self)._process_custom_annotations(annotation_type, field_path, processor)
ListFileRequestsError_validator = bv.Union(ListFileRequestsError)
class ListFileRequestsResult(bb.Struct):
"""
Result for :meth:`dropbox.dropbox_client.Dropbox.file_requests_list`.
:ivar file_requests.ListFileRequestsResult.file_requests: The file requests
owned by this user. Apps with the app folder permission will only see
file requests in their app folder.
"""
__slots__ = [
'_file_requests_value',
]
_has_required_fields = True
def __init__(self,
file_requests=None):
self._file_requests_value = bb.NOT_SET
if file_requests is not None:
self.file_requests = file_requests
# Instance attribute type: list of [FileRequest] (validator is set below)
file_requests = bb.Attribute("file_requests")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(ListFileRequestsResult, self)._process_custom_annotations(annotation_type, field_path, processor)
ListFileRequestsResult_validator = bv.Struct(ListFileRequestsResult)
class ListFileRequestsV2Result(bb.Struct):
"""
Result for :meth:`dropbox.dropbox_client.Dropbox.file_requests_list` and
:meth:`dropbox.dropbox_client.Dropbox.file_requests_list_continue`.
:ivar file_requests.ListFileRequestsV2Result.file_requests: The file
requests owned by this user. Apps with the app folder permission will
only see file requests in their app folder.
:ivar file_requests.ListFileRequestsV2Result.cursor: Pass the cursor into
:meth:`dropbox.dropbox_client.Dropbox.file_requests_list_continue` to
obtain additional file requests.
:ivar file_requests.ListFileRequestsV2Result.has_more: Is true if there are
additional file requests that have not been returned yet. An additional
        call to :route:`list/continue` can retrieve them.
"""
__slots__ = [
'_file_requests_value',
'_cursor_value',
'_has_more_value',
]
_has_required_fields = True
def __init__(self,
file_requests=None,
cursor=None,
has_more=None):
self._file_requests_value = bb.NOT_SET
self._cursor_value = bb.NOT_SET
self._has_more_value = bb.NOT_SET
if file_requests is not None:
self.file_requests = file_requests
if cursor is not None:
self.cursor = cursor
if has_more is not None:
self.has_more = has_more
# Instance attribute type: list of [FileRequest] (validator is set below)
file_requests = bb.Attribute("file_requests")
# Instance attribute type: str (validator is set below)
cursor = bb.Attribute("cursor")
# Instance attribute type: bool (validator is set below)
has_more = bb.Attribute("has_more")
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(ListFileRequestsV2Result, self)._process_custom_annotations(annotation_type, field_path, processor)
ListFileRequestsV2Result_validator = bv.Struct(ListFileRequestsV2Result)
class UpdateFileRequestArgs(bb.Struct):
"""
Arguments for :meth:`dropbox.dropbox_client.Dropbox.file_requests_update`.
:ivar file_requests.UpdateFileRequestArgs.id: The ID of the file request to
update.
:ivar file_requests.UpdateFileRequestArgs.title: The new title of the file
request. Must not be empty.
:ivar file_requests.UpdateFileRequestArgs.destination: The new path of the
folder in the Dropbox where uploaded files will be sent. For apps with
the app folder permission, this will be relative to the app folder.
:ivar file_requests.UpdateFileRequestArgs.deadline: The new deadline for the
file request. Deadlines can only be set by Professional and Business
accounts.
:ivar file_requests.UpdateFileRequestArgs.open: Whether to set this file
request as open or closed.
:ivar file_requests.UpdateFileRequestArgs.description: The description of
the file request.
"""
__slots__ = [
'_id_value',
'_title_value',
'_destination_value',
'_deadline_value',
'_open_value',
'_description_value',
]
_has_required_fields = True
def __init__(self,
id=None,
title=None,
destination=None,
deadline=None,
open=None,
description=None):
self._id_value = bb.NOT_SET
self._title_value = bb.NOT_SET
self._destination_value = bb.NOT_SET
self._deadline_value = bb.NOT_SET
self._open_value = bb.NOT_SET
self._description_value = bb.NOT_SET
if id is not None:
self.id = id
if title is not None:
self.title = title
if destination is not None:
self.destination = destination
if deadline is not None:
self.deadline = deadline
if open is not None:
self.open = open
if description is not None:
self.description = description
# Instance attribute type: str (validator is set below)
id = bb.Attribute("id")
# Instance attribute type: str (validator is set below)
title = bb.Attribute("title", nullable=True)
# Instance attribute type: str (validator is set below)
destination = bb.Attribute("destination", nullable=True)
# Instance attribute type: UpdateFileRequestDeadline (validator is set below)
deadline = bb.Attribute("deadline", user_defined=True)
# Instance attribute type: bool (validator is set below)
open = bb.Attribute("open", nullable=True)
# Instance attribute type: str (validator is set below)
description = bb.Attribute("description", nullable=True)
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(UpdateFileRequestArgs, self)._process_custom_annotations(annotation_type, field_path, processor)
UpdateFileRequestArgs_validator = bv.Struct(UpdateFileRequestArgs)
class UpdateFileRequestDeadline(bb.Union):
"""
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
:ivar file_requests.UpdateFileRequestDeadline.no_update: Do not change the
file request's deadline.
:ivar Optional[FileRequestDeadline]
file_requests.UpdateFileRequestDeadline.update: If :val:`null`, the file
request's deadline is cleared.
"""
_catch_all = 'other'
# Attribute is overwritten below the class definition
no_update = None
# Attribute is overwritten below the class definition
other = None
@classmethod
def update(cls, val):
"""
Create an instance of this class set to the ``update`` tag with value
``val``.
:param FileRequestDeadline val:
:rtype: UpdateFileRequestDeadline
"""
return cls('update', val)
def is_no_update(self):
"""
Check if the union tag is ``no_update``.
:rtype: bool
"""
return self._tag == 'no_update'
def is_update(self):
"""
Check if the union tag is ``update``.
:rtype: bool
"""
return self._tag == 'update'
def is_other(self):
"""
Check if the union tag is ``other``.
:rtype: bool
"""
return self._tag == 'other'
def get_update(self):
"""
If None, the file request's deadline is cleared.
Only call this if :meth:`is_update` is true.
:rtype: FileRequestDeadline
"""
if not self.is_update():
raise AttributeError("tag 'update' not set")
return self._value
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(UpdateFileRequestDeadline, self)._process_custom_annotations(annotation_type, field_path, processor)
UpdateFileRequestDeadline_validator = bv.Union(UpdateFileRequestDeadline)
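# --- Illustrative usage sketch (editor addition, not generated by Stone) ---
# A hedged example of the three ways this union is typically built for an
# update call: keep the current deadline, replace it, or clear it by passing
# None to ``update``.
def _example_deadline_update_values(new_deadline):
    """Sketch: build UpdateFileRequestDeadline values for file_requests/update."""
    keep = UpdateFileRequestDeadline.no_update  # Leave the deadline unchanged.
    replace = UpdateFileRequestDeadline.update(new_deadline)  # Set a new FileRequestDeadline.
    clear = UpdateFileRequestDeadline.update(None)  # Clear the existing deadline.
    return keep, replace, clear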
class UpdateFileRequestError(FileRequestError):
"""
There is an error updating the file request.
This class acts as a tagged union. Only one of the ``is_*`` methods will
return true. To get the associated value of a tag (if one exists), use the
corresponding ``get_*`` method.
"""
def _process_custom_annotations(self, annotation_type, field_path, processor):
super(UpdateFileRequestError, self)._process_custom_annotations(annotation_type, field_path, processor)
UpdateFileRequestError_validator = bv.Union(UpdateFileRequestError)
FileRequestId_validator = bv.String(min_length=1, pattern='[-_0-9a-zA-Z]+')
FileRequestValidationError_validator = bv.Nullable(bv.String())
GeneralFileRequestsError._disabled_for_team_validator = bv.Void()
GeneralFileRequestsError._other_validator = bv.Void()
GeneralFileRequestsError._tagmap = {
'disabled_for_team': GeneralFileRequestsError._disabled_for_team_validator,
'other': GeneralFileRequestsError._other_validator,
}
GeneralFileRequestsError.disabled_for_team = GeneralFileRequestsError('disabled_for_team')
GeneralFileRequestsError.other = GeneralFileRequestsError('other')
CountFileRequestsError._tagmap = {
}
CountFileRequestsError._tagmap.update(GeneralFileRequestsError._tagmap)
CountFileRequestsResult.file_request_count.validator = bv.UInt64()
CountFileRequestsResult._all_field_names_ = set(['file_request_count'])
CountFileRequestsResult._all_fields_ = [('file_request_count', CountFileRequestsResult.file_request_count.validator)]
CreateFileRequestArgs.title.validator = bv.String(min_length=1)
CreateFileRequestArgs.destination.validator = files.Path_validator
CreateFileRequestArgs.deadline.validator = bv.Nullable(FileRequestDeadline_validator)
CreateFileRequestArgs.open.validator = bv.Boolean()
CreateFileRequestArgs.description.validator = bv.Nullable(bv.String())
CreateFileRequestArgs._all_field_names_ = set([
'title',
'destination',
'deadline',
'open',
'description',
])
CreateFileRequestArgs._all_fields_ = [
('title', CreateFileRequestArgs.title.validator),
('destination', CreateFileRequestArgs.destination.validator),
('deadline', CreateFileRequestArgs.deadline.validator),
('open', CreateFileRequestArgs.open.validator),
('description', CreateFileRequestArgs.description.validator),
]
FileRequestError._not_found_validator = bv.Void()
FileRequestError._not_a_folder_validator = bv.Void()
FileRequestError._app_lacks_access_validator = bv.Void()
FileRequestError._no_permission_validator = bv.Void()
FileRequestError._email_unverified_validator = bv.Void()
FileRequestError._validation_error_validator = bv.Void()
FileRequestError._tagmap = {
'not_found': FileRequestError._not_found_validator,
'not_a_folder': FileRequestError._not_a_folder_validator,
'app_lacks_access': FileRequestError._app_lacks_access_validator,
'no_permission': FileRequestError._no_permission_validator,
'email_unverified': FileRequestError._email_unverified_validator,
'validation_error': FileRequestError._validation_error_validator,
}
FileRequestError._tagmap.update(GeneralFileRequestsError._tagmap)
FileRequestError.not_found = FileRequestError('not_found')
FileRequestError.not_a_folder = FileRequestError('not_a_folder')
FileRequestError.app_lacks_access = FileRequestError('app_lacks_access')
FileRequestError.no_permission = FileRequestError('no_permission')
FileRequestError.email_unverified = FileRequestError('email_unverified')
FileRequestError.validation_error = FileRequestError('validation_error')
CreateFileRequestError._invalid_location_validator = bv.Void()
CreateFileRequestError._rate_limit_validator = bv.Void()
CreateFileRequestError._tagmap = {
'invalid_location': CreateFileRequestError._invalid_location_validator,
'rate_limit': CreateFileRequestError._rate_limit_validator,
}
CreateFileRequestError._tagmap.update(FileRequestError._tagmap)
CreateFileRequestError.invalid_location = CreateFileRequestError('invalid_location')
CreateFileRequestError.rate_limit = CreateFileRequestError('rate_limit')
DeleteAllClosedFileRequestsError._tagmap = {
}
DeleteAllClosedFileRequestsError._tagmap.update(FileRequestError._tagmap)
DeleteAllClosedFileRequestsResult.file_requests.validator = bv.List(FileRequest_validator)
DeleteAllClosedFileRequestsResult._all_field_names_ = set(['file_requests'])
DeleteAllClosedFileRequestsResult._all_fields_ = [('file_requests', DeleteAllClosedFileRequestsResult.file_requests.validator)]
DeleteFileRequestArgs.ids.validator = bv.List(FileRequestId_validator)
DeleteFileRequestArgs._all_field_names_ = set(['ids'])
DeleteFileRequestArgs._all_fields_ = [('ids', DeleteFileRequestArgs.ids.validator)]
DeleteFileRequestError._file_request_open_validator = bv.Void()
DeleteFileRequestError._tagmap = {
'file_request_open': DeleteFileRequestError._file_request_open_validator,
}
DeleteFileRequestError._tagmap.update(FileRequestError._tagmap)
DeleteFileRequestError.file_request_open = DeleteFileRequestError('file_request_open')
DeleteFileRequestsResult.file_requests.validator = bv.List(FileRequest_validator)
DeleteFileRequestsResult._all_field_names_ = set(['file_requests'])
DeleteFileRequestsResult._all_fields_ = [('file_requests', DeleteFileRequestsResult.file_requests.validator)]
FileRequest.id.validator = FileRequestId_validator
FileRequest.url.validator = bv.String(min_length=1)
FileRequest.title.validator = bv.String(min_length=1)
FileRequest.destination.validator = bv.Nullable(files.Path_validator)
FileRequest.created.validator = common.DropboxTimestamp_validator
FileRequest.deadline.validator = bv.Nullable(FileRequestDeadline_validator)
FileRequest.is_open.validator = bv.Boolean()
FileRequest.file_count.validator = bv.Int64()
FileRequest.description.validator = bv.Nullable(bv.String())
FileRequest._all_field_names_ = set([
'id',
'url',
'title',
'destination',
'created',
'deadline',
'is_open',
'file_count',
'description',
])
FileRequest._all_fields_ = [
('id', FileRequest.id.validator),
('url', FileRequest.url.validator),
('title', FileRequest.title.validator),
('destination', FileRequest.destination.validator),
('created', FileRequest.created.validator),
('deadline', FileRequest.deadline.validator),
('is_open', FileRequest.is_open.validator),
('file_count', FileRequest.file_count.validator),
('description', FileRequest.description.validator),
]
FileRequestDeadline.deadline.validator = common.DropboxTimestamp_validator
FileRequestDeadline.allow_late_uploads.validator = bv.Nullable(GracePeriod_validator)
FileRequestDeadline._all_field_names_ = set([
'deadline',
'allow_late_uploads',
])
FileRequestDeadline._all_fields_ = [
('deadline', FileRequestDeadline.deadline.validator),
('allow_late_uploads', FileRequestDeadline.allow_late_uploads.validator),
]
GetFileRequestArgs.id.validator = FileRequestId_validator
GetFileRequestArgs._all_field_names_ = set(['id'])
GetFileRequestArgs._all_fields_ = [('id', GetFileRequestArgs.id.validator)]
GetFileRequestError._tagmap = {
}
GetFileRequestError._tagmap.update(FileRequestError._tagmap)
GracePeriod._one_day_validator = bv.Void()
GracePeriod._two_days_validator = bv.Void()
GracePeriod._seven_days_validator = bv.Void()
GracePeriod._thirty_days_validator = bv.Void()
GracePeriod._always_validator = bv.Void()
GracePeriod._other_validator = bv.Void()
GracePeriod._tagmap = {
'one_day': GracePeriod._one_day_validator,
'two_days': GracePeriod._two_days_validator,
'seven_days': GracePeriod._seven_days_validator,
'thirty_days': GracePeriod._thirty_days_validator,
'always': GracePeriod._always_validator,
'other': GracePeriod._other_validator,
}
GracePeriod.one_day = GracePeriod('one_day')
GracePeriod.two_days = GracePeriod('two_days')
GracePeriod.seven_days = GracePeriod('seven_days')
GracePeriod.thirty_days = GracePeriod('thirty_days')
GracePeriod.always = GracePeriod('always')
GracePeriod.other = GracePeriod('other')
ListFileRequestsArg.limit.validator = bv.UInt64()
ListFileRequestsArg._all_field_names_ = set(['limit'])
ListFileRequestsArg._all_fields_ = [('limit', ListFileRequestsArg.limit.validator)]
ListFileRequestsContinueArg.cursor.validator = bv.String()
ListFileRequestsContinueArg._all_field_names_ = set(['cursor'])
ListFileRequestsContinueArg._all_fields_ = [('cursor', ListFileRequestsContinueArg.cursor.validator)]
ListFileRequestsContinueError._invalid_cursor_validator = bv.Void()
ListFileRequestsContinueError._tagmap = {
'invalid_cursor': ListFileRequestsContinueError._invalid_cursor_validator,
}
ListFileRequestsContinueError._tagmap.update(GeneralFileRequestsError._tagmap)
ListFileRequestsContinueError.invalid_cursor = ListFileRequestsContinueError('invalid_cursor')
ListFileRequestsError._tagmap = {
}
ListFileRequestsError._tagmap.update(GeneralFileRequestsError._tagmap)
ListFileRequestsResult.file_requests.validator = bv.List(FileRequest_validator)
ListFileRequestsResult._all_field_names_ = set(['file_requests'])
ListFileRequestsResult._all_fields_ = [('file_requests', ListFileRequestsResult.file_requests.validator)]
ListFileRequestsV2Result.file_requests.validator = bv.List(FileRequest_validator)
ListFileRequestsV2Result.cursor.validator = bv.String()
ListFileRequestsV2Result.has_more.validator = bv.Boolean()
ListFileRequestsV2Result._all_field_names_ = set([
'file_requests',
'cursor',
'has_more',
])
ListFileRequestsV2Result._all_fields_ = [
('file_requests', ListFileRequestsV2Result.file_requests.validator),
('cursor', ListFileRequestsV2Result.cursor.validator),
('has_more', ListFileRequestsV2Result.has_more.validator),
]
UpdateFileRequestArgs.id.validator = FileRequestId_validator
UpdateFileRequestArgs.title.validator = bv.Nullable(bv.String(min_length=1))
UpdateFileRequestArgs.destination.validator = bv.Nullable(files.Path_validator)
UpdateFileRequestArgs.deadline.validator = UpdateFileRequestDeadline_validator
UpdateFileRequestArgs.open.validator = bv.Nullable(bv.Boolean())
UpdateFileRequestArgs.description.validator = bv.Nullable(bv.String())
UpdateFileRequestArgs._all_field_names_ = set([
'id',
'title',
'destination',
'deadline',
'open',
'description',
])
UpdateFileRequestArgs._all_fields_ = [
('id', UpdateFileRequestArgs.id.validator),
('title', UpdateFileRequestArgs.title.validator),
('destination', UpdateFileRequestArgs.destination.validator),
('deadline', UpdateFileRequestArgs.deadline.validator),
('open', UpdateFileRequestArgs.open.validator),
('description', UpdateFileRequestArgs.description.validator),
]
UpdateFileRequestDeadline._no_update_validator = bv.Void()
UpdateFileRequestDeadline._update_validator = bv.Nullable(FileRequestDeadline_validator)
UpdateFileRequestDeadline._other_validator = bv.Void()
UpdateFileRequestDeadline._tagmap = {
'no_update': UpdateFileRequestDeadline._no_update_validator,
'update': UpdateFileRequestDeadline._update_validator,
'other': UpdateFileRequestDeadline._other_validator,
}
UpdateFileRequestDeadline.no_update = UpdateFileRequestDeadline('no_update')
UpdateFileRequestDeadline.other = UpdateFileRequestDeadline('other')
UpdateFileRequestError._tagmap = {
}
UpdateFileRequestError._tagmap.update(FileRequestError._tagmap)
CreateFileRequestArgs.open.default = True
ListFileRequestsArg.limit.default = 1000
UpdateFileRequestArgs.deadline.default = UpdateFileRequestDeadline.no_update
count = bb.Route(
'count',
1,
False,
bv.Void(),
CountFileRequestsResult_validator,
CountFileRequestsError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
create = bb.Route(
'create',
1,
False,
CreateFileRequestArgs_validator,
FileRequest_validator,
CreateFileRequestError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
delete = bb.Route(
'delete',
1,
False,
DeleteFileRequestArgs_validator,
DeleteFileRequestsResult_validator,
DeleteFileRequestError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
delete_all_closed = bb.Route(
'delete_all_closed',
1,
False,
bv.Void(),
DeleteAllClosedFileRequestsResult_validator,
DeleteAllClosedFileRequestsError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
get = bb.Route(
'get',
1,
False,
GetFileRequestArgs_validator,
FileRequest_validator,
GetFileRequestError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
list_v2 = bb.Route(
'list',
2,
False,
ListFileRequestsArg_validator,
ListFileRequestsV2Result_validator,
ListFileRequestsError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
list = bb.Route(
'list',
1,
False,
bv.Void(),
ListFileRequestsResult_validator,
ListFileRequestsError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
list_continue = bb.Route(
'list/continue',
1,
False,
ListFileRequestsContinueArg_validator,
ListFileRequestsV2Result_validator,
ListFileRequestsContinueError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
update = bb.Route(
'update',
1,
False,
UpdateFileRequestArgs_validator,
FileRequest_validator,
UpdateFileRequestError_validator,
{'auth': 'user',
'host': 'api',
'style': 'rpc'},
)
ROUTES = {
'count': count,
'create': create,
'delete': delete,
'delete_all_closed': delete_all_closed,
'get': get,
'list:2': list_v2,
'list': list,
'list/continue': list_continue,
'update': update,
}
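# --- Illustrative usage sketch (editor addition, not generated by Stone) ---
# A hedged note on how these route objects surface on the SDK client: each
# entry in ROUTES corresponds to a ``file_requests_*`` method on
# dropbox.Dropbox. The access token and destination path below are
# placeholders.
def _example_create_file_request_via_client():
    """Sketch: create a file request through the high-level Dropbox client."""
    import dropbox
    dbx = dropbox.Dropbox('ACCESS_TOKEN_PLACEHOLDER')
    return dbx.file_requests_create(
        title='Quarterly reports',
        destination='/File Requests/Q1',
    )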
|
2c79ab3992e41851a0167e3deccc8bcbe6f19e0a
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/BreachRx/Integrations/BreachRx/BreachRx.py
|
6c3bad1e6e2c38726b20694b0f4e34fd6ac419a3
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 8,777
|
py
|
BreachRx.py
|
from CommonServerPython import * # noqa # pylint: disable=unused-wildcard-import
from CommonServerUserPython import * # noqa
from collections.abc import Callable
import urllib3
import traceback
from urllib.parse import urlparse
from gql import gql, Client
from gql.transport.requests import RequestsHTTPTransport
from requests.auth import HTTPBasicAuth
# Disable insecure warnings
urllib3.disable_warnings() # pylint: disable=no-member
create_incident_mutation = gql("""
mutation CreateIncident(
$severity: String!,
$name: String!,
$type: String!,
$description: String
) {
createIncident(
type: $type,
severity: $severity,
name: $name,
description: $description
) {
id
name
severity {
name
}
types {
type {
name
}
}
description
identifier
}
}""")
get_incident_severities = gql("""{
incidentSeverities {
name
ordering
}
}""")
get_incident_types = gql("""{
types {
name
}
}""")
get_actions_for_incident = gql("""
query GetActionsForIncident($incidentId: Int!) {
actions(where: {
incidentId: {
equals: $incidentId
}
}) {
id
name
description
phase {
name
id
}
user {
fullName
email
}
}
}""")
get_incident_by_name = gql("""
query GetIncidentByName($name: String, $identifier: String) {
incidents(first: 1, where: {
name: {
contains: $name,
mode: insensitive
}
identifier: {
contains: $identifier,
mode: insensitive
}
}) {
id
name
severity {
name
}
types {
type {
name
}
}
description
identifier
}
}""")
class BreachRxClient:
def __init__(self, base_url: str, api_key: str, secret_key: str, org_name: str, verify: bool):
self.api_key = api_key
self.secret_key = secret_key
self.org_name = org_name
auth = HTTPBasicAuth(api_key, secret_key)
transport = RequestsHTTPTransport(
url=base_url,
auth=auth,
headers={"orgname": org_name},
timeout=60,
verify=verify
)
self.client = Client(
transport=transport, fetch_schema_from_transport=False
)
def get_incident_severities(self):
return self.client.execute(get_incident_severities)["incidentSeverities"]
def get_incident_types(self):
return self.client.execute(get_incident_types)["types"]
def create_incident(self, name: Optional[str], description: Optional[str]):
severities = self.get_incident_severities()
types = self.get_incident_types()
params = {
"severity": severities[0]["name"],
"name": name,
"type": types[0]["name"],
"description": description
}
return self.client.execute(create_incident_mutation, params)["createIncident"]
def get_incident(self, name: Optional[str], identifier: Optional[str]):
params = {
"name": name,
"identifier": identifier
}
results = self.client.execute(get_incident_by_name, params)['incidents']
if results:
return results.pop()
else:
return None
def get_actions_for_incident(self, incident_id):
params = {
"incidentId": incident_id
}
return self.client.execute(get_actions_for_incident, params)["actions"]
def test_module(client: BreachRxClient):
try:
client.get_incident_severities()
return "ok"
except Exception:
raise Exception("Authorization Error: make sure your API Key and Secret Key are correctly set")
def create_incident_command(
client: BreachRxClient,
incident_name: str = None,
description: str = None
) -> CommandResults:
if not incident_name:
incident_name = demisto.incident().get("name")
if not description:
description = (
f"""An Incident copied from the Palo Alto Networks XSOAR platform.
<br>
<br>
XSOAR Incident Name: {demisto.incident().get('name')}"""
)
response = client.create_incident(incident_name, description)
incident_name = response["name"]
return CommandResults(
outputs_prefix="BreachRx.Incident",
outputs_key_field="id",
outputs=response,
raw_response=response,
readable_output=f"Incident created with name={incident_name}."
)
def get_incident_actions_command(
client: BreachRxClient,
incident_name: str = None,
incident_identifier: str = None
) -> Union[CommandResults, str]:
incidents = demisto.dt(demisto.context(), 'BreachRx.Incident')
if not incidents:
if not incident_name and not incident_identifier:
raise Exception(
"Error: No BreachRx privacy Incident associated with this Incident,"
" and no Incident search terms provided."
)
incidents = [client.get_incident(incident_name, incident_identifier)]
if not incidents:
raise Exception("Error: No BreachRx privacy Incident found using the search terms provided.")
if type(incidents) is not list:
incidents = [incidents]
for incident in incidents:
incident["actions"] = client.get_actions_for_incident(incident['id'])
for action in incident["actions"]:
action["phase_name"] = action["phase"]["name"]
readable_output = ""
for incident in incidents:
actions_markdown_table = tableToMarkdown("Actions", incident["actions"], headers=["name", "phase_name"])
readable_output += f"# {incident['name']} ({incident['id']})\n" + actions_markdown_table + "\n"
return CommandResults(
outputs_prefix="BreachRx.Incident",
outputs_key_field="id",
outputs=incidents,
raw_response=incidents,
readable_output=readable_output
)
def import_incident_command(
client: BreachRxClient,
incident_name: str = None,
incident_identifier: str = None
) -> Union[CommandResults, str]:
incident = client.get_incident(incident_name, incident_identifier)
if not incident:
raise Exception("Error: No BreachRx privacy Incident found using the search terms provided.")
return CommandResults(
outputs_prefix="BreachRx.Incident",
outputs_key_field="id",
outputs=incident,
raw_response=incident,
readable_output=f"Incident imported with name={incident.get('name')}."
)
def get_incident_command(
client: BreachRxClient,
incident_name: str = None,
incident_identifier: str = None
) -> Union[CommandResults, str]:
incident = client.get_incident(incident_name, incident_identifier)
if incident:
return CommandResults(
raw_response=incident,
readable_output=f'Incident found with name="{incident.get("name")}" and identifier="{incident.get("identifier")}".'
)
else:
return "No Incident found with those search terms."
COMMANDS = {
"test-module": test_module,
"breachrx-incident-create": create_incident_command,
"breachrx-incident-actions-get": get_incident_actions_command,
"breachrx-incident-import": import_incident_command,
"breachrx-incident-get": get_incident_command,
}
def is_valid_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
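# Illustrative behaviour of the helper above (example values are hypothetical):
#   is_valid_url("https://api.example.com/graphql")  -> True   (has scheme and netloc)
#   is_valid_url("not-a-url")                         -> False  (urlparse yields no scheme/netloc)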
def main() -> None: # pragma: no cover
try:
base_url = demisto.params()["api_url"]
org_name = demisto.params()["url"].split(".")[0].replace("https://", "")
api_key = demisto.params().get("credentials", {}).get("identifier")
secret_key = demisto.params().get("credentials", {}).get("password")
verify = demisto.params().get("insecure", False)
if not is_valid_url(base_url):
raise Exception("The GraphQL API URL is not a valid URL.")
if not is_valid_url(demisto.params()["url"]):
raise Exception("The BreachRx instance URL is not a valid URL.")
client = BreachRxClient(base_url, api_key, secret_key, org_name, verify)
        command_func: Optional[Callable] = COMMANDS.get(demisto.command())
if command_func:
return_results(command_func(client, **demisto.args()))
else:
raise NotImplementedError(f'{demisto.command()} command is not implemented.')
except Exception as e:
demisto.error(traceback.format_exc())
return_error(f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}")
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
|
2c920735d2e74b040a143fefc441cd964b428440
|
e46c684f4b8dd07acc8cda5c30add73d8bcd7aa9
|
/dallinger/experiment_server/utils.py
|
5e5511b196cae324c6d22502102ed162854f0e85
|
[
"MIT"
] |
permissive
|
Dallinger/Dallinger
|
b229265581cef9e72f35cb1cc82e0a90534d9be0
|
28b61f4e12ffa110fc031f6322e16745fb69c094
|
refs/heads/master
| 2023-09-01T03:06:07.607759
| 2023-08-30T12:26:14
| 2023-08-30T12:26:14
| 67,452,779
| 120
| 56
|
MIT
| 2023-09-14T15:00:38
| 2016-09-05T21:46:04
|
Python
|
UTF-8
|
Python
| false
| false
| 7,575
|
py
|
utils.py
|
import logging
import sys
from datetime import timedelta
from functools import update_wrapper
from json import dumps
import user_agents
from flask import Response, current_app, make_response, render_template, request
from dallinger.config import get_config
logger = logging.getLogger(__name__)
def crossdomain(
origin=None,
methods=None,
headers=None,
max_age=21600,
attach_to_all=True,
automatic_options=True,
):
if methods is not None:
methods = ", ".join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, str):
headers = ", ".join(x.upper() for x in headers)
if not isinstance(origin, str):
origin = ", ".join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers["allow"]
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == "OPTIONS":
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != "OPTIONS":
return resp
h = resp.headers
h["Access-Control-Allow-Origin"] = origin
h["Access-Control-Allow-Methods"] = get_methods()
h["Access-Control-Max-Age"] = str(max_age)
if headers is not None:
h["Access-Control-Allow-Headers"] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
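# Illustrative usage of the decorator above (a minimal sketch; the Flask app object
# and route name are hypothetical, not taken from this module):
#
#   @app.route("/ad", methods=["GET", "OPTIONS"])
#   @crossdomain(origin="*")
#   def ad():
#       return success_response(message="ok")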
def date_handler(obj):
"""Serialize dates."""
    return obj.isoformat() if hasattr(obj, "isoformat") else obj
def nocache(func):
"""Stop caching for pages wrapped in nocache decorator."""
def new_func(*args, **kwargs):
"""No cache Wrapper."""
resp = make_response(func(*args, **kwargs))
resp.cache_control.no_cache = True
return resp
return update_wrapper(new_func, func)
class ExperimentError(Exception):
"""
Error class for experimental errors, such as subject not being found in
the database.
"""
def __init__(self, value):
experiment_errors = dict(
status_incorrectly_set=1000,
hit_assign_worker_id_not_set_in_mturk=1001,
hit_assign_worker_id_not_set_in_consent=1002,
hit_assign_worker_id_not_set_in_exp=1003,
hit_assign_appears_in_database_more_than_once=1004,
hit_assign_worker_id_not_set_by_recruiter=1006,
already_started_exp=1008,
already_started_exp_mturk=1009,
already_did_exp_hit=1010,
tried_to_quit=1011,
intermediate_save=1012,
improper_inputs=1013,
browser_type_not_allowed=1014,
api_server_not_reachable=1015,
ad_not_found=1016,
error_setting_worker_complete=1017,
hit_not_registered_with_ad_server=1018,
template_unsafe=1019,
insert_mode_failed=1020,
page_not_found=404,
in_debug=2005,
unknown_error=9999,
)
self.value = value
self.errornum = experiment_errors[self.value]
self.template = "error.html"
def __str__(self):
return repr(self.value)
class ValidatesBrowser(object):
"""Checks if participant's browser has been excluded via the Configuration."""
def __init__(self, config):
self.config = config
@property
def exclusions(self):
"""Return list of browser exclusion rules defined in the Configuration."""
exclusion_rules = [
r.strip()
for r in self.config.get("browser_exclude_rule", "").split(",")
if r.strip()
]
return exclusion_rules
def is_supported(self, user_agent_string):
"""Check user agent against configured exclusions."""
user_agent_obj = user_agents.parse(user_agent_string)
browser_ok = True
for rule in self.exclusions:
if rule in ["mobile", "tablet", "touchcapable", "pc", "bot"]:
if (
(rule == "mobile" and user_agent_obj.is_mobile)
or (rule == "tablet" and user_agent_obj.is_tablet)
or (rule == "touchcapable" and user_agent_obj.is_touch_capable)
or (rule == "pc" and user_agent_obj.is_pc)
or (rule == "bot" and user_agent_obj.is_bot)
):
browser_ok = False
elif rule in user_agent_string:
browser_ok = False
return browser_ok
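# Illustrative usage (a sketch; where the user agent string comes from is up to the caller):
#   checker = ValidatesBrowser(_config())
#   if not checker.is_supported(request.headers.get("User-Agent", "")):
#       raise ExperimentError("browser_type_not_allowed")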
"""Define some canned response types."""
def success_response(**data):
"""Return a generic success response."""
data_out = {}
data_out["status"] = "success"
data_out.update(data)
js = dumps(data_out, default=date_handler)
return Response(js, status=200, mimetype="application/json")
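# For example, success_response(field="value") produces a 200 JSON response whose
# body is {"status": "success", "field": "value"}.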
def error_response(
error_type="Internal server error",
error_text="",
status=400,
participant=None,
simple=False,
request_data="",
):
"""Return a generic server error response."""
last_exception = sys.exc_info()
if last_exception[0]:
logger.error(
"Failure for request: {!r}".format(dict(request.args)),
exc_info=last_exception,
)
data = {"status": "error"}
if simple:
data["message"] = error_text
else:
data["html"] = (
error_page(
error_text=error_text,
error_type=error_type,
participant=participant,
request_data=request_data,
)
.get_data()
.decode("utf-8")
)
return Response(dumps(data), status=status, mimetype="application/json")
def error_page(
participant=None,
error_text=None,
compensate=True,
error_type="default",
request_data="",
):
"""Render HTML for error page."""
config = _config()
if error_text is None:
error_text = "There has been an error and so you are unable to continue, sorry!"
if participant is not None:
hit_id = participant.hit_id
assignment_id = participant.assignment_id
worker_id = participant.worker_id
participant_id = participant.id
else:
hit_id = request.form.get("hit_id", "")
assignment_id = request.form.get("assignment_id", "")
worker_id = request.form.get("worker_id", "")
participant_id = request.form.get("participant_id", None)
if participant_id:
try:
participant_id = int(participant_id)
except (ValueError, TypeError):
participant_id = None
return make_response(
render_template(
"error.html",
error_text=error_text,
compensate=compensate,
contact_address=config.get("contact_email_on_error"),
error_type=error_type,
hit_id=hit_id,
assignment_id=assignment_id,
worker_id=worker_id,
request_data=request_data,
participant_id=participant_id,
),
500,
)
def _config():
config = get_config()
if not config.ready:
config.load()
return config
|
5d96722f6b7528182930c1517d5ba0f05a7b58e7
|
3c41443364da8b44c74dce08ef94a1acd1b66b3e
|
/osf/management/commands/update_institution_project_counts.py
|
53a0d735ddc728245b31d6959471c38e0f2a204d
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-warranty-disclaimer",
"AGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-proprietary-license",
"MPL-1.1",
"CPAL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-2.0"
] |
permissive
|
CenterForOpenScience/osf.io
|
71d9540be7989f7118a33e15bc4a6ce2d2492ac1
|
a3e0a0b9ddda5dd75fc8248d58f3bcdeece0323e
|
refs/heads/develop
| 2023-09-04T03:21:14.970917
| 2023-08-31T14:49:20
| 2023-08-31T14:49:20
| 10,199,599
| 683
| 390
|
Apache-2.0
| 2023-09-14T17:07:52
| 2013-05-21T15:53:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
update_institution_project_counts.py
|
import datetime as dt
from django.core.management.base import BaseCommand
from framework.celery_tasks import app as celery_app
from osf.metrics import InstitutionProjectCounts, UserInstitutionProjectCounts
from osf.models import Institution, Node
@celery_app.task(name='management.commands.update_institution_project_counts')
def update_institution_project_counts():
now = dt.datetime.now()
for institution in Institution.objects.all():
institution_public_projects_qs = institution.nodes.filter(type='osf.node', parent_nodes=None, is_public=True, is_deleted=False)
institution_private_projects_qs = institution.nodes.filter(type='osf.node', parent_nodes=None, is_public=False, is_deleted=False)
institution_public_projects_count = institution_public_projects_qs.count()
institution_private_projects_count = institution_private_projects_qs.count()
InstitutionProjectCounts.record_institution_project_counts(
institution=institution,
public_project_count=institution_public_projects_count,
private_project_count=institution_private_projects_count,
timestamp=now
)
for user in institution.get_institution_users():
user_public_project_count = Node.objects.get_nodes_for_user(
user=user,
base_queryset=institution_public_projects_qs
).count()
user_private_project_count = Node.objects.get_nodes_for_user(
user=user,
base_queryset=institution_private_projects_qs
).count()
UserInstitutionProjectCounts.record_user_institution_project_counts(
user=user,
institution=institution,
public_project_count=user_public_project_count,
private_project_count=user_private_project_count,
timestamp=now
)
class Command(BaseCommand):
def handle(self, *args, **options):
update_institution_project_counts()
|
7878fd7b6d2fb511b847949ae76aedde2286e89c
|
a411a55762de11dc2c9d913ff33d2f1477ac02cf
|
/orc8r/gateway/python/magma/common/redis/tests/dict_tests.py
|
e3f14f5889cf00a1a372b487ceb3f4bc155f515f
|
[
"BSD-3-Clause"
] |
permissive
|
magma/magma
|
0dc48c1513d9968bd05fb7589f302c192b7c0f94
|
0e1d895dfe625681229e181fbc2dbad83e13c5cb
|
refs/heads/master
| 2023-09-04T09:31:56.140395
| 2023-08-29T13:54:49
| 2023-08-29T13:54:49
| 170,803,235
| 1,219
| 525
|
NOASSERTION
| 2023-09-07T17:45:42
| 2019-02-15T04:46:24
|
C++
|
UTF-8
|
Python
| false
| false
| 5,726
|
py
|
dict_tests.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest import TestCase, main
import fakeredis
from magma.common.redis.containers import RedisFlatDict, RedisHashDict
from magma.common.redis.serializers import (
RedisSerde,
get_proto_deserializer,
get_proto_serializer,
)
from orc8r.protos.service303_pb2 import LogVerbosity
class RedisDictTests(TestCase):
"""
Tests for the RedisHashDict and RedisFlatDict containers
"""
def setUp(self):
client = fakeredis.FakeStrictRedis()
# Use arbitrary orc8r proto to test with
self._hash_dict = RedisHashDict(
client,
"unittest",
get_proto_serializer(),
get_proto_deserializer(LogVerbosity),
)
serde = RedisSerde(
'log_verbosity',
get_proto_serializer(),
get_proto_deserializer(LogVerbosity),
)
self._flat_dict = RedisFlatDict(client, serde)
def test_hash_insert(self):
expected = LogVerbosity(verbosity=0)
expected2 = LogVerbosity(verbosity=1)
# insert proto
self._hash_dict['key1'] = expected
version = self._hash_dict.get_version("key1")
actual = self._hash_dict['key1']
self.assertEqual(1, version)
self.assertEqual(expected, actual)
# update proto
self._hash_dict['key1'] = expected2
version2 = self._hash_dict.get_version("key1")
actual2 = self._hash_dict['key1']
self.assertEqual(2, version2)
self.assertEqual(expected2, actual2)
def test_missing_version(self):
missing_version = self._hash_dict.get_version("key2")
self.assertEqual(0, missing_version)
def test_hash_delete(self):
expected = LogVerbosity(verbosity=2)
self._hash_dict['key3'] = expected
actual = self._hash_dict['key3']
self.assertEqual(expected, actual)
self._hash_dict.pop('key3')
self.assertRaises(KeyError, self._hash_dict.__getitem__, 'key3')
def test_flat_insert(self):
expected = LogVerbosity(verbosity=5)
expected2 = LogVerbosity(verbosity=1)
# insert proto
self._flat_dict['key1'] = expected
version = self._flat_dict.get_version("key1")
actual = self._flat_dict['key1']
self.assertEqual(1, version)
self.assertEqual(expected, actual)
# update proto
self._flat_dict["key1"] = expected2
version2 = self._flat_dict.get_version("key1")
actual2 = self._flat_dict["key1"]
actual3 = self._flat_dict.get("key1")
self.assertEqual(2, version2)
self.assertEqual(expected2, actual2)
self.assertEqual(expected2, actual3)
def test_flat_missing_version(self):
missing_version = self._flat_dict.get_version("key2")
self.assertEqual(0, missing_version)
def test_flat_bad_key(self):
expected = LogVerbosity(verbosity=2)
self.assertRaises(
ValueError, self._flat_dict.__setitem__,
'bad:key', expected,
)
self.assertRaises(
ValueError, self._flat_dict.__getitem__,
'bad:key',
)
self.assertRaises(
ValueError, self._flat_dict.__delitem__,
'bad:key',
)
def test_flat_delete(self):
expected = LogVerbosity(verbosity=2)
self._flat_dict['key3'] = expected
actual = self._flat_dict['key3']
self.assertEqual(expected, actual)
del self._flat_dict['key3']
self.assertRaises(
KeyError, self._flat_dict.__getitem__,
'key3',
)
self.assertEqual(None, self._flat_dict.get('key3'))
def test_flat_clear(self):
expected = LogVerbosity(verbosity=2)
self._flat_dict['key3'] = expected
actual = self._flat_dict['key3']
self.assertEqual(expected, actual)
self._flat_dict.clear()
self.assertEqual(0, len(self._flat_dict.keys()))
def test_flat_garbage_methods(self):
expected = LogVerbosity(verbosity=2)
expected2 = LogVerbosity(verbosity=3)
key = "k1"
key2 = "k2"
bad_key = "bad_key"
self._flat_dict[key] = expected
self._flat_dict[key2] = expected2
self._flat_dict.mark_as_garbage(key)
is_garbage = self._flat_dict.is_garbage(key)
self.assertTrue(is_garbage)
is_garbage2 = self._flat_dict.is_garbage(key2)
self.assertFalse(is_garbage2)
self.assertEqual([key], self._flat_dict.garbage_keys())
self.assertEqual([key2], self._flat_dict.keys())
self.assertIsNone(self._flat_dict.get(key))
self.assertEqual(expected2, self._flat_dict.get(key2))
deleted = self._flat_dict.delete_garbage(key)
not_deleted = self._flat_dict.delete_garbage(key2)
self.assertTrue(deleted)
self.assertFalse(not_deleted)
self.assertIsNone(self._flat_dict.get(key))
self.assertEqual(expected2, self._flat_dict.get(key2))
with self.assertRaises(KeyError):
self._flat_dict.is_garbage(bad_key)
with self.assertRaises(KeyError):
self._flat_dict.mark_as_garbage(bad_key)
if __name__ == "__main__":
main()
|
13a6e2d29d9cb15cf33923ef8312ad24f7862fbc
|
ad3cdc5e7919b705ed88fdaa58a556d0c53a2981
|
/configurators/scripts/bin/system-colour
|
484f6f7989420125f6fea5c89c1605b35b459df2
|
[
"MIT"
] |
permissive
|
naggie/dotfiles
|
c77967dd9f62438c3faa37a55985ec6ed4391fff
|
bcc7d0032bb01ea75986fea7f71dad9b43fa3634
|
refs/heads/master
| 2023-08-31T02:09:56.215466
| 2023-08-21T20:41:52
| 2023-08-21T20:41:52
| 5,134,257
| 130
| 23
|
MIT
| 2020-04-19T12:47:38
| 2012-07-21T14:36:58
|
Vim script
|
UTF-8
|
Python
| false
| false
| 4,919
|
system-colour
|
#!/usr/bin/env python3
from hashlib import md5
import json
from os import path
from os import geteuid
import socket
import sys
from string import ascii_letters
# WCAG contrast calculation functions based on
# https://github.com/gsnedders/wcag-contrast-ratio/blob/master/wcag_contrast_ratio/contrast.py
# Copyright (c) 2015 Geoffrey Sneddon.
# license: configurators/scripts/etc/wcag-contrast-ratio-LICENSE.md
# JSON data file provided by jonasjacek
# https://github.com/jonasjacek/colors/blob/master/data.json
# license: configurators/scripts/etc/colors-LICENSE.md
# TODO use machine id for more entropy? Could help with unreachable colours when matching hostname colour
with open(path.expanduser("~/.local/share/256-terminal-colour-map.json")) as f:
COLOUR_LIST = json.load(f)
# filter out strong red, reserved for root
COLOUR_LIST = [ c for c in COLOUR_LIST if c["colorId"] not in (160, 196, 9, 88, 124)]
# W3C WCAG AA contrast threshold for normal text
CONTRAST_THRESHOLD = 4.5
def get_colour(colorId: int):
for c in COLOUR_LIST:
if c["colorId"] == colorId:
return c
raise ValueError("Invalid colorId")
def rgb_contrast(rgb1, rgb2):
for r, g, b in (rgb1, rgb2):
if not 0.0 <= r <= 1.0:
raise ValueError("r is out of valid range (0.0 - 1.0)")
if not 0.0 <= g <= 1.0:
raise ValueError("g is out of valid range (0.0 - 1.0)")
if not 0.0 <= b <= 1.0:
raise ValueError("b is out of valid range (0.0 - 1.0)")
l1 = relative_luminance(*rgb1)
l2 = relative_luminance(*rgb2)
if l1 > l2:
return (l1 + 0.05) / (l2 + 0.05)
else:
return (l2 + 0.05) / (l1 + 0.05)
def relative_luminance(r, g, b):
r = linearise(r)
g = linearise(g)
b = linearise(b)
return 0.2126 * r + 0.7152 * g + 0.0722 * b
def linearise(v):
if v <= 0.03928:
return v / 12.92
else:
return ((v + 0.055) / 1.055) ** 2.4
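# Sanity check for the helpers above: black against white gives the maximum
# WCAG contrast ratio, i.e. rgb_contrast((0, 0, 0), (1, 1, 1)) == 21.0.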
def word_matches_colour(seed, colour):
seed = "".join([x for x in seed.lower() if x in ascii_letters])
colour = "".join([x for x in colour["name"].lower() if x in ascii_letters])
return seed in colour or colour in seed
def get_contrasting_colours(subject):
selected = list()
for candidate in COLOUR_LIST:
contrast = rgb_contrast(
(
subject["rgb"]["r"] / 255,
subject["rgb"]["g"] / 255,
subject["rgb"]["b"] / 255,
),
(
candidate["rgb"]["r"] / 255,
candidate["rgb"]["g"] / 255,
candidate["rgb"]["b"] / 255,
),
)
        if contrast >= CONTRAST_THRESHOLD:
selected.append(candidate)
return selected
def select_by_seed(candidates, seed):
"""Produces a weighted deterministic colour"""
m = md5()
m.update(seed.encode())
digest = m.hexdigest()
index = int(digest, 16) % len(candidates)
return candidates[index]
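# e.g. select_by_seed(COLOUR_LIST, "myhost") always picks the same entry for the
# same seed string, because the md5 digest (and hence the index) is deterministic.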
def get_colours(seed, tiebreaker=""):
# if the hostname is a colour, try to match it for extra points
matching = [c for c in COLOUR_LIST if word_matches_colour(seed, c)]
if len(matching) > 1:
# hostname is a colour, and has multiple matches. To avoid always
# picking the same shade for a given colour, use the tiebreaker
# (machine-id) to vary the seed
seed += tiebreaker
fg = select_by_seed(matching or COLOUR_LIST, seed)
bg_candidates = get_contrasting_colours(fg)
# remove black, as it's the same colour as the terminal background
bg_candidates = [c for c in bg_candidates if c["colorId"] not in (0, 16)]
bg = select_by_seed(bg_candidates, seed)
# 50% chance swap to remove bias to light foreground -- palette is
# predominately light
return select_by_seed([(fg, bg), (bg, fg)], seed)
def wrap(msg, fg, bg):
return f"\033[48;5;{bg['colorId']}m\033[38;5;{fg['colorId']}m{msg}\033[0m"
def colourise(string):
fg, bg = get_colours(string)
return wrap(string, fg, bg)
# root? Make things red!
if geteuid() == 0:
print("SYSTEM_COLOUR_FG=9")
print("SYSTEM_COLOUR_BG=238")
sys.exit()
hostname = socket.gethostname()
# use simple hostname (not FQDN) for stable value -- search domain could
# otherwise change the colour
hostname = hostname.split(".")[0]
# also, macos has a strange bug that says another host has the same name,
# resulting in appending a number in brackets. Remove the brackets, if there
# are any
hostname = hostname.split("(")[0]
hostname = hostname.split("-")[0]
tiebreaker = ""
if path.exists("/etc/machine-id"):
with open("/etc/machine-id") as f:
tiebreaker = f.read()
fg, bg = get_colours(hostname, tiebreaker)
# NOTE: run against /usr/share/dict/words for a good idea of variance
# print(colourise("".join(argv[1:])))
print("SYSTEM_COLOUR_FG=%s" % fg["colorId"])
print("SYSTEM_COLOUR_BG=%s" % bg["colorId"])
|
|
341f2450ed1f22feb6257a8e873eee9f8d014221
|
beab4b9703df6c4e9bda54fada11a6d985ea2c5a
|
/tests/test_motd.py
|
2e9eeab70628a1d73f46b74ec1be0ff9d3999d31
|
[
"MIT"
] |
permissive
|
sanic-org/sanic
|
d3db62482914061a1f6a8f7d94b6127c2876cb3e
|
47215d4635184bdfb1d5cff000d19390f19219ab
|
refs/heads/main
| 2023-09-05T01:04:31.432228
| 2023-08-30T17:03:22
| 2023-08-30T17:03:22
| 59,720,190
| 3,523
| 439
|
MIT
| 2023-09-14T05:45:11
| 2016-05-26T04:38:22
|
Python
|
UTF-8
|
Python
| false
| false
| 2,412
|
py
|
test_motd.py
|
import logging
import os
import platform
import sys
from unittest.mock import Mock, patch
import pytest
from sanic import __version__
from sanic.application.logo import BASE_LOGO
from sanic.application.motd import MOTD, MOTDTTY
@pytest.fixture(autouse=True)
def reset():
try:
del os.environ["SANIC_MOTD_OUTPUT"]
except KeyError:
...
def test_logo_base(app, run_startup):
logs = run_startup(app)
assert logs[0][1] == logging.DEBUG
assert logs[0][2] == BASE_LOGO
def test_motd_with_expected_info(app, run_startup):
logs = run_startup(app)
assert logs[1][2] == f"Sanic v{__version__}"
assert logs[3][2] == "mode: debug, single worker"
assert logs[4][2] == "server: sanic, HTTP/1.1"
assert logs[5][2] == f"python: {platform.python_version()}"
assert logs[6][2] == f"platform: {platform.platform()}"
def test_motd_init():
_orig = MOTDTTY.set_variables
MOTDTTY.set_variables = Mock()
motd = MOTDTTY(None, "", {}, {})
motd.set_variables.assert_called_once()
MOTDTTY.set_variables = _orig
def test_motd_display(caplog):
motd = MOTDTTY(" foobar ", "", {"one": "1"}, {"two": "2"})
with caplog.at_level(logging.INFO):
motd.display()
version_line = f"Sanic v{__version__}".center(motd.centering_length)
assert (
"".join(caplog.messages)
== f"""
┌────────────────────────────────┐
│ {version_line} │
│ │
├───────────────────────┬────────┤
│ foobar │ one: 1 │
| ├────────┤
│ │ two: 2 │
└───────────────────────┴────────┘
"""
)
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Not on 3.7")
def test_reload_dirs(app):
app.config.LOGO = None
app.config.MOTD = True
app.config.AUTO_RELOAD = True
with patch.object(MOTD, "output") as mock:
app.prepare(
reload_dir="./", auto_reload=True, motd_display={"foo": "bar"}
)
mock.assert_called()
assert mock.call_args.args[2]["auto-reload"] == f"enabled, {os.getcwd()}"
assert mock.call_args.args[3] == {"foo": "bar"}
|
0963041aadb11e8d838b8e1893c771dd4c3da18d
|
e9f0510faac898afc09eb5bb664ac6048f72c33a
|
/tests/test_bvll/test_codec.py
|
dd884d0ad18872c30fe4dbf7832780d6ee864853
|
[
"MIT"
] |
permissive
|
JoelBender/bacpypes
|
c9ddf9a4f56dd0d012046a9e4ffad19bd580cb45
|
a5be2ad5ac69821c12299716b167dd52041b5342
|
refs/heads/master
| 2023-05-28T05:03:17.980236
| 2023-05-10T05:33:01
| 2023-05-10T05:33:01
| 39,159,799
| 284
| 161
|
MIT
| 2023-05-15T15:11:27
| 2015-07-15T20:33:51
|
Python
|
UTF-8
|
Python
| false
| false
| 10,146
|
py
|
test_codec.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Test BVLL Encoding and Decoding
-------------------------------
"""
import string
import unittest
from bacpypes.debugging import bacpypes_debugging, ModuleLogger, btox, xtob
from bacpypes.comm import bind
from bacpypes.pdu import PDU, Address, LocalBroadcast
from bacpypes.bvll import Result, WriteBroadcastDistributionTable, \
ReadBroadcastDistributionTable, ReadBroadcastDistributionTableAck, \
ForwardedNPDU, RegisterForeignDevice, ReadForeignDeviceTable, \
ReadForeignDeviceTableAck, FDTEntry, DeleteForeignDeviceTableEntry, \
DistributeBroadcastToNetwork, OriginalUnicastNPDU, \
OriginalBroadcastNPDU
from bacpypes.bvllservice import AnnexJCodec
from ..trapped_classes import TrappedClient, TrappedServer
from ..state_machine import match_pdu
# some debugging
_debug = 0
_log = ModuleLogger(globals())
@bacpypes_debugging
class TestAnnexJCodec(unittest.TestCase):
def setup_method(self, method):
"""This function is called before each test method is called as is
given a reference to the test method."""
if _debug: TestAnnexJCodec._debug("setup_method %r", method)
        # miniature trapped stack
self.client = TrappedClient()
self.codec = AnnexJCodec()
self.server = TrappedServer()
bind(self.client, self.codec, self.server)
def request(self, pdu):
"""Pass the PDU to the client to send down the stack."""
self.client.request(pdu)
def indication(self, pdu_type=None, **pdu_attrs):
"""Check what the server received."""
assert match_pdu(self.server.indication_received, pdu_type, **pdu_attrs)
def response(self, pdu):
"""Pass the PDU to the server to send up the stack."""
self.server.response(pdu)
def confirmation(self, pdu_type=None, **pdu_attrs):
"""Check what the client received."""
assert match_pdu(self.client.confirmation_received, pdu_type, **pdu_attrs)
def test_result(self):
"""Test the Result encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_codec_01")
# Request successful
pdu_bytes = xtob('81.00.0006.0000')
self.request(Result(0))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(Result, bvlciResultCode=0)
# Request error condition
pdu_bytes = xtob('81.00.0006.0001')
self.request(Result(1))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(Result, bvlciResultCode=1)
def test_write_broadcast_distribution_table(self):
"""Test the WriteBroadcastDistributionTable encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_write_broadcast_distribution_table")
# write an empty table
pdu_bytes = xtob('81.01.0004')
self.request(WriteBroadcastDistributionTable([]))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(WriteBroadcastDistributionTable, bvlciBDT=[])
# write a table with an element
addr = Address('192.168.0.254/24')
pdu_bytes = xtob('81.01.000e'
'c0.a8.00.fe.ba.c0 ff.ff.ff.00' # address and mask
)
self.request(WriteBroadcastDistributionTable([addr]))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(WriteBroadcastDistributionTable, bvlciBDT=[addr])
def test_read_broadcast_distribution_table(self):
"""Test the ReadBroadcastDistributionTable encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_read_broadcast_distribution_table")
# read the table
pdu_bytes = xtob('81.02.0004')
self.request(ReadBroadcastDistributionTable())
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ReadBroadcastDistributionTable)
def test_read_broadcast_distribution_table_ack(self):
"""Test the ReadBroadcastDistributionTableAck encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_read_broadcast_distribution_table_ack")
# read returns an empty table
pdu_bytes = xtob('81.03.0004')
self.request(ReadBroadcastDistributionTableAck([]))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ReadBroadcastDistributionTableAck, bvlciBDT=[])
# read returns a table with an element
addr = Address('192.168.0.254/24')
pdu_bytes = xtob('81.03.000e' # bvlci
'c0.a8.00.fe.ba.c0 ff.ff.ff.00' # address and mask
)
self.request(ReadBroadcastDistributionTableAck([addr]))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ReadBroadcastDistributionTableAck, bvlciBDT=[addr])
def test_forwarded_npdu(self):
"""Test the ForwardedNPDU encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_forwarded_npdu")
# read returns a table with an element
addr = Address('192.168.0.1')
xpdu = xtob('deadbeef')
pdu_bytes = xtob('81.04.000e' # bvlci
'c0.a8.00.01.ba.c0' # original source address
'deadbeef' # forwarded PDU
)
self.request(ForwardedNPDU(addr, xpdu))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ForwardedNPDU, bvlciAddress=addr, pduData=xpdu)
def test_register_foreign_device(self):
"""Test the RegisterForeignDevice encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_register_foreign_device")
# register as a foreign device with a 30 second time-to-live
pdu_bytes = xtob('81.05.0006' # bvlci
'001e' # time-to-live
)
self.request(RegisterForeignDevice(30))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(RegisterForeignDevice, bvlciTimeToLive=30)
def test_read_foreign_device_table(self):
"""Test the ReadForeignDeviceTable encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_read_foreign_device_table")
# read returns an empty table
pdu_bytes = xtob('81.06.0004')
self.request(ReadForeignDeviceTable())
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ReadForeignDeviceTable)
def test_read_foreign_device_table_ack(self):
"""Test the ReadForeignDeviceTableAck encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_read_foreign_device_table_ack")
# read returns an empty table
pdu_bytes = xtob('81.07.0004')
self.request(ReadForeignDeviceTableAck([]))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ReadForeignDeviceTableAck, bvlciFDT=[])
# read returns a table with one entry
fdte = FDTEntry()
fdte.fdAddress = Address("192.168.0.10")
fdte.fdTTL = 30
fdte.fdRemain = 15
pdu_bytes = xtob('81.07.000e' # bvlci
'c0.a8.00.0a.ba.c0' # address
'001e.000f' # ttl and remaining
)
self.request(ReadForeignDeviceTableAck([fdte]))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(ReadForeignDeviceTableAck, bvlciFDT=[fdte])
def test_delete_foreign_device_table_entry(self):
"""Test the DeleteForeignDeviceTableEntry encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_delete_foreign_device_table_entry")
# delete an element
addr = Address('192.168.0.11/24')
pdu_bytes = xtob('81.08.000a' # bvlci
'c0.a8.00.0b.ba.c0' # address of entry to be deleted
)
self.request(DeleteForeignDeviceTableEntry(addr))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(DeleteForeignDeviceTableEntry, bvlciAddress=addr)
def test_distribute_broadcast_to_network(self):
"""Test the DistributeBroadcastToNetwork encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_distribute_broadcast_to_network")
# read returns a table with an element
xpdu = xtob('deadbeef')
pdu_bytes = xtob('81.09.0008' # bvlci
'deadbeef' # PDU to broadcast
)
self.request(DistributeBroadcastToNetwork(xpdu))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(DistributeBroadcastToNetwork, pduData=xpdu)
def test_original_unicast_npdu(self):
"""Test the OriginalUnicastNPDU encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_original_unicast_npdu")
# read returns a table with an element
xpdu = xtob('deadbeef')
pdu_bytes = xtob('81.0a.0008' # bvlci
'deadbeef' # PDU being unicast
)
self.request(OriginalUnicastNPDU(xpdu))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(OriginalUnicastNPDU, pduData=xpdu)
def test_original_broadcast_npdu(self):
"""Test the OriginalBroadcastNPDU encoding and decoding."""
if _debug: TestAnnexJCodec._debug("test_original_broadcast_npdu")
# read returns a table with an element
xpdu = xtob('deadbeef')
pdu_bytes = xtob('81.0b.0008' # bvlci
'deadbeef' # PDU being broadcast
)
self.request(OriginalBroadcastNPDU(xpdu))
self.indication(pduData=pdu_bytes)
self.response(PDU(pdu_bytes))
self.confirmation(OriginalBroadcastNPDU, pduData=xpdu)
|
dd825fa61aa03d7c8f09c22d89b0598fdea14439
|
e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
|
/hummingbot/core/data_type/trade_fee.py
|
c1fcc0103c134ffc31fd4a4df2f84ba01ff8d306
|
[
"Apache-2.0"
] |
permissive
|
CoinAlpha/hummingbot
|
0d1e2bd94de1280748647108c7d7800a09546eb8
|
c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
|
refs/heads/development
| 2023-09-01T11:24:43.322137
| 2023-08-31T03:08:06
| 2023-08-31T03:08:06
| 439,330,952
| 135
| 98
|
Apache-2.0
| 2023-08-30T13:55:08
| 2021-12-17T12:50:42
|
Python
|
UTF-8
|
Python
| false
| false
| 12,975
|
py
|
trade_fee.py
|
import typing
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from decimal import Decimal
from typing import Any, Dict, List, Optional, Type
from hummingbot.connector.utils import combine_to_hb_trading_pair, split_hb_trading_pair
from hummingbot.core.data_type.common import PositionAction, PriceType, TradeType
if typing.TYPE_CHECKING: # avoid circular import problems
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.core.data_type.order_candidate import OrderCandidate
from hummingbot.core.rate_oracle.rate_oracle import RateOracle
S_DECIMAL_0 = Decimal(0)
@dataclass
class TokenAmount:
token: str
amount: Decimal
def __iter__(self):
return iter((self.token, self.amount))
def to_json(self) -> Dict[str, Any]:
return {
"token": self.token,
"amount": str(self.amount),
}
@classmethod
def from_json(cls, data: Dict[str, Any]):
instance = TokenAmount(token=data["token"], amount=Decimal(data["amount"]))
return instance
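# Round-trip sketch for TokenAmount (illustrative values):
#   TokenAmount.from_json(TokenAmount("USDT", Decimal("1.5")).to_json())
#   == TokenAmount(token="USDT", amount=Decimal("1.5"))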
@dataclass
class TradeFeeSchema:
"""
Contains the necessary information to build a `TradeFee` object.
NOTE: Currently, `percent_fee_token` is only specified if the percent fee is always charged in a particular
token (e.g. the Binance BNB case). To always populate the `percent_fee_token`, this class will require
access to the `exchange` class at runtime to determine the collateral token for the trade (e.g. for derivatives).
This means that, if the `percent_fee_token` is specified, then the fee is always added to the trade
costs, and `buy_percent_fee_deducted_from_returns` cannot be set to `True`.
"""
percent_fee_token: Optional[str] = None
maker_percent_fee_decimal: Decimal = S_DECIMAL_0
taker_percent_fee_decimal: Decimal = S_DECIMAL_0
buy_percent_fee_deducted_from_returns: bool = False
maker_fixed_fees: List[TokenAmount] = field(default_factory=list)
taker_fixed_fees: List[TokenAmount] = field(default_factory=list)
def __post_init__(self):
self.validate_schema()
def validate_schema(self):
if self.percent_fee_token is not None:
assert not self.buy_percent_fee_deducted_from_returns
self.maker_percent_fee_decimal = Decimal(self.maker_percent_fee_decimal)
self.taker_percent_fee_decimal = Decimal(self.taker_percent_fee_decimal)
for i in range(len(self.taker_fixed_fees)):
self.taker_fixed_fees[i] = TokenAmount(
self.taker_fixed_fees[i].token, Decimal(self.taker_fixed_fees[i].amount)
)
for i in range(len(self.maker_fixed_fees)):
self.maker_fixed_fees[i] = TokenAmount(
self.maker_fixed_fees[i].token, Decimal(self.maker_fixed_fees[i].amount)
)
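# Minimal construction sketch for the schema above (fee values are hypothetical):
#   schema = TradeFeeSchema(
#       maker_percent_fee_decimal=Decimal("0.001"),
#       taker_percent_fee_decimal=Decimal("0.001"),
#       buy_percent_fee_deducted_from_returns=True,
#   )
# Note that buy_percent_fee_deducted_from_returns=True is only valid while
# percent_fee_token is left as None (see validate_schema above).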
@dataclass
class TradeFeeBase(ABC):
"""
Contains the necessary information to apply the trade fee to a particular order.
"""
percent: Decimal = S_DECIMAL_0
percent_token: Optional[str] = None # only set when fee charged in third token (the Binance BNB case)
flat_fees: List[TokenAmount] = field(default_factory=list) # list of (asset, amount) tuples
@classmethod
@abstractmethod
def type_descriptor_for_json(cls) -> str:
...
@classmethod
def fee_class_for_type(cls, type_descriptor: str):
catalog = {fee_class.type_descriptor_for_json(): fee_class
for fee_class
in [AddedToCostTradeFee, DeductedFromReturnsTradeFee]}
return catalog[type_descriptor]
@classmethod
def new_spot_fee(cls,
fee_schema: TradeFeeSchema,
trade_type: TradeType,
percent: Decimal = S_DECIMAL_0,
percent_token: Optional[str] = None,
flat_fees: Optional[List[TokenAmount]] = None) -> "TradeFeeBase":
fee_cls: Type[TradeFeeBase] = (
AddedToCostTradeFee
if (trade_type == TradeType.BUY and
(not fee_schema.buy_percent_fee_deducted_from_returns
or fee_schema.percent_fee_token is not None))
else DeductedFromReturnsTradeFee)
return fee_cls(
percent=percent,
percent_token=percent_token,
flat_fees=flat_fees or []
)
@classmethod
def new_perpetual_fee(cls,
fee_schema: TradeFeeSchema,
position_action: PositionAction,
percent: Decimal = S_DECIMAL_0,
percent_token: Optional[str] = None,
flat_fees: Optional[List[TokenAmount]] = None) -> "TradeFeeBase":
fee_cls: Type[TradeFeeBase] = (
AddedToCostTradeFee
if position_action == PositionAction.OPEN or fee_schema.percent_fee_token is not None
else DeductedFromReturnsTradeFee
)
return fee_cls(
percent=percent,
percent_token=percent_token,
flat_fees=flat_fees or []
)
@classmethod
def from_json(cls, data: Dict[str, Any]):
fee_class = cls.fee_class_for_type(data["fee_type"])
instance = fee_class(
percent=Decimal(data["percent"]),
percent_token=data["percent_token"],
flat_fees=list(map(TokenAmount.from_json, data["flat_fees"]))
)
return instance
    def to_json(self) -> Dict[str, Any]:
return {
"fee_type": self.type_descriptor_for_json(),
"percent": str(self.percent),
"percent_token": self.percent_token,
"flat_fees": [token_amount.to_json() for token_amount in self.flat_fees]
}
@property
def fee_asset(self):
first_flat_fee_token = None
if len(self.flat_fees) > 0:
first_flat_fee_token = self.flat_fees[0].token
return self.percent_token or first_flat_fee_token
@abstractmethod
def get_fee_impact_on_order_cost(
self, order_candidate: "OrderCandidate", exchange: "ExchangeBase"
) -> Optional[TokenAmount]:
"""
WARNING: Do not use this method for sizing. Instead, use the `BudgetChecker`.
Returns the impact of the fee on the cost requirements for the candidate order.
"""
...
@abstractmethod
def get_fee_impact_on_order_returns(
self, order_candidate: "OrderCandidate", exchange: "ExchangeBase"
) -> Optional[Decimal]:
"""
WARNING: Do not use this method for sizing. Instead, use the `BudgetChecker`.
Returns the impact of the fee on the expected returns from the candidate order.
"""
...
@staticmethod
def _get_exchange_rate(
trading_pair: str,
exchange: Optional["ExchangeBase"] = None,
rate_source: Optional["RateOracle"] = None # noqa: F821
) -> Decimal:
from hummingbot.core.rate_oracle.rate_oracle import RateOracle
if exchange is not None and trading_pair in exchange.order_books:
rate = exchange.get_price_by_type(trading_pair, PriceType.MidPrice)
else:
local_rate_source: Optional[RateOracle] = rate_source or RateOracle.get_instance()
rate: Decimal = local_rate_source.get_pair_rate(trading_pair)
if rate is None:
raise ValueError(f"Could not find the exchange rate for {trading_pair} using the rate source "
f"{local_rate_source} (please verify it has been correctly configured)")
return rate
def fee_amount_in_token(
self,
trading_pair: str,
price: Decimal,
order_amount: Decimal,
token: str,
exchange: Optional["ExchangeBase"] = None,
rate_source: Optional["RateOracle"] = None # noqa: F821
) -> Decimal:
base, quote = split_hb_trading_pair(trading_pair)
fee_amount: Decimal = S_DECIMAL_0
if self.percent != S_DECIMAL_0:
amount_from_percentage: Decimal = (price * order_amount) * self.percent
if self._are_tokens_interchangeable(quote, token):
fee_amount += amount_from_percentage
else:
conversion_pair: str = combine_to_hb_trading_pair(base=quote, quote=token)
conversion_rate: Decimal = self._get_exchange_rate(conversion_pair, exchange, rate_source)
fee_amount += amount_from_percentage * conversion_rate
for flat_fee in self.flat_fees:
if self._are_tokens_interchangeable(flat_fee.token, token):
# No need to convert the value
fee_amount += flat_fee.amount
elif (self._are_tokens_interchangeable(flat_fee.token, base)
and (self._are_tokens_interchangeable(quote, token))):
# In this case instead of looking for the rate we use directly the price in the parameters
fee_amount += flat_fee.amount * price
else:
conversion_pair: str = combine_to_hb_trading_pair(base=flat_fee.token, quote=token)
conversion_rate: Decimal = self._get_exchange_rate(conversion_pair, exchange, rate_source)
fee_amount += (flat_fee.amount * conversion_rate)
return fee_amount
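    # Illustrative call (values are hypothetical; a rate source is only consulted
    # when a fee token cannot be matched against the trading pair or the flat fees):
    #   fee.fee_amount_in_token(
    #       trading_pair="ETH-USDT",
    #       price=Decimal("2000"),
    #       order_amount=Decimal("1"),
    #       token="USDT",
    #   )
    # returns the total fee expressed in USDT.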
def _are_tokens_interchangeable(self, first_token: str, second_token: str):
interchangeable_tokens = [
{"WETH", "ETH"},
{"WBNB", "BNB"},
{"WMATIC", "MATIC"},
{"WAVAX", "AVAX"},
{"WONE", "ONE"},
{"USDC", "USDC.E"},
{"WBTC", "BTC"}
]
return first_token == second_token or any(({first_token, second_token} <= interchangeable_pair
for interchangeable_pair
in interchangeable_tokens))
class AddedToCostTradeFee(TradeFeeBase):
@classmethod
def type_descriptor_for_json(cls) -> str:
return "AddedToCost"
def get_fee_impact_on_order_cost(
self, order_candidate: "OrderCandidate", exchange: "ExchangeBase"
) -> Optional[TokenAmount]:
"""
WARNING: Do not use this method for sizing. Instead, use the `BudgetChecker`.
Returns the impact of the fee on the cost requirements for the candidate order.
"""
ret = None
if self.percent != S_DECIMAL_0:
fee_token = self.percent_token or order_candidate.order_collateral.token
if order_candidate.order_collateral is None or fee_token != order_candidate.order_collateral.token:
token, size = order_candidate.get_size_token_and_order_size()
if fee_token == token:
exchange_rate = Decimal("1")
else:
exchange_pair = combine_to_hb_trading_pair(token, fee_token) # buy order token w/ pf token
exchange_rate = exchange.get_price(exchange_pair, is_buy=True)
fee_amount = size * exchange_rate * self.percent
else: # self.percent_token == order_candidate.order_collateral.token
fee_amount = order_candidate.order_collateral.amount * self.percent
ret = TokenAmount(fee_token, fee_amount)
return ret
def get_fee_impact_on_order_returns(
self, order_candidate: "OrderCandidate", exchange: "ExchangeBase"
) -> Optional[Decimal]:
"""
WARNING: Do not use this method for sizing. Instead, use the `BudgetChecker`.
Returns the impact of the fee on the expected returns from the candidate order.
"""
return None
class DeductedFromReturnsTradeFee(TradeFeeBase):
@classmethod
def type_descriptor_for_json(cls) -> str:
return "DeductedFromReturns"
def get_fee_impact_on_order_cost(
self, order_candidate: "OrderCandidate", exchange: "ExchangeBase"
) -> Optional[TokenAmount]:
"""
WARNING: Do not use this method for sizing. Instead, use the `BudgetChecker`.
Returns the impact of the fee on the cost requirements for the candidate order.
"""
return None
def get_fee_impact_on_order_returns(
self, order_candidate: "OrderCandidate", exchange: "ExchangeBase"
) -> Optional[Decimal]:
"""
WARNING: Do not use this method for sizing. Instead, use the `BudgetChecker`.
Returns the impact of the fee on the expected returns from the candidate order.
"""
impact = order_candidate.potential_returns.amount * self.percent
return impact
@dataclass(frozen=True)
class MakerTakerExchangeFeeRates:
maker: Decimal
taker: Decimal
maker_flat_fees: List[TokenAmount]
taker_flat_fees: List[TokenAmount]
|
474de0f9313fe226610d3bb341883b79daeed3f6
|
3982e6daf88e453c726f6b39a081fc37ce15a08a
|
/discovery-provider/src/models/tracks/track.py
|
0b8d3b8c4ee27201171c2906208ace68e0d4c31d
|
[
"Apache-2.0"
] |
permissive
|
AudiusProject/audius-protocol
|
45808e11082608ad5b76a425d287cb6d94a6dab0
|
7cf1d8e378520460d24a7cc8c29e9927c0944cb3
|
refs/heads/main
| 2023-08-09T10:34:28.850436
| 2023-08-09T04:28:17
| 2023-08-09T04:28:17
| 201,821,771
| 531
| 108
|
NOASSERTION
| 2023-09-14T21:27:52
| 2019-08-11T22:31:43
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 4,185
|
py
|
track.py
|
from sqlalchemy import (
Boolean,
Column,
DateTime,
Float,
ForeignKey,
Integer,
PrimaryKeyConstraint,
String,
Text,
text,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import relationship, validates
from src.model_validator import ModelValidator
from src.models.base import Base
from src.models.model_utils import (
RepresentableMixin,
get_fields_to_validate,
validate_field_helper,
)
from src.models.tracks.track_route import TrackRoute
from src.models.users.user import User
class Track(Base, RepresentableMixin):
__tablename__ = "tracks"
blockhash = Column(Text, ForeignKey("blocks.blockhash"), nullable=False)
blocknumber = Column(
Integer, ForeignKey("blocks.number"), index=True, nullable=False
)
track_id = Column(Integer, primary_key=True, nullable=False)
is_current = Column(Boolean, primary_key=True, nullable=False)
is_delete = Column(Boolean, nullable=False)
owner_id = Column(Integer, nullable=False, index=True)
audio_upload_id = Column(String)
preview_cid = Column(String, index=True)
track_cid = Column(
String, index=True
) # todo: after backfill, add nullable=False, both here and in a db migration
title = Column(Text)
duration = Column(Integer)
preview_start_seconds = Column(Float)
cover_art = Column(String)
tags = Column(String)
genre = Column(String)
mood = Column(String)
credits_splits = Column(String)
create_date = Column(String)
release_date = Column(String)
file_type = Column(String)
metadata_multihash = Column(String)
track_segments = Column(JSONB(), nullable=False)
created_at = Column(DateTime, nullable=False, index=True)
description = Column(String)
isrc = Column(String)
iswc = Column(String)
license = Column(String)
updated_at = Column(DateTime, nullable=False)
cover_art_sizes = Column(String)
download = Column(JSONB())
is_unlisted = Column(Boolean, nullable=False, server_default=text("false"))
field_visibility = Column(JSONB(True))
route_id = Column(String)
stem_of = Column(JSONB(True))
remix_of = Column(JSONB(True))
txhash = Column(
String,
primary_key=True,
nullable=False,
server_default=text("''::character varying"),
)
slot = Column(Integer)
is_available = Column(Boolean, nullable=False, server_default=text("true"))
is_premium = Column(Boolean, nullable=False, server_default=text("false"))
premium_conditions = Column(JSONB(True))
is_playlist_upload = Column(Boolean, nullable=False, server_default=text("false"))
ai_attribution_user_id = Column(Integer, nullable=True)
block = relationship( # type: ignore
"Block", primaryjoin="Track.blockhash == Block.blockhash"
)
block1 = relationship( # type: ignore
"Block", primaryjoin="Track.blocknumber == Block.number"
)
_routes = relationship( # type: ignore
TrackRoute,
primaryjoin="and_(\
remote(Track.track_id) == foreign(TrackRoute.track_id),\
TrackRoute.is_current)",
lazy="joined",
viewonly=True,
)
user = relationship( # type: ignore
User,
primaryjoin="and_(\
remote(Track.owner_id) == foreign(User.user_id),\
User.is_current)",
lazy="joined",
viewonly=True,
)
@property
def _slug(self):
return self._routes[0].slug if self._routes else ""
@property
def permalink(self):
if self.user and self.user[0].handle and self._slug:
return f"/{self.user[0].handle}/{self._slug}"
return ""
PrimaryKeyConstraint(is_current, track_id, txhash)
ModelValidator.init_model_schemas("Track")
fields = get_fields_to_validate("Track")
# unpacking args into @validates
@validates(*fields)
def validate_field(self, field, value):
return validate_field_helper(field, value, "Track", getattr(Track, field).type)
def get_attributes_dict(self):
return {col.name: getattr(self, col.name) for col in self.__table__.columns}
|
6391872ee49bbc4e5c8884a6a7b49ee5fd662535
|
332659a711970040fe001e3c3f043b276220ab3e
|
/ntc_rosetta/cli/print_processor.py
|
8088122979a2031296e3442a39509a8c486633db
|
[
"Apache-2.0"
] |
permissive
|
networktocode/ntc-rosetta
|
b1d451e52ce3cda4e0751047b7c1be937c2a908e
|
35fb3f55fb273703e5dd975ed8f3b2d1fdc51d9a
|
refs/heads/develop
| 2023-08-18T00:54:57.879372
| 2023-08-08T21:19:57
| 2023-08-08T21:19:57
| 189,195,038
| 104
| 28
|
Apache-2.0
| 2023-08-30T00:12:27
| 2019-05-29T09:33:55
|
Python
|
UTF-8
|
Python
| false
| false
| 1,066
|
py
|
print_processor.py
|
import json
import click
from ntc_rosetta import get_driver
from ntc_rosetta.yang import get_data_model
from yangify import linter
@click.command("print-parser")
@click.option(
"-j/-t",
"--json/--text",
"to_json",
default=False,
help="output format - default: text",
)
@click.argument("driver")
@click.option(
"-m",
"--model",
default="openconfig",
type=click.Choice(["openconfig", "ntc"]),
help="model to lint - default: openconfig",
)
@click.pass_context
def print_parser(ctx: click.Context, driver: str, model: str, to_json: bool) -> None:
"""
Prints a tree representation of a parser/translator.
Parser/Translator needs to be properly linted for this to work
"""
d = get_driver(driver, model)
dm = get_data_model(model)
lint = linter.Linter.lint(d.parser, dm, recursive=True)
if to_json:
text = json.dumps(lint.to_dict(), indent=4)
else:
text = lint.to_ascii_tree("")
print(text)
def add_commands(cli: click.Group) -> None:
cli.add_command(print_parser)
|
bbba4480ca3c650d14112e5ec8cc52259f323c06
|
aaa72c72c9089a5f4a71f8151ab8304297692680
|
/tests/test_saliency_map_conversion.py
|
18b58b74b9e7a4156425582a28436c64e43fec5c
|
[
"MIT"
] |
permissive
|
matthias-k/pysaliency
|
2569653a727247cc81c0a994acaeface93124ee7
|
0664dba9b637f64b089b3a44b191dd24da84a30e
|
refs/heads/master
| 2023-08-11T08:03:26.527271
| 2022-06-11T21:52:31
| 2022-06-11T21:52:31
| 46,892,512
| 142
| 42
|
MIT
| 2023-07-06T14:03:09
| 2015-11-25T23:08:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,491
|
py
|
test_saliency_map_conversion.py
|
import numpy as np
import pytest
import pysaliency
from pysaliency import optimize_for_information_gain
from pysaliency.models import SaliencyMapNormalizingModel
@pytest.fixture
def stimuli():
return pysaliency.Stimuli([np.random.randint(0, 255, size=(25, 30, 3)) for i in range(50)])
@pytest.fixture
def saliency_model():
return pysaliency.GaussianSaliencyMapModel(center_x=0.15, center_y=0.85, width=0.2)
@pytest.fixture
def transformed_saliency_model(saliency_model):
return pysaliency.saliency_map_models.LambdaSaliencyMapModel(
[saliency_model],
fn=lambda smaps: np.sqrt(smaps[0]),
)
@pytest.fixture
def probabilistic_model(saliency_model):
blurred_model = pysaliency.BluringSaliencyMapModel(saliency_model, kernel_size=5.0)
centerbias_model = pysaliency.saliency_map_models.LambdaSaliencyMapModel(
[pysaliency.GaussianSaliencyMapModel(width=0.5)],
fn=lambda smaps: 1.0 * smaps[0],
)
model_with_centerbias = blurred_model * centerbias_model
probabilistic_model = SaliencyMapNormalizingModel(model_with_centerbias)
return probabilistic_model
@pytest.fixture
def fixations(stimuli, probabilistic_model):
return probabilistic_model.sample(stimuli, 1000, rst=np.random.RandomState(seed=42))
@pytest.fixture(params=["torch", "theano"])
def framework(request):
if request.param == 'theano':
import theano
old_optimizer = theano.config.optimizer
theano.config.optimizer = 'fast_compile'
yield request.param
if request.param == 'theano':
        theano.config.optimizer = old_optimizer
def test_optimize_for_information_gain(stimuli, fixations, transformed_saliency_model, probabilistic_model, framework):
expected_information_gain = probabilistic_model.information_gain(stimuli, fixations, average='image')
model1, ret1 = optimize_for_information_gain(
transformed_saliency_model,
stimuli,
fixations,
average='fixations',
verbose=2,
batch_size=1 if framework == 'theano' else 10,
minimize_options={'verbose': 10} if framework == 'torch' else None,
maxiter=50,
blur_radius=2.0,
return_optimization_result=True,
framework=framework,
)
reached_information_gain = model1.information_gain(stimuli, fixations, average='image')
print(expected_information_gain, reached_information_gain)
assert reached_information_gain >= expected_information_gain - 0.01
|
77e80acfd9eacaa5b4fe07237c8f0f0965f634a8
|
085cf6512c946d615eda58a3a0d353c0aa1db8cf
|
/deepfence_backend/config/celeryworkerconfig.py
|
11a49222ad08ca9d09e54e1912872821eb83986d
|
[
"Apache-2.0"
] |
permissive
|
deepfence/ThreatMapper
|
00c38c65ed2f014004c9818f03d5e129496b4dd8
|
748b0c8782507eaf351625b9c9fad46903ad6237
|
refs/heads/main
| 2023-08-31T11:13:53.813651
| 2023-03-02T00:49:57
| 2023-03-02T00:49:57
| 238,662,977
| 4,540
| 481
|
Apache-2.0
| 2023-09-14T13:24:37
| 2020-02-06T10:30:09
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
celeryworkerconfig.py
|
imports = (
'tasks.email_sender',
'tasks.notification_worker',
'tasks.common_worker',
# 'tasks.scope',
'tasks.vulnerability_scan_worker',
'tasks.notification',
'tasks.reaper_tasks',
'tasks.task_scheduler',
'tasks.registry_images',
'tasks.running_notification',
'tasks.user_activity',
'tasks.threat_graph',
)
task_create_missing_queues = True
task_acks_late = True
broker_transport_options = {
'visibility_timeout': 86400,
}
task_soft_time_limit = 86400
task_time_limit = 86400 # The worker processing the task will be killed and replaced with a new one when this is exceeded.
|
0a0123f4ae4f6d1a409d769cf0ea52009c91fb1b
|
5a6ccde5f37cc86b6fc0812b2bf40f42eab23906
|
/C-set/785C. Anton and Fairy Tale.py
|
ec84a73c6b2ef9d32ced4b17bc6c5fcdbcd939e4
|
[] |
no_license
|
Waqar-107/Codeforces
|
23f2b1edffb85f6f020107f03e09a455d3e6e792
|
f0d2f25aa6a09c06083b82c39cdf3288ec2eecba
|
refs/heads/master
| 2023-03-09T07:55:46.583363
| 2023-03-04T09:57:44
| 2023-03-04T09:57:44
| 82,915,896
| 196
| 138
| null | 2023-02-11T22:06:20
| 2017-02-23T10:29:34
|
C++
|
UTF-8
|
Python
| false
| false
| 317
|
py
|
785C. Anton and Fairy Tale.py
|
# from dust i have come, dust i will be
n, m = map(int, input().split())
hi = n
lo = m + 1
ans = n
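# binary search below finds the smallest mid (with m < mid <= n) such that
# n - m - (mid - m) * (mid - m + 1) / 2 <= 0, i.e. the triangular sum
# 1 + 2 + ... + (mid - m) has consumed the remaining n - m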
while lo <= hi:
mid = (hi + lo) // 2
x = mid - m
temp = (x * (x + 1)) // 2
if n - m - temp <= 0:
ans = min(ans, mid)
hi = mid - 1
else:
lo = mid + 1
print(ans)
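# Brute-force cross-check for small inputs (hypothetical helper, not part of the
# original submission): simulate the barn day by day, letting m grains arrive
# (capped at the capacity n) before the day's sparrows eat `day` grains, and
# report the first day the barn is empty. brute_force(5, 2) == 4, which agrees
# with the binary-search answer above.
def brute_force(n, m):
    grain = n
    day = 0
    while True:
        day += 1
        grain = min(n, grain + m)
        grain -= day
        if grain <= 0:
            return day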
|
7780f11a8667d4ccca9dee906232ccfadc8b5c9c
|
ca8ecd3e65d79e96b4ef39a7ee2319b7a797e0f8
|
/src/memote/experimental/medium.py
|
d162d9eddf39e8aa2b107234056673e4cde83f9d
|
[
"Apache-2.0"
] |
permissive
|
opencobra/memote
|
6567b8cc4bec81f6224b89ce833bb44cdeb1568a
|
81a55a163262a0e06bfcb036d98e8e551edc3873
|
refs/heads/develop
| 2023-07-06T19:37:40.412301
| 2021-10-15T12:21:04
| 2021-10-15T14:42:39
| 58,705,171
| 109
| 26
|
Apache-2.0
| 2023-09-13T13:33:28
| 2016-05-13T05:59:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,881
|
py
|
medium.py
|
# -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a class for medium definitions."""
from __future__ import absolute_import
import logging
from memote.experimental.experimental_base import ExperimentalBase
__all__ = ("Medium",)
LOGGER = logging.getLogger(__name__)
class Medium(ExperimentalBase):
"""Represent a specific medium condition."""
SCHEMA = "medium.json"
def __init__(self, **kwargs):
"""
Initialize a medium.
Parameters
----------
        kwargs
            Keyword arguments passed through to ``ExperimentalBase``.
"""
super(Medium, self).__init__(**kwargs)
def validate(self, model, checks=None):
"""Use a defined schema to validate the medium table format."""
if checks is None:
checks = []
custom = [
{
"unknown-identifier": {
"column": "exchange",
"identifiers": {r.id for r in model.reactions},
}
}
]
super(Medium, self).validate(model=model, checks=checks + custom)
def apply(self, model):
"""Set the defined medium on the given model."""
model.medium = {
row.exchange: row.uptake for row in self.data.itertuples(index=False)
}
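# What Medium.apply boils down to, sketched with plain pandas for illustration
# (the exchange identifiers and uptake values below are hypothetical; the real
# Medium obtains self.data through its ExperimentalBase machinery):
import pandas as pd

data = pd.DataFrame({"exchange": ["EX_glc__D_e", "EX_o2_e"], "uptake": [10.0, 18.5]})
medium_dict = {row.exchange: row.uptake for row in data.itertuples(index=False)}
# With a cobrapy model loaded, this dict is exactly what gets assigned:
#     model.medium = medium_dict
print(medium_dict)  # {'EX_glc__D_e': 10.0, 'EX_o2_e': 18.5}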
|
aad2f0e2543a4d774230898d977fb56455d226fe
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stubs/influxdb-client/influxdb_client/client/invokable_scripts_api.pyi
|
f4b85afbefb3fa73e4d5fd38b48a39c191dd0ad7
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 1,546
|
pyi
|
invokable_scripts_api.pyi
|
from _typeshed import Incomplete
from collections.abc import Generator, Iterator
from typing import Any
from influxdb_client import Script, ScriptCreateRequest, ScriptUpdateRequest
from influxdb_client.client._base import _BaseQueryApi
from influxdb_client.client.flux_table import CSVIterator, FluxRecord, TableList
class InvokableScriptsApi(_BaseQueryApi):
def __init__(self, influxdb_client) -> None: ...
def create_script(self, create_request: ScriptCreateRequest) -> Script: ...
def update_script(self, script_id: str, update_request: ScriptUpdateRequest) -> Script: ...
def delete_script(self, script_id: str) -> None: ...
def find_scripts(self, **kwargs): ...
def invoke_script(self, script_id: str, params: dict[Incomplete, Incomplete] | None = ...) -> TableList: ...
def invoke_script_stream(
self, script_id: str, params: dict[Incomplete, Incomplete] | None = ...
) -> Generator[FluxRecord, Any, None]: ...
def invoke_script_data_frame(
self, script_id: str, params: dict[Incomplete, Incomplete] | None = ..., data_frame_index: list[str] | None = ...
): ...
def invoke_script_data_frame_stream(
self, script_id: str, params: dict[Incomplete, Incomplete] | None = ..., data_frame_index: list[str] | None = ...
): ...
def invoke_script_csv(self, script_id: str, params: dict[Incomplete, Incomplete] | None = ...) -> CSVIterator: ...
def invoke_script_raw(self, script_id: str, params: dict[Incomplete, Incomplete] | None = ...) -> Iterator[list[str]]: ...
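# Hedged usage sketch: this stub only carries type information; at runtime the
# API is reached through the influxdb-client package. The accessor name
# invokable_scripts_api() and the demo script id/params below are assumptions
# taken from that package's documentation, not from this stub.
from influxdb_client import InfluxDBClient

with InfluxDBClient(url="http://localhost:8086", token="my-token", org="my-org") as client:
    scripts_api = client.invokable_scripts_api()
    tables = scripts_api.invoke_script("09afa3b220fe4000", params={"bucket": "my-bucket"})
    for table in tables:
        for record in table.records:
            print(record.values)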
|
98733bf3c7eaf5c6fb358c51f7b2380248e39414
|
96dcea595e7c16cec07b3f649afd65f3660a0bad
|
/tests/components/webostv/const.py
|
fbdb9c47c3b2d19ce1be7a21b3078dc0f66c5833
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/core
|
3455eac2e9d925c92d30178643b1aaccf3a6484f
|
80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743
|
refs/heads/dev
| 2023-08-31T15:41:06.299469
| 2023-08-31T14:50:53
| 2023-08-31T14:50:53
| 12,888,993
| 35,501
| 20,617
|
Apache-2.0
| 2023-09-14T21:50:15
| 2013-09-17T07:29:48
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
const.py
|
"""Constants for LG webOS Smart TV tests."""
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.webostv.const import LIVE_TV_APP_ID
FAKE_UUID = "some-fake-uuid"
TV_NAME = "fake_webos"
ENTITY_ID = f"{MP_DOMAIN}.{TV_NAME}"
HOST = "1.2.3.4"
CLIENT_KEY = "some-secret"
CHANNEL_1 = {
"channelNumber": "1",
"channelName": "Channel 1",
"channelId": "ch1id",
}
CHANNEL_2 = {
"channelNumber": "20",
"channelName": "Channel Name 2",
"channelId": "ch2id",
}
MOCK_APPS = {
LIVE_TV_APP_ID: {
"title": "Live TV",
"id": LIVE_TV_APP_ID,
"largeIcon": "large-icon",
"icon": "icon",
},
}
MOCK_INPUTS = {
"in1": {"label": "Input01", "id": "in1", "appId": "app0"},
"in2": {"label": "Input02", "id": "in2", "appId": "app1"},
}
|
41e708c1dc549c6b7f15435b8f41b33b2249d71c
|
6c066611b11a8de5e2c22c30cfcc578a4c49edce
|
/GLSL/Blur/Bilateral_GL/Bilateral_GL.py
|
cef4b432cf89565d88eb72b51657d1e404465dbc
|
[] |
no_license
|
NatronGitHub/natron-plugins
|
ad2d9227637b4b86b45f92856fa54d327872a0a6
|
b0c499fb6391024f54be9f26ed41b5cf7475d574
|
refs/heads/master
| 2022-12-12T10:02:20.252222
| 2022-11-30T02:29:04
| 2022-11-30T02:29:04
| 130,576,224
| 332
| 67
| null | 2022-11-30T02:29:05
| 2018-04-22T14:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 17,450
|
py
|
Bilateral_GL.py
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named Bilateral_GLExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from Bilateral_GLExt import *
except ImportError:
pass
def getPluginID():
return "natron.community.plugins.Bilateral_GL"
def getLabel():
return "Bilateral_GL"
def getVersion():
return 1.01
def getIconPath():
return "Bilateral_GL.png"
def getGrouping():
return "Community/GLSL/Blur"
def getPluginDescription():
return "Bilateral Blur."
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
lastNode.setColor(1, 0.5686, 0.3333)
# Create the user parameters
lastNode.Controls = lastNode.createPageParam("Controls", "Controls")
param = lastNode.createStringParam("sep01", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep01 = param
del param
param = lastNode.createStringParam("sep02", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep02 = param
del param
param = lastNode.createSeparatorParam("SETUP", "Setup")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.SETUP = param
del param
param = lastNode.createStringParam("sep03", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep03 = param
del param
param = lastNode.createStringParam("sep04", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep04 = param
del param
param = lastNode.createDoubleParam("Spatial_Std_Dev", "Spatial Std Dev")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(20, 0)
param.setDefaultValue(10, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Spatial_Std_Dev = param
del param
param = lastNode.createStringParam("sep05", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep05 = param
del param
param = lastNode.createDoubleParam("Value_Std_Dev", "Value Std Dev")
param.setMinimum(-2147483648, 0)
param.setMaximum(2147483647, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(1, 0)
param.setDefaultValue(0.1, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Value_Std_Dev = param
del param
param = lastNode.createStringParam("sep06", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep06 = param
del param
param = lastNode.createStringParam("sep07", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep07 = param
del param
lastNode.Credits = lastNode.createPageParam("Credits", "Credits")
param = lastNode.createStringParam("sep101", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep101 = param
del param
param = lastNode.createStringParam("sep102", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep102 = param
del param
param = lastNode.createSeparatorParam("NAME", "Bilateral_GL v1.01")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.NAME = param
del param
param = lastNode.createStringParam("sep103", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep103 = param
del param
param = lastNode.createStringParam("sep104", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep104 = param
del param
param = lastNode.createSeparatorParam("LINE101", "")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.LINE101 = param
del param
param = lastNode.createStringParam("sep105", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep105 = param
del param
param = lastNode.createStringParam("sep106", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep106 = param
del param
param = lastNode.createSeparatorParam("FR", "ShaderToy 0.8.8")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.FR = param
del param
param = lastNode.createStringParam("sep107", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep107 = param
del param
param = lastNode.createStringParam("sep108", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep108 = param
del param
param = lastNode.createSeparatorParam("CONVERSION", " (Fabrice Fernandez - 2017)")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.CONVERSION = param
del param
param = lastNode.createStringParam("sep109", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep109 = param
del param
param = lastNode.createStringParam("sep110", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep110 = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['Controls', 'Credits', 'Node', 'Settings'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "Output2"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output2")
lastNode.setPosition(4139, 3997)
lastNode.setSize(80, 32)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput2 = lastNode
del lastNode
# End of node "Output2"
# Start of node "Source"
lastNode = app.createNode("fr.inria.built-in.Input", 1, group)
lastNode.setScriptName("Source")
lastNode.setLabel("Source")
lastNode.setPosition(4139, 3697)
lastNode.setSize(80, 32)
lastNode.setColor(0.3, 0.5, 0.2)
groupSource = lastNode
del lastNode
# End of node "Source"
# Start of node "Shadertoy1_3"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("Shadertoy1_3")
lastNode.setLabel("Shadertoy1_3")
lastNode.setPosition(4139, 3834)
lastNode.setSize(80, 32)
lastNode.setColor(0.3, 0.5, 0.2)
groupShadertoy1_3 = lastNode
param = lastNode.getParam("paramValueFloat1")
if param is not None:
param.setValue(0.1, 0)
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("// https://www.shadertoy.com/view/4dfGDH\n\n// And another filter!\n\n// Adapted for Natron by F. Devernay\n\n// iChannel0: Source, filter=nearest, wrap=clamp\n// BBox: iChannel0\n\nconst vec2 iRenderScale = vec2(1.,1.);\nuniform float sigma_s = 10.0; // Spatial Std Dev (Standard deviation of the spatial kernel in pixel units), min=0., max=20.\nuniform float sigma_r = 0.1; // Value Std Dev (Standard deviation of the range kernel in intensity unit), min=0., max=1.\n#define MSIZE 30 // should be 1.5 times the maximum value for sigma_s\n\nfloat normpdf(in float x, in float sigma)\n{\n\treturn 0.39894*exp(-0.5*x*x/(sigma*sigma))/sigma;\n}\n\nfloat normpdf3(in vec3 v, in float sigma)\n{\n\treturn 0.39894*exp(-0.5*dot(v,v)/(sigma*sigma))/sigma;\n}\n\n\nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n\tvec2 uv = fragCoord.xy / iResolution.xy;\n\tvec3 c = texture2D(iChannel0, uv).rgb;\n\t{\n\t\t//declare stuff\n\t\tint kSize = int(min((MSIZE-1)/2., 1.5*sigma_s*iRenderScale.x));\n\t\tfloat kernel[MSIZE];\n\t\tvec3 final_colour = vec3(0.0);\n\t\t\n\t\t//create the 1-D kernel\n\t\tfloat Z = 0.0;\n\t\tfor (int j = 0; j <= kSize; ++j)\n\t\t{\n\t\t\tkernel[kSize+j] = kernel[kSize-j] = normpdf(float(j), sigma_s*iRenderScale.x);\n\t\t}\n\t\t\n\t\t\n\t\tvec3 cc;\n\t\tfloat factor;\n\t\tfloat bZ = 1.0/normpdf(0.0, sigma_r);\n\t\t//read out the texels\n\t\tfor (int i=-kSize; i <= kSize; ++i)\n\t\t{\n\t\t\tfor (int j=-kSize; j <= kSize; ++j)\n\t\t\t{\n\t\t\t\tcc = texture2D(iChannel0, uv + (vec2(float(i),float(j))) / iResolution.xy).rgb;\n\t\t\t\tfactor = normpdf3(cc-c, sigma_r)*bZ*kernel[kSize+j]*kernel[kSize+i];\n\t\t\t\tZ += factor;\n\t\t\t\tfinal_colour += factor*cc;\n\n\t\t\t}\n\t\t}\n\t\t\n\t\t\n\t\tfragColor = vec4(final_colour/Z, 1.0);\n\t}\n}\n")
del param
param = lastNode.getParam("mipmap0")
if param is not None:
param.set("nearest")
del param
param = lastNode.getParam("inputLabel0")
if param is not None:
param.setValue("Source")
del param
param = lastNode.getParam("inputEnable1")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("bbox")
if param is not None:
param.set("iChannel0")
del param
param = lastNode.getParam("NatronParamFormatChoice")
if param is not None:
param.set("PC_Video")
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(2, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("sigma_s")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("Spatial Std Dev")
del param
param = lastNode.getParam("paramHint0")
if param is not None:
param.setValue("Standard deviation of the spatial kernel in pixel units")
del param
param = lastNode.getParam("paramDefaultFloat0")
if param is not None:
param.setValue(10, 0)
del param
param = lastNode.getParam("paramMinFloat0")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("paramMaxFloat0")
if param is not None:
param.setValue(20, 0)
del param
param = lastNode.getParam("paramType1")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName1")
if param is not None:
param.setValue("sigma_r")
del param
param = lastNode.getParam("paramLabel1")
if param is not None:
param.setValue("Value Std Dev")
del param
param = lastNode.getParam("paramHint1")
if param is not None:
param.setValue("Standard deviation of the range kernel in intensity unit")
del param
param = lastNode.getParam("paramDefaultFloat1")
if param is not None:
param.setValue(0.09999999999999999, 0)
del param
param = lastNode.getParam("paramMinFloat1")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("paramMaxFloat1")
if param is not None:
param.setValue(1, 0)
del param
del lastNode
# End of node "Shadertoy1_3"
# Now that all nodes are created we can connect them together, restore expressions
groupOutput2.connectInput(0, groupShadertoy1_3)
groupShadertoy1_3.connectInput(0, groupSource)
param = groupShadertoy1_3.getParam("paramValueFloat0")
param.slaveTo(group.getParam("Spatial_Std_Dev"), 0, 0)
del param
param = groupShadertoy1_3.getParam("paramValueFloat1")
param.slaveTo(group.getParam("Value_Std_Dev"), 0, 0)
del param
try:
extModule = sys.modules["Bilateral_GLExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule ,"createInstanceExt") and hasattr(extModule.createInstanceExt,"__call__"):
extModule.createInstanceExt(app,group)
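# The shader wired above weights every neighbour by a spatial Gaussian (sigma_s,
# in pixels) multiplied by a range Gaussian on the colour difference (sigma_r).
# A rough NumPy sketch of that weighting on a grayscale array, for illustration
# only; the PyPlug merely configures the GLSL node and ships no Python filter.
import numpy as np

def bilateral_gray(img, sigma_s=10.0, sigma_r=0.1):
    imgf = img.astype(float)
    k = int(min(14, 1.5 * sigma_s))          # kernel half-size, mirroring the shader's MSIZE cap
    ys, xs = np.mgrid[-k:k + 1, -k:k + 1]
    spatial = np.exp(-0.5 * (xs ** 2 + ys ** 2) / sigma_s ** 2)
    padded = np.pad(imgf, k, mode="edge")
    h, w = imgf.shape
    out = np.zeros_like(imgf)
    norm = np.zeros_like(imgf)
    for dy in range(-k, k + 1):
        for dx in range(-k, k + 1):
            shifted = padded[k + dy:k + dy + h, k + dx:k + dx + w]
            weight = spatial[dy + k, dx + k] * np.exp(-0.5 * ((shifted - imgf) / sigma_r) ** 2)
            out += weight * shifted
            norm += weight
    return out / norm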
|
d438423d824bb33f4f8983535196fa2135ac6349
|
e7f2a8c466c14b9821e59740ed0407107e1254a4
|
/examples/rules/actions/actions.py
|
c7e37736402c5dec00dd59e4fb6ef3b7a50516a2
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0",
"MIT"
] |
permissive
|
RasaHQ/rasa
|
4a31134308a9a4d8824fe7faef02526accdd0f19
|
50857610bdf0c26dc61f3203a6cbb4bcf193768c
|
refs/heads/main
| 2023-08-28T01:53:56.981600
| 2023-08-25T10:20:49
| 2023-08-25T10:20:49
| 70,908,208
| 13,167
| 3,739
|
Apache-2.0
| 2023-09-14T09:54:40
| 2016-10-14T12:27:49
|
Python
|
UTF-8
|
Python
| false
| false
| 881
|
py
|
actions.py
|
from typing import Dict, Text, List
from rasa_sdk import Tracker
from rasa_sdk.events import EventType
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk import Action
from rasa_sdk.events import SlotSet
class ActionSwitchFAQ(Action):
def name(self) -> Text:
return "action_switch_faq"
def run(
self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
) -> List[EventType]:
return [SlotSet("detailed_faq", not tracker.get_slot("detailed_faq"))]
class ValidateSlots(Action):
def name(self) -> Text:
"""Unique identifier of the form"""
return "action_validate_loop_q_form"
def run(
self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
) -> List[EventType]:
dispatcher.utter_message("validate_some_slot")
return [SlotSet("some_slot", "sdk")]
|
25269a29709130cd38d232604470b81878dd1300
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/11_动态规划/背包问题/01背包/1049. 最后一块石头的重量 II.py
|
d0de6d354b135fb5fbe11f2082a9f8a0e6b57dcc
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
1049. 最后一块石头的重量 II.py
|
# 1049. Last Stone Weight II
from typing import List
INF = int(1e20)
class Solution:
def lastStoneWeightII(self, nums: List[int]) -> int:
dp = set([0])
for num in nums:
ndp = set()
for pre in dp:
ndp.add(pre + num)
ndp.add(pre - num)
dp = ndp
res = INF
for num in dp:
if num >= 0:
res = min(res, num)
return res
print(Solution().lastStoneWeightII(nums=[1, 2, 5]))
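# Equivalent 0/1-knapsack view (illustrative sketch, not part of the original
# file): the set DP above enumerates every signed sum of the stones, and the
# smallest non-negative one equals total - 2 * s, where s is the largest subset
# sum not exceeding total // 2.
def last_stone_weight_ii_knapsack(nums):
    total = sum(nums)
    reachable = {0}                       # subset sums capped at total // 2
    for num in nums:
        reachable |= {s + num for s in reachable if s + num <= total // 2}
    return total - 2 * max(reachable)

assert last_stone_weight_ii_knapsack([1, 2, 5]) == 2  # same result as the print above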
|
0ff0ac34a3d8b456076f5e9d5a8164b959e46dd1
|
52a677b94056d3397b4a499bc9185adb68a63f05
|
/workers/buildlogsarchiver/test/test_buildlogsarchiver.py
|
55194bbd5ee79162e5e13fca084d467da08658f1
|
[
"Apache-2.0"
] |
permissive
|
quay/quay
|
9b6fcff54efc0dbf7c6d91fa80676950555b6f1a
|
e400a0c22c5f89dd35d571654b13d262b1f6e3b3
|
refs/heads/master
| 2023-08-28T15:08:38.001842
| 2023-08-28T13:52:31
| 2023-08-28T13:52:31
| 220,517,730
| 2,363
| 322
|
Apache-2.0
| 2023-09-14T17:43:48
| 2019-11-08T17:37:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
test_buildlogsarchiver.py
|
from test.fixtures import *
from mock import Mock, patch
from app import storage
from workers.buildlogsarchiver.buildlogsarchiver import ArchiveBuildLogsWorker
from workers.buildlogsarchiver.models_pre_oci import pre_oci_model as model
def test_logarchiving(app):
worker = ArchiveBuildLogsWorker()
logs_mock = Mock()
logs_mock.get_log_entries = Mock(return_value=(1, [{"some": "entry"}]))
# Add a build that is ready for archiving.
build = model.create_build_for_testing()
with patch("workers.buildlogsarchiver.buildlogsarchiver.build_logs", logs_mock):
worker._archive_redis_buildlogs()
# Ensure the get method was called.
logs_mock.get_log_entries.assert_called_once()
logs_mock.expire_status.assert_called_once()
logs_mock.delete_log_entries.assert_called_once()
# Ensure the build was marked as archived.
assert model.get_build(build.uuid).logs_archived
# Ensure a file was written to storage.
assert storage.exists(["local_us"], "logarchive/%s" % build.uuid)
|
81d47d12bb26f84fa59d30f8cec2bbe36a8285c5
|
b964ac1b4c25dff0c45807f7132d64c941845d78
|
/lyrebird/config/__init__.py
|
c64507f0e5cdc941cc8c2a0b2fec1c6e36f08ce1
|
[
"MIT"
] |
permissive
|
Meituan-Dianping/lyrebird
|
6db7fe3be32d74565bbcaa0491f03dc72d506214
|
b1ec5b081660c63e696454b63dd2f3c2b93a85d9
|
refs/heads/master
| 2023-08-25T14:44:01.580972
| 2023-08-23T10:04:00
| 2023-08-23T10:04:00
| 140,687,246
| 963
| 175
|
MIT
| 2023-09-11T09:10:58
| 2018-07-12T08:50:59
|
Python
|
UTF-8
|
Python
| false
| false
| 9,173
|
py
|
__init__.py
|
import json
import codecs
import jinja2
from os import path
from pathlib import Path
from copy import deepcopy
from packaging import version
from lyrebird import log as nlog
from lyrebird import application
from lyrebird.config.diff_mode import SettingDiffMode
from lyrebird.config.checker_switch import SettingCheckerSwitch
from .keywords import *
logger = nlog.get_logger()
config_template = {
"version": "0.10.5",
"proxy.filters": [],
"proxy.port": 4272,
"mock.port": 9090,
"mock.data": path.join("{{current_dir}}", "mock_data", "personal"),
"mock.proxy_headers": {
"scheme": "MKScheme",
"host": "MKOriginHost",
"port": "MKOriginPort"
}
}
CONFIG_FUNC_MAP = {
'checker.switch': SettingCheckerSwitch,
'mock.mode': SettingDiffMode
}
personal_config_template = {
"event.broken_database_path_list": []
}
class ConfigManager():
ROOT = Path('~/.lyrebird').expanduser()
DEFAULT_FILENAME = 'conf.json'
DEFAULT_PERSONAL_FILENAME = 'personal_conf.json'
BASE_CONFIG = ROOT / DEFAULT_FILENAME
PERSONAL_CONFIG = ROOT / DEFAULT_PERSONAL_FILENAME
FORBIDDEN_MODIFY_FIELDS_IN_CONFIG = set(['version', 'proxy.port', 'mock.port'])
def __init__(self, conf_path_list=None, custom_conf=None):
self.config = config_template
self.config_root = self.ROOT
self.conf_file = self.BASE_CONFIG
self.config_list = []
# Current personal config only supports checking whether lyrebird.db is broken.
self.personal_config = personal_config_template
self.personal_conf_file = self.PERSONAL_CONFIG
self.update_base_config()
self.read_config()
if conf_path_list:
for conf_path in conf_path_list:
self.update_conf_source(conf_path)
if custom_conf:
self.update_conf_custom(custom_conf)
self.initialize_personal_config()
def update_conf_source(self, path):
input_path: Path = Path(path).expanduser().absolute()
if input_path.is_dir():
input_root = input_path
input_file = input_path / self.DEFAULT_FILENAME
else:
input_root = input_path.parent
input_file = input_path
if not input_file.exists():
logger.error(f'Config {input_file} not found!')
else:
self.config_root = input_root
self.conf_file = input_file
self.read_config()
def update_conf_custom(self, custom_conf):
self.add_config(custom_conf, rank=-1)
def contains_forbidden_modify_field(self, update_conf: dict):
union_fields = self.FORBIDDEN_MODIFY_FIELDS_IN_CONFIG & update_conf.keys()
return union_fields if len(union_fields) > 0 else None
def override_config_field(self, update_conf: dict):
"""
Update Application config by ``config.update(update_conf)``.
If update_conf contains ``FORBIDDEN_MODIFY_FIELDS_IN_CONFIG``, raise ``ConfigException``.
"""
if not update_conf:
return
forbidden_modify_fields = self.contains_forbidden_modify_field(update_conf)
if forbidden_modify_fields:
raise ConfigException(f'Config field cannot be modified: {forbidden_modify_fields}')
self.add_config(update_conf, type='api_patch', apply_now=True)
logger.debug(f'Need update config fields: {update_conf}')
self.config.update(update_conf)
logger.debug(f'Update done. config: {self.config}')
application.server['event'].publish('config_update', {'config_update' : {'data': update_conf}})
def read_config(self):
template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(str(self.config_root)))
template = template_env.get_template(self.conf_file.name)
current_dir = str(self.config_root)
download_dir = str(self.ROOT / 'downloads')
conf_str = template.render(current_dir=json.dumps(current_dir).strip('"'), download_dir=json.dumps(download_dir).strip('"'))
loaded_config = json.loads(conf_str)
self.add_config(loaded_config, rank=-10)
def write_config(self):
self.config_root.mkdir(parents=True, exist_ok=True)
with codecs.open(self.conf_file, 'w', 'utf-8') as f:
f.write(json.dumps(self.config, indent=4, ensure_ascii=False))
def update_base_config(self):
if self.BASE_CONFIG.exists() and self.BASE_CONFIG.is_file():
with codecs.open(self.BASE_CONFIG, 'r', 'utf-8') as f:
base_conf = json.load(f)
if version.parse(base_conf.get('version', '0.0.0')) < version.parse(config_template.get('version', '0.0.0')):
self.write_config()
else:
self.write_config()
def add_config(self, config_dict: dict, rank=0, type='', override_same_type=False, level=1, apply_now=False) -> None:
config = Config(config_dict)
config.type = type
if override_same_type:
for c in self.config_list[::-1]:
if c.type == type:
self.config_list.remove(c)
self.config_list.append(config)
self.merge_config(self.config, config.config, level=level, apply_now=apply_now)
def merge_config(self, origin_config, new_config, level=-1, apply_now=False):
for key_child in list(new_config.keys()):
self.merge_generator(key_child, origin_config, new_config, level)
if apply_now:
self.add_each_config_item(new_config)
def merge_generator(self, key, origin_config, new_config, level):
if level == 0:
return
if key not in origin_config:
origin_config[key] = deepcopy(new_config[key])
elif origin_config[key] == new_config[key]:
return
elif level == 1:
origin_config[key] = deepcopy(new_config[key])
elif type(origin_config[key]) != type(new_config[key]):
origin_config[key] = deepcopy(new_config[key])
elif isinstance(new_config[key], list):
origin_config[key] = deepcopy(new_config[key])
elif isinstance(new_config[key], dict):
for key_child in list(new_config[key].keys()):
self.merge_generator(key_child, origin_config[key], new_config[key], level-1)
else:
origin_config[key] = new_config[key]
def add_each_config_item(self, config):
        # Handle only the first-level keys
for key, value in config.items():
if key not in CONFIG_FUNC_MAP:
continue
CONFIG_FUNC_MAP[key].add(value)
def remove_config(self, config, type='', level=-1, apply_now=False):
remove_config = None
recover_config = None
for c in self.config_list[::-1]:
if remove_config:
recover_config = c
break
if c.type == type:
remove_config = c
self.unmerge_config(self.config, remove_config.config, level=level, apply_now=apply_now)
self.merge_config(self.config, recover_config.config, level=level, apply_now=apply_now)
# todo handle the config added after remove_config
self.config_list.remove(remove_config)
def unmerge_config(self, origin_config, remove_config, level=-1, apply_now=False):
for key_child in list(remove_config.keys()):
self.unmerge_generator(key_child, origin_config, remove_config, level=level)
if apply_now:
self.remove_each_config_item(remove_config)
def unmerge_generator(self, key, origin_config, remove_config, level):
if level == 0:
return
if key not in origin_config:
return
if level == 1:
origin_config.pop(key, None)
elif isinstance(remove_config[key], dict):
for key_child in list(remove_config[key].keys()):
self.unmerge_generator(key_child, origin_config[key], remove_config[key], level-1)
else:
origin_config.pop(key, None)
def remove_each_config_item(self, config):
        # Handle only the first-level keys
for key, value in config.items():
if key not in CONFIG_FUNC_MAP:
continue
CONFIG_FUNC_MAP[key].remove(value)
def initialize_personal_config(self):
if not self.personal_conf_file.exists():
self.write_personal_config()
self.personal_config = self.read_personal_config()
def update_personal_config(self, config_dict: dict):
self.personal_config = config_dict
self.write_personal_config()
def read_personal_config(self):
with codecs.open(self.personal_conf_file, 'r', 'utf-8') as f:
return json.load(f)
def write_personal_config(self):
with codecs.open(self.personal_conf_file, 'w', 'utf-8') as f:
f.write(json.dumps(self.personal_config, indent=4, ensure_ascii=False))
class Config:
def __init__(self, config):
self.rank = 0
self.type = ''
self.config = config
class ConfigException(Exception):
pass
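# Sketch of the merge semantics implemented by merge_generator above, restated
# standalone for illustration (it does not import lyrebird): level=1 overwrites
# a key wholesale, a larger level descends into nested dicts, and lists or type
# mismatches are always replaced by a deep copy.
from copy import deepcopy

def merge(origin, new, level=-1):
    for key in new:
        if level == 0:
            return
        if key not in origin or level == 1 or not isinstance(new[key], dict) \
                or type(origin[key]) != type(new[key]):
            origin[key] = deepcopy(new[key])
        else:
            merge(origin[key], new[key], level - 1)

base = {'mock.proxy_headers': {'scheme': 'MKScheme', 'host': 'MKOriginHost'}}
merge(base, {'mock.proxy_headers': {'host': 'NewHost'}}, level=2)
print(base)  # {'mock.proxy_headers': {'scheme': 'MKScheme', 'host': 'NewHost'}}
merge(base, {'mock.proxy_headers': {'host': 'OnlyHost'}}, level=1)
print(base)  # {'mock.proxy_headers': {'host': 'OnlyHost'}}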
|
83156cbe03bbe8135116df42a8a0c2c8ae5267a3
|
e03bce53de6f88c0e09f56e4fe11c36af0f1161f
|
/typings/awacs/cassandra.pyi
|
338ce43e8267894407dd08684ec7168102e91469
|
[
"Apache-2.0"
] |
permissive
|
onicagroup/runway
|
20c31df9cbc1a1ffc5c9aa468ce5cf7d6ac7899f
|
0763b06aee07d2cf3f037a49ca0cb81a048c5deb
|
refs/heads/master
| 2023-08-30T22:35:54.113981
| 2023-08-29T14:13:35
| 2023-08-29T14:13:35
| 122,529,924
| 156
| 79
|
Apache-2.0
| 2023-09-13T13:43:50
| 2018-02-22T20:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 597
|
pyi
|
cassandra.pyi
|
"""
This type stub file was generated by pyright.
"""
from .aws import Action as BaseAction
from .aws import BaseARN
service_name = "Amazon Keyspaces (for Apache Cassandra)"
prefix = "cassandra"
class Action(BaseAction):
def __init__(self, action=...) -> None: ...
class ARN(BaseARN):
def __init__(self, resource=..., region=..., account=...) -> None: ...
Alter = Action("Alter")
Create = Action("Create")
Drop = Action("Drop")
Modify = Action("Modify")
Restore = Action("Restore")
Select = Action("Select")
TagResource = Action("TagResource")
UntagResource = Action("UntagResource")
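# Hedged usage sketch (names from the awacs package README, assumed available at
# runtime; this .pyi stub itself only carries type information): the generated
# Action constants plug straight into an IAM policy document, and the ARN string
# below is a made-up example.
from awacs.aws import Allow, PolicyDocument, Statement
from awacs import cassandra

pd = PolicyDocument(
    Version="2012-10-17",
    Statement=[
        Statement(
            Effect=Allow,
            Action=[cassandra.Select, cassandra.Alter],
            Resource=["arn:aws:cassandra:us-east-1:123456789012:/keyspace/my_keyspace/"],
        ),
    ],
)
print(pd.to_json())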
|
8fb9597377ec47a63145db12f97dc11b86a36218
|
952d8c37764393aa180f53ba4d31bec6c0806fd6
|
/py-polars/tests/unit/io/test_lazy_json.py
|
8c16e9039e2c93ce55615355be0af774b646d7e1
|
[
"MIT"
] |
permissive
|
pola-rs/polars
|
dff713f82f0cc29a98bc3f0e3ee3ba1e0fb49ef3
|
c50c1e69bd36f60a8864ea49fe40d0e17503f11c
|
refs/heads/main
| 2023-08-28T00:13:27.043234
| 2023-08-27T18:34:52
| 2023-08-27T18:34:52
| 263,727,855
| 18,211
| 1,202
|
MIT
| 2023-09-14T18:52:43
| 2020-05-13T19:45:33
|
Rust
|
UTF-8
|
Python
| false
| false
| 2,841
|
py
|
test_lazy_json.py
|
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
import polars as pl
from polars.testing import assert_frame_equal
if TYPE_CHECKING:
from pathlib import Path
@pytest.fixture()
def foods_ndjson_path(io_files_path: Path) -> Path:
return io_files_path / "foods1.ndjson"
def test_scan_ndjson(foods_ndjson_path: Path) -> None:
df = pl.scan_ndjson(foods_ndjson_path, row_count_name="row_count").collect()
assert df["row_count"].to_list() == list(range(27))
df = (
pl.scan_ndjson(foods_ndjson_path, row_count_name="row_count")
.filter(pl.col("category") == pl.lit("vegetables"))
.collect()
)
assert df["row_count"].to_list() == [0, 6, 11, 13, 14, 20, 25]
df = (
pl.scan_ndjson(foods_ndjson_path, row_count_name="row_count")
.with_row_count("foo", 10)
.filter(pl.col("category") == pl.lit("vegetables"))
.collect()
)
assert df["foo"].to_list() == [10, 16, 21, 23, 24, 30, 35]
@pytest.mark.write_disk()
def test_scan_with_projection(tmp_path: Path) -> None:
tmp_path.mkdir(exist_ok=True)
json = r"""
{"text": "\"hello", "id": 1}
{"text": "\n{\n\t\t\"inner\": \"json\n}\n", "id": 10}
{"id": 0, "text":"\"","date":"2013-08-03 15:17:23"}
{"id": 1, "text":"\"123\"","date":"2009-05-19 21:07:53"}
{"id": 2, "text":"/....","date":"2009-05-19 21:07:53"}
{"id": 3, "text":"\n\n..","date":"2"}
{"id": 4, "text":"\"'/\n...","date":"2009-05-19 21:07:53"}
{"id": 5, "text":".h\"h1hh\\21hi1e2emm...","date":"2009-05-19 21:07:53"}
{"id": 6, "text":"xxxx....","date":"2009-05-19 21:07:53"}
{"id": 7, "text":".\"quoted text\".","date":"2009-05-19 21:07:53"}
"""
json_bytes = bytes(json, "utf-8")
file_path = tmp_path / "escape_chars.json"
file_path.write_bytes(json_bytes)
actual = pl.scan_ndjson(file_path).select(["id", "text"]).collect()
expected = pl.DataFrame(
{
"id": [1, 10, 0, 1, 2, 3, 4, 5, 6, 7],
"text": [
'"hello',
'\n{\n\t\t"inner": "json\n}\n',
'"',
'"123"',
"/....",
"\n\n..",
"\"'/\n...",
'.h"h1hh\\21hi1e2emm...',
"xxxx....",
'."quoted text".',
],
}
)
assert_frame_equal(actual, expected)
def test_glob_n_rows(io_files_path: Path) -> None:
file_path = io_files_path / "foods*.ndjson"
df = pl.scan_ndjson(file_path, n_rows=40).collect()
# 27 rows from foods1.ndjson and 13 from foods2.ndjson
assert df.shape == (40, 4)
# take first and last rows
assert df[[0, 39]].to_dict(False) == {
"category": ["vegetables", "seafood"],
"calories": [45, 146],
"fats_g": [0.5, 6.0],
"sugars_g": [2, 2],
}
|
27363d98c53938099462ffd585c8a94e8e78bc23
|
7e521e8c2279f0893cc3a4dd1f30dd4540ba9120
|
/addresses/models.py
|
c321d915bdbd495a73ceca12968d31bd702e40d6
|
[
"Apache-2.0"
] |
permissive
|
blockcypher/explorer
|
9b95a526ad50f21dff33b840dc23ff8329fca1bb
|
01f01c04e3b1f6893785a5e3ae949aa237760fb4
|
refs/heads/master
| 2023-08-30T00:07:47.074818
| 2023-07-10T15:39:04
| 2023-07-10T15:39:04
| 26,288,508
| 1,130
| 758
|
Apache-2.0
| 2023-09-05T13:27:34
| 2014-11-06T20:28:27
|
Python
|
UTF-8
|
Python
| false
| false
| 4,505
|
py
|
models.py
|
from django.db import models
from django.utils.timezone import now
from blockcypher.constants import COIN_CHOICES, COIN_SYMBOL_MAPPINGS
from emails.trigger import send_and_log
class AddressSubscription(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
unsubscribed_at = models.DateTimeField(blank=True, null=True, db_index=True, help_text='User disabled')
disabled_at = models.DateTimeField(blank=True, null=True, db_index=True, help_text='Admin disabled')
coin_symbol = models.CharField(choices=COIN_CHOICES, max_length=16, null=False, blank=False, db_index=True)
b58_address = models.CharField(blank=False, null=False, max_length=64, db_index=True)
notify_on_broadcast = models.BooleanField(db_index=True, default=True)
notify_on_first_confirm = models.BooleanField(db_index=True, default=False)
notify_on_sixth_confirm = models.BooleanField(db_index=True, default=True)
notify_on_deposit = models.BooleanField(db_index=True, default=True)
notify_on_withdrawal = models.BooleanField(db_index=True, default=True)
auth_user = models.ForeignKey('users.AuthUser', blank=False, null=False, on_delete=models.CASCADE)
blockcypher_id = models.CharField(max_length=64, null=False, blank=False, db_index=True)
address_forwarding_obj = models.ForeignKey('addresses.AddressForwarding', blank=True, null=True, on_delete=models.CASCADE)
def __str__(self):
return '%s to %s' % (self.id, self.b58_address)
def get_currency_abbrev(self):
return COIN_SYMBOL_MAPPINGS[self.coin_symbol]['currency_abbrev']
def get_currency_display_name(self):
return COIN_SYMBOL_MAPPINGS[self.coin_symbol]['display_name']
def send_notifications_welcome_email(self):
# TODO: add abuse check so you can only send this email to an unconfirmed user X times
b58_address = self.b58_address
context_dict = {
'b58_address': b58_address,
'cs_display': COIN_SYMBOL_MAPPINGS[self.coin_symbol]['display_name']
}
return send_and_log(
subject='Please Confirm Your Email Subscription to %s' % b58_address,
body_template='new_user_confirmation.html',
to_user=self.auth_user,
body_context=context_dict,
fkey_objs={'address_subscription': self},
)
def user_unsubscribe_subscription(self):
self.unsubscribed_at = now()
self.save()
return self
def admin_unsubscribe_subscription(self):
self.disabled_at = now()
self.save()
return self
class AddressForwarding(models.Model):
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
archived_at = models.DateTimeField(blank=True, null=True, db_index=True)
coin_symbol = models.CharField(choices=COIN_CHOICES, max_length=16, null=False, blank=False, db_index=True)
initial_address = models.CharField(blank=False, null=False, max_length=64, db_index=True)
destination_address = models.CharField(blank=False, null=False, max_length=64, db_index=True)
auth_user = models.ForeignKey('users.AuthUser', blank=True, null=True, on_delete=models.CASCADE)
blockcypher_id = models.CharField(max_length=64, null=False, blank=False, db_index=True)
def __str__(self):
return '%s to %s' % (self.initial_address, self.destination_address)
def get_currency_abbrev(self):
return COIN_SYMBOL_MAPPINGS[self.coin_symbol]['currency_abbrev']
def get_currency_display_name(self):
return COIN_SYMBOL_MAPPINGS[self.coin_symbol]['display_name']
def send_forwarding_welcome_email(self):
# TODO: add abuse check so you can only send this email to an unconfirmed user X times
if not self.auth_user:
return
context_dict = {
'initial_address': self.initial_address,
'destination_address': self.destination_address,
'cs_display': COIN_SYMBOL_MAPPINGS[self.coin_symbol]['display_name']
}
fkey_objs = {
'address_forwarding': self,
}
return send_and_log(
subject='Please Confirm Your Email Subscription to %s' % self.initial_address,
body_template='new_user_forwarding.html',
to_user=self.auth_user,
body_context=context_dict,
fkey_objs=fkey_objs,
)
|
a92593587a0525cf4973999945fb20eff767650d
|
6c066611b11a8de5e2c22c30cfcc578a4c49edce
|
/GLSL/Source/Crok_checkerboard_GL/Crok_checkerboard_GL.py
|
b86b30d27656bcb200e432660d17225a1425e84f
|
[] |
no_license
|
NatronGitHub/natron-plugins
|
ad2d9227637b4b86b45f92856fa54d327872a0a6
|
b0c499fb6391024f54be9f26ed41b5cf7475d574
|
refs/heads/master
| 2022-12-12T10:02:20.252222
| 2022-11-30T02:29:04
| 2022-11-30T02:29:04
| 130,576,224
| 332
| 67
| null | 2022-11-30T02:29:05
| 2018-04-22T14:39:29
|
Python
|
UTF-8
|
Python
| false
| false
| 29,712
|
py
|
Crok_checkerboard_GL.py
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE
# This file was automatically generated by Natron PyPlug exporter version 10.
# Hand-written code should be added in a separate file named Crok_checkerboard_GLExt.py
# See http://natron.readthedocs.org/en/master/devel/groups.html#adding-hand-written-code-callbacks-etc
# Note that Viewers are never exported
import NatronEngine
import sys
# Try to import the extensions file where callbacks and hand-written code should be located.
try:
from Crok_checkerboard_GLExt import *
except ImportError:
pass
def getPluginID():
return "natron.community.plugins.Crok_checkerboard_GL"
def getLabel():
return "Crok_checkerboard_GL"
def getVersion():
return 1.0
def getIconPath():
return "Crok_checkerboard_GL.png"
def getGrouping():
return "Community/GLSL/Source"
def getPluginDescription():
return "Creates a checkerboard pattern."
def createInstance(app,group):
# Create all nodes in the group
# Create the parameters of the group node the same way we did for all internal nodes
lastNode = group
lastNode.setColor(0.9529, 0.4314, 1)
# Create the user parameters
lastNode.Controls = lastNode.createPageParam("Controls", "Controls")
param = lastNode.createStringParam("sep01", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep01 = param
del param
param = lastNode.createStringParam("sep02", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep02 = param
del param
param = lastNode.createSeparatorParam("TRANSFORM", "Transform")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.TRANSFORM = param
del param
param = lastNode.createStringParam("sep03", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep03 = param
del param
param = lastNode.createStringParam("sep04", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep04 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat1", "Zoom : ")
param.setMinimum(2, 0)
param.setMaximum(1000, 0)
param.setDisplayMinimum(2, 0)
param.setDisplayMaximum(250, 0)
param.setDefaultValue(10, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat1 = param
del param
param = lastNode.createStringParam("sep05", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep05 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat0", "Rotation : ")
param.setMinimum(-10000, 0)
param.setMaximum(10000, 0)
param.setDisplayMinimum(-360, 0)
param.setDisplayMaximum(360, 0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat0 = param
del param
param = lastNode.createStringParam("sep06", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep06 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat3", "Aspect : ")
param.setMinimum(0.01, 0)
param.setMaximum(99.99999999999999, 0)
param.setDisplayMinimum(0.01, 0)
param.setDisplayMaximum(50, 0)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat3 = param
del param
param = lastNode.createStringParam("sep07", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep07 = param
del param
param = lastNode.createDoubleParam("Shadertoy1_2paramValueFloat2", "Blur : ")
param.setMinimum(0, 0)
param.setMaximum(10, 0)
param.setDisplayMinimum(0, 0)
param.setDisplayMaximum(10, 0)
param.setDefaultValue(3, 0)
param.restoreDefaultValue(0)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueFloat2 = param
del param
param = lastNode.createStringParam("sep08", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep08 = param
del param
param = lastNode.createStringParam("sep09", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep09 = param
del param
param = lastNode.createSeparatorParam("COLOURS", "Colours")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.COLOURS = param
del param
param = lastNode.createStringParam("sep10", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep10 = param
del param
param = lastNode.createStringParam("sep11", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep11 = param
del param
param = lastNode.createColorParam("Shadertoy1_2paramValueVec44", "Color 1 : ", True)
param.setDefaultValue(1, 0)
param.restoreDefaultValue(0)
param.setDefaultValue(1, 1)
param.restoreDefaultValue(1)
param.setDefaultValue(1, 2)
param.restoreDefaultValue(2)
param.setDefaultValue(1, 3)
param.restoreDefaultValue(3)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueVec44 = param
del param
param = lastNode.createStringParam("sep12", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep12 = param
del param
param = lastNode.createColorParam("Shadertoy1_2paramValueVec45", "Color 2 : ", True)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2paramValueVec45 = param
del param
param = lastNode.createStringParam("sep13", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep13 = param
del param
param = lastNode.createStringParam("sep14", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep14 = param
del param
param = lastNode.createSeparatorParam("OUTPUT", "Output")
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.OUTPUT = param
del param
param = lastNode.createStringParam("sep15", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep15 = param
del param
param = lastNode.createStringParam("sep16", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep16 = param
del param
param = lastNode.createChoiceParam("Shadertoy1_2bbox", "Output BBox : ")
param.setDefaultValue(1)
param.restoreDefaultValue()
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(True)
param.setAnimationEnabled(True)
lastNode.Shadertoy1_2bbox = param
del param
param = lastNode.createChoiceParam("Shadertoy1_2NatronParamFormatChoice", "Format : ")
param.setDefaultValue(6)
param.restoreDefaultValue()
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setAddNewLine(False)
param.setAnimationEnabled(False)
lastNode.Shadertoy1_2NatronParamFormatChoice = param
del param
param = lastNode.createStringParam("sep17", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep17 = param
del param
param = lastNode.createStringParam("sep18", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Controls.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep18 = param
del param
lastNode.Credits = lastNode.createPageParam("Credits", "Credits")
param = lastNode.createStringParam("sep101", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep101 = param
del param
param = lastNode.createStringParam("sep102", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep102 = param
del param
param = lastNode.createSeparatorParam("NAME", "Crok_checkerboard_GL v1.0")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.NAME = param
del param
param = lastNode.createStringParam("sep103", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep103 = param
del param
param = lastNode.createStringParam("sep104", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep104 = param
del param
param = lastNode.createSeparatorParam("LINE01", "")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.LINE01 = param
del param
param = lastNode.createStringParam("sep105", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep105 = param
del param
param = lastNode.createStringParam("sep106", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep106 = param
del param
param = lastNode.createSeparatorParam("FR", "ShaderToy 0.8.8")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.FR = param
del param
param = lastNode.createStringParam("sep107", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep107 = param
del param
param = lastNode.createStringParam("sep108", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep108 = param
del param
param = lastNode.createSeparatorParam("CONVERSION", " (Fabrice Fernandez - 2018)")
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setPersistent(False)
param.setEvaluateOnChange(False)
lastNode.CONVERSION = param
del param
param = lastNode.createStringParam("sep109", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep109 = param
del param
param = lastNode.createStringParam("sep110", "")
param.setType(NatronEngine.StringParam.TypeEnum.eStringTypeLabel)
# Add the param to the page
lastNode.Credits.addParam(param)
# Set param properties
param.setHelp("")
param.setAddNewLine(True)
param.setEvaluateOnChange(False)
param.setAnimationEnabled(False)
lastNode.sep110 = param
del param
# Refresh the GUI with the newly created parameters
lastNode.setPagesOrder(['Controls', 'Credits', 'Node', 'Settings'])
lastNode.refreshUserParamsGUI()
del lastNode
# Start of node "Output2"
lastNode = app.createNode("fr.inria.built-in.Output", 1, group)
lastNode.setLabel("Output2")
lastNode.setPosition(4139, 4048)
lastNode.setSize(90, 36)
lastNode.setColor(0.7, 0.7, 0.7)
groupOutput2 = lastNode
del lastNode
# End of node "Output2"
# Start of node "Shadertoy1_2"
lastNode = app.createNode("net.sf.openfx.Shadertoy", 1, group)
lastNode.setScriptName("Shadertoy1_2")
lastNode.setLabel("Shadertoy1_2")
lastNode.setPosition(4139, 3835)
lastNode.setSize(90, 36)
lastNode.setColor(0.3, 0.5, 0.2)
groupShadertoy1_2 = lastNode
param = lastNode.getParam("paramValueFloat0")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("paramValueFloat1")
if param is not None:
param.setValue(10, 0)
del param
param = lastNode.getParam("paramValueFloat2")
if param is not None:
param.setValue(3, 0)
del param
param = lastNode.getParam("paramValueFloat3")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramValueVec44")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
param.setValue(1, 3)
del param
param = lastNode.getParam("paramValueVec45")
if param is not None:
param.setValue(0, 0)
param.setValue(0, 1)
param.setValue(0, 2)
param.setValue(0, 3)
del param
param = lastNode.getParam("imageShaderSource")
if param is not None:
param.setValue("//\n//\n// MMMMMMMMMMMMMMMMMMMMMMMMMMMM\n// MM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MMMM MMMMMMM MMM. .MM\n// MM. .MMM MMMMMM MMM. .MM\n// MM. .MmM MMMM MMM. .MM\n// MM. .MMM MM MMM. .MM\n// MM. .MMM M MMM. .MM\n// MM. .MMM MMM. .MM\n// MM. .MMM MMM. .MM\n// MM. .MMM M MMM. .MM\n// MM. .MMM MM MMM. .MM\n// MM. .MMM MMM MMM. .MM\n// MM. .MMM MMMM MMM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MMMMMMMMMMMMMMMMMMMMMM. .MM\n// MM. .MM\n// MMMMMMMMMMMMMMMMMMMMMMMMMMMM\n//\n//\n//\n//\n// Adaptation pour Natron par F. Fernandez\n// Code original : crok_checkerboard Matchbox pour Autodesk Flame\n\n// Adapted to Natron by F.Fernandez\n// Original code : crok_checkerboard Matchbox for Autodesk Flame\n\n\n\nuniform float rot = 0.0; // Rotation : (rotation), min=-10000, max=10000\nuniform float zoom = 10.0; // Zoom : (zoom), min=2.0, max=1000\nuniform float blur = 0.0; // Blur : (blur), min=0.0, max=1000.0\nuniform float Aspect = 1.0; // Aspect : (aspect), min=0.01, max=100\n\nuniform vec4 color1 = vec4(1.0,1.0,1.0,1.0); // Color 1 : (color 1)\nuniform vec4 color2 = vec4(0.0,0.0,0.0,0.0); // Color 2 : (color 2)\n\n\n\n#define PI 3.14159265359\n\nvec2 resolution = vec2(iResolution.x, iResolution.y);\n\n \nvoid mainImage( out vec4 fragColor, in vec2 fragCoord )\n{\n\tvec2 uv = ((fragCoord.xy / resolution.xy) - 0.5);\n\tfloat bl = 0.0;\n\n\tbl += blur; \n\n\tfloat b = bl * zoom / resolution.x;\n\n\tfloat frameRatio = iResolution.x / iResolution.y;\n\tuv.x *= frameRatio;\n\t// degrees to radians conversion\n\tfloat rad_rot = rot * PI / 180.0; \n\n\t// rotation\n\tmat2 rotation = mat2( cos(-rad_rot), -sin(-rad_rot), sin(-rad_rot), cos(-rad_rot));\n\tuv *= rotation;\n\t\n\tuv.x *= Aspect;\n\tuv *= zoom;\n\t\n\t\n vec2 anti_a = sin(PI * uv);\n\tvec2 square = smoothstep( -b, b, anti_a );\n\tsquare = 2.0 * square - 1.0;\t\t\t\t\t\t\n float a = 0.5 * (square.x * square.y) + 0.5;\n\tvec3 c = mix(color1.rgb, color2.rgb, a); \n\tfragColor = vec4(c, mix(color1.a, color2.a, a));\n}")
del param
param = lastNode.getParam("inputEnable0")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable1")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable2")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("inputEnable3")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("bbox")
if param is not None:
param.set("format")
del param
param = lastNode.getParam("NatronParamFormatSize")
if param is not None:
param.setValue(1920, 0)
param.setValue(1080, 1)
del param
param = lastNode.getParam("mouseParams")
if param is not None:
param.setValue(False)
del param
param = lastNode.getParam("paramCount")
if param is not None:
param.setValue(6, 0)
del param
param = lastNode.getParam("paramType0")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName0")
if param is not None:
param.setValue("rot")
del param
param = lastNode.getParam("paramLabel0")
if param is not None:
param.setValue("Rotation :")
del param
param = lastNode.getParam("paramHint0")
if param is not None:
param.setValue("rotation")
del param
param = lastNode.getParam("paramMinFloat0")
if param is not None:
param.setValue(-10000, 0)
del param
param = lastNode.getParam("paramMaxFloat0")
if param is not None:
param.setValue(10000, 0)
del param
param = lastNode.getParam("paramType1")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName1")
if param is not None:
param.setValue("zoom")
del param
param = lastNode.getParam("paramLabel1")
if param is not None:
param.setValue("Zoom :")
del param
param = lastNode.getParam("paramHint1")
if param is not None:
param.setValue("zoom")
del param
param = lastNode.getParam("paramDefaultFloat1")
if param is not None:
param.setValue(10, 0)
del param
param = lastNode.getParam("paramMinFloat1")
if param is not None:
param.setValue(2, 0)
del param
param = lastNode.getParam("paramMaxFloat1")
if param is not None:
param.setValue(1000, 0)
del param
param = lastNode.getParam("paramType2")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName2")
if param is not None:
param.setValue("blur")
del param
param = lastNode.getParam("paramLabel2")
if param is not None:
param.setValue("Blur :")
del param
param = lastNode.getParam("paramHint2")
if param is not None:
param.setValue("blur")
del param
param = lastNode.getParam("paramMinFloat2")
if param is not None:
param.setValue(0, 0)
del param
param = lastNode.getParam("paramMaxFloat2")
if param is not None:
param.setValue(1000, 0)
del param
param = lastNode.getParam("paramType3")
if param is not None:
param.set("float")
del param
param = lastNode.getParam("paramName3")
if param is not None:
param.setValue("Aspect")
del param
param = lastNode.getParam("paramLabel3")
if param is not None:
param.setValue("Aspect :")
del param
param = lastNode.getParam("paramHint3")
if param is not None:
param.setValue("aspect")
del param
param = lastNode.getParam("paramDefaultFloat3")
if param is not None:
param.setValue(1, 0)
del param
param = lastNode.getParam("paramMinFloat3")
if param is not None:
param.setValue(0.01, 0)
del param
param = lastNode.getParam("paramMaxFloat3")
if param is not None:
        param.setValue(100, 0)
del param
param = lastNode.getParam("paramType4")
if param is not None:
param.set("vec4")
del param
param = lastNode.getParam("paramName4")
if param is not None:
param.setValue("color1")
del param
param = lastNode.getParam("paramLabel4")
if param is not None:
param.setValue("Color 1 :")
del param
param = lastNode.getParam("paramHint4")
if param is not None:
param.setValue("color 1")
del param
param = lastNode.getParam("paramDefaultVec44")
if param is not None:
param.setValue(1, 0)
param.setValue(1, 1)
param.setValue(1, 2)
param.setValue(1, 3)
del param
param = lastNode.getParam("paramType5")
if param is not None:
param.set("vec4")
del param
param = lastNode.getParam("paramName5")
if param is not None:
param.setValue("color2")
del param
param = lastNode.getParam("paramLabel5")
if param is not None:
param.setValue("Color 2 :")
del param
param = lastNode.getParam("paramHint5")
if param is not None:
param.setValue("color 2")
del param
del lastNode
# End of node "Shadertoy1_2"
# Now that all nodes are created we can connect them together, restore expressions
groupOutput2.connectInput(0, groupShadertoy1_2)
param = groupShadertoy1_2.getParam("paramValueFloat0")
group.getParam("Shadertoy1_2paramValueFloat0").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat1")
group.getParam("Shadertoy1_2paramValueFloat1").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat2")
group.getParam("Shadertoy1_2paramValueFloat2").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueFloat3")
group.getParam("Shadertoy1_2paramValueFloat3").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueVec44")
group.getParam("Shadertoy1_2paramValueVec44").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("paramValueVec45")
group.getParam("Shadertoy1_2paramValueVec45").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("bbox")
group.getParam("Shadertoy1_2bbox").setAsAlias(param)
del param
param = groupShadertoy1_2.getParam("NatronParamFormatChoice")
group.getParam("Shadertoy1_2NatronParamFormatChoice").setAsAlias(param)
del param
try:
extModule = sys.modules["Crok_checkerboard_GLExt"]
except KeyError:
extModule = None
if extModule is not None and hasattr(extModule ,"createInstanceExt") and hasattr(extModule.createInstanceExt,"__call__"):
extModule.createInstanceExt(app,group)
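A minimal sketch, not part of the generated PyPlug above, of how the aliased group parameters (for example Shadertoy1_2paramValueFloat0/1 and Shadertoy1_2paramValueVec44) could be driven from Natron's Script Editor once the plug-in is instantiated; the plug-in identifier passed to createNode below is an assumption and may differ in your installation.

# Illustrative only -- the plug-in ID is assumed, not taken from this file.
node = app.createNode("fr.inria.Crok_checkerboard_GL")      # hypothetical plug-in ID
node.getParam("Shadertoy1_2paramValueFloat0").setValue(45)  # rotation, in degrees
node.getParam("Shadertoy1_2paramValueFloat1").setValue(25)  # zoom
color1 = node.getParam("Shadertoy1_2paramValueVec44")       # color 1 (RGBA)
for dim, value in enumerate((1.0, 0.5, 0.0, 1.0)):
    color1.setValue(value, dim)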
| repo: gems-uff/noworkflow | path: /capture/noworkflow/tests/prov_definition/test_code_block_definition.py | license: MIT (permissive) | language: Python | size: 15,577 bytes |
# Copyright (c) 2016 Universidade Federal Fluminense (UFF)
# Copyright (c) 2016 Polytechnic Institute of New York University.
# This file is part of noWorkflow.
# Please, consult the license terms in the LICENSE file.
"""Test Code Block collection"""
from __future__ import (absolute_import, print_function,
division, unicode_literals)
from ..collection_testcase import CollectionTestCase
from ...now.utils.cross_version import PY35, only
class TestCodeBlockDefinition(CollectionTestCase):
"""Test Code Block collection"""
def test_script(self):
"""Test script collection. Do not ignore comments."""
self.script("# script.py\n"
"a = 2\n"
"# other")
self.compile()
script = self.find_code_component(name="script.py")
self.assertEqual(script.type, "script")
self.assertEqual(script.mode, "w")
self.assertEqual(script.first_char_line, 1)
self.assertEqual(script.first_char_column, 0)
self.assertEqual(script.last_char_line, 3)
self.assertEqual(script.last_char_column, 7)
self.assertEqual(script.container_id, -1)
script_block = self.metascript.code_blocks_store[script.id]
self.assertEqual(script_block.code, "# script.py\na = 2\n# other")
self.assertEqual(script_block.docstring, "")
self.assertTrue(bool(script_block.code_hash))
def test_script_with_docstring(self):
"""Test script collection with docstring"""
self.script("# script.py\n"
"'doc'\n"
"a = 2")
self.compile()
script = self.find_code_component(name="script.py")
self.assertEqual(script.type, "script")
self.assertEqual(script.mode, "w")
self.assertEqual(script.first_char_line, 1)
self.assertEqual(script.first_char_column, 0)
self.assertEqual(script.last_char_line, 3)
self.assertEqual(script.last_char_column, 5)
self.assertEqual(script.container_id, -1)
script_block = self.metascript.code_blocks_store[script.id]
self.assertEqual(script_block.code, "# script.py\n'doc'\na = 2")
self.assertEqual(script_block.docstring, "doc")
self.assertTrue(bool(script_block.code_hash))
def test_function_definition(self):
"""Test function definition collection"""
self.script("# script.py\n"
"def f():\n"
" 'fdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
function_def = self.find_code_component(name="f")
self.assertEqual(function_def.type, "function_def")
self.assertEqual(function_def.mode, "w")
self.assertEqual(function_def.first_char_line, 2)
self.assertEqual(function_def.first_char_column, 0)
self.assertEqual(function_def.last_char_line, 4)
self.assertEqual(function_def.last_char_column, 8)
self.assertEqual(function_def.container_id, script.id)
function_def_block = self.metascript.code_blocks_store[function_def.id]
self.assertEqual(function_def_block.code,
"def f():\n"
" 'fdoc'\n"
" pass")
self.assertEqual(function_def_block.docstring, "fdoc")
self.assertTrue(bool(function_def_block.code_hash))
def test_function_definition_with_args(self):
"""Test function definition collection with arguments"""
self.script("# script.py\n"
"def f(x, y=False):\n"
" 'fdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
function_def = self.find_code_component(name="f")
var_x = self.find_code_component(name="x")
var_y = self.find_code_component(name="y")
false = self.find_code_component(name="False")
self.assertEqual(function_def.type, "function_def")
self.assertEqual(function_def.mode, "w")
self.assertEqual(function_def.first_char_line, 2)
self.assertEqual(function_def.first_char_column, 0)
self.assertEqual(function_def.last_char_line, 4)
self.assertEqual(function_def.last_char_column, 8)
self.assertEqual(function_def.container_id, script.id)
self.assertEqual(var_x.type, "param")
self.assertEqual(var_x.mode, "w")
self.assertEqual(var_x.first_char_line, 2)
self.assertEqual(var_x.first_char_column, 6)
self.assertEqual(var_x.last_char_line, 2)
self.assertEqual(var_x.last_char_column, 7)
self.assertEqual(var_x.container_id, function_def.id)
self.assertEqual(var_y.type, "param")
self.assertEqual(var_y.mode, "w")
self.assertEqual(var_y.first_char_line, 2)
self.assertEqual(var_y.first_char_column, 9)
self.assertEqual(var_y.last_char_line, 2)
self.assertEqual(var_y.last_char_column, 10)
self.assertEqual(var_y.container_id, function_def.id)
#self.assertEqual(false.type, "literal")
self.assertEqual(false.mode, "r")
self.assertEqual(false.first_char_line, 2)
self.assertEqual(false.first_char_column, 11)
self.assertEqual(false.last_char_line, 2)
self.assertEqual(false.last_char_column, 16)
self.assertEqual(false.container_id, function_def.id)
function_def_block = self.metascript.code_blocks_store[function_def.id]
self.assertEqual(function_def_block.code,
"def f(x, y=False):\n"
" 'fdoc'\n"
" pass")
self.assertEqual(function_def_block.docstring, "fdoc")
self.assertTrue(bool(function_def_block.code_hash))
def test_function_definition_with_decorator(self):
"""Test function definition collection with decorator"""
self.script("# script.py\n"
"def g(x):\n"
" return x\n"
"@g\n"
"def f():\n"
" 'fdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
function_def = self.find_code_component(name="f")
self.assertEqual(function_def.type, "function_def")
self.assertEqual(function_def.mode, "w")
self.assertEqual(function_def.first_char_line, 4)
self.assertEqual(function_def.first_char_column, 0)
self.assertEqual(function_def.last_char_line, 7)
self.assertEqual(function_def.last_char_column, 8)
self.assertEqual(function_def.container_id, script.id)
function_def_block = self.metascript.code_blocks_store[function_def.id]
self.assertEqual(function_def_block.code,
"@g\n"
"def f():\n"
" 'fdoc'\n"
" pass")
self.assertEqual(function_def_block.docstring, "fdoc")
self.assertTrue(bool(function_def_block.code_hash))
@only(PY35)
def test_async_function_definition(self):
"""Test async function definition collection"""
self.script("# script.py\n"
"async def f():\n"
" 'fdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
function_def = self.find_code_component(name="f")
self.assertEqual(function_def.type, "function_def")
self.assertEqual(function_def.mode, "w")
self.assertEqual(function_def.first_char_line, 2)
self.assertEqual(function_def.first_char_column, 0)
self.assertEqual(function_def.last_char_line, 4)
self.assertEqual(function_def.last_char_column, 8)
self.assertEqual(function_def.container_id, script.id)
function_def_block = self.metascript.code_blocks_store[function_def.id]
self.assertEqual(function_def_block.code,
"async def f():\n"
" 'fdoc'\n"
" pass")
self.assertEqual(function_def_block.docstring, "fdoc")
self.assertTrue(bool(function_def_block.code_hash))
def test_class_definition(self):
"""Test class definition collection"""
self.script("# script.py\n"
"class C():\n"
" 'cdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
class_def = self.find_code_component(name="C")
self.assertEqual(class_def.type, "class_def")
self.assertEqual(class_def.mode, "w")
self.assertEqual(class_def.first_char_line, 2)
self.assertEqual(class_def.first_char_column, 0)
self.assertEqual(class_def.last_char_line, 4)
self.assertEqual(class_def.last_char_column, 8)
self.assertEqual(class_def.container_id, script.id)
class_def_block = self.metascript.code_blocks_store[class_def.id]
self.assertEqual(class_def_block.code,
"class C():\n"
" 'cdoc'\n"
" pass")
self.assertEqual(class_def_block.docstring, "cdoc")
self.assertTrue(bool(class_def_block.code_hash))
def test_method_definition(self):
"""Test method definition collection"""
self.script("# script.py\n"
"class C():\n"
" 'cdoc'\n"
" def f(self):\n"
" 'mdoc'\n"
" pass")
self.compile()
class_def = self.find_code_component(name="C")
method_def = self.find_code_component(name="f")
self.assertEqual(method_def.type, "function_def")
self.assertEqual(method_def.mode, "w")
self.assertEqual(method_def.first_char_line, 4)
self.assertEqual(method_def.first_char_column, 4)
self.assertEqual(method_def.last_char_line, 6)
self.assertEqual(method_def.last_char_column, 12)
self.assertEqual(method_def.container_id, class_def.id)
method_def_block = self.metascript.code_blocks_store[method_def.id]
self.assertEqual(method_def_block.code,
"def f(self):\n"
" 'mdoc'\n"
" pass")
self.assertEqual(method_def_block.docstring, "mdoc")
self.assertTrue(bool(method_def_block.code_hash))
def test_closure_definition(self):
"""Test closure definition collection"""
self.script("# script.py\n"
"def f():\n"
" 'fdoc'\n"
" def c():\n"
" 'cdoc'\n"
" pass")
self.compile()
function_def = self.find_code_component(name="f")
closure_def = self.find_code_component(name="c")
self.assertEqual(closure_def.type, "function_def")
self.assertEqual(closure_def.mode, "w")
self.assertEqual(closure_def.first_char_line, 4)
self.assertEqual(closure_def.first_char_column, 4)
self.assertEqual(closure_def.last_char_line, 6)
self.assertEqual(closure_def.last_char_column, 12)
self.assertEqual(closure_def.container_id, function_def.id)
closure_def_block = self.metascript.code_blocks_store[closure_def.id]
self.assertEqual(closure_def_block.code,
"def c():\n"
" 'cdoc'\n"
" pass")
self.assertEqual(closure_def_block.docstring, "cdoc")
self.assertTrue(bool(closure_def_block.code_hash))
def test_class_definition_with_base(self):
"""Test class definition collection with arguments"""
self.script("# script.py\n"
"class C(object):\n"
" 'cdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
class_def = self.find_code_component(name="C")
var_object = self.find_code_component(name="object")
self.assertEqual(class_def.type, "class_def")
self.assertEqual(class_def.mode, "w")
self.assertEqual(class_def.first_char_line, 2)
self.assertEqual(class_def.first_char_column, 0)
self.assertEqual(class_def.last_char_line, 4)
self.assertEqual(class_def.last_char_column, 8)
self.assertEqual(class_def.container_id, script.id)
self.assertEqual(var_object.type, "name")
self.assertEqual(var_object.mode, "r")
self.assertEqual(var_object.first_char_line, 2)
self.assertEqual(var_object.first_char_column, 8)
self.assertEqual(var_object.last_char_line, 2)
self.assertEqual(var_object.last_char_column, 14)
self.assertEqual(var_object.container_id, class_def.id)
class_def_block = self.metascript.code_blocks_store[class_def.id]
self.assertEqual(class_def_block.code,
"class C(object):\n"
" 'cdoc'\n"
" pass")
self.assertEqual(class_def_block.docstring, "cdoc")
self.assertTrue(bool(class_def_block.code_hash))
@only(PY35)
def test_class_definition_with_metaclass(self):
"""Test class definition collection with arguments"""
self.script("# script.py\n"
"class C(object, metaclass=type):\n"
" 'cdoc'\n"
" pass\n")
self.compile()
script = self.find_code_component(name="script.py")
class_def = self.find_code_component(name="C")
var_object = self.find_code_component(name="object")
var_type = self.find_code_component(name="type")
self.assertEqual(class_def.type, "class_def")
self.assertEqual(class_def.mode, "w")
self.assertEqual(class_def.first_char_line, 2)
self.assertEqual(class_def.first_char_column, 0)
self.assertEqual(class_def.last_char_line, 4)
self.assertEqual(class_def.last_char_column, 8)
self.assertEqual(class_def.container_id, script.id)
self.assertEqual(var_object.type, "name")
self.assertEqual(var_object.mode, "r")
self.assertEqual(var_object.first_char_line, 2)
self.assertEqual(var_object.first_char_column, 8)
self.assertEqual(var_object.last_char_line, 2)
self.assertEqual(var_object.last_char_column, 14)
self.assertEqual(var_object.container_id, class_def.id)
self.assertEqual(var_type.type, "name")
self.assertEqual(var_type.mode, "r")
self.assertEqual(var_type.first_char_line, 2)
self.assertEqual(var_type.first_char_column, 26)
self.assertEqual(var_type.last_char_line, 2)
self.assertEqual(var_type.last_char_column, 30)
self.assertEqual(var_type.container_id, class_def.id)
class_def_block = self.metascript.code_blocks_store[class_def.id]
self.assertEqual(class_def_block.code,
"class C(object, metaclass=type):\n"
" 'cdoc'\n"
" pass")
self.assertEqual(class_def_block.docstring, "cdoc")
self.assertTrue(bool(class_def_block.code_hash))
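The assertions above all revolve around source spans and docstrings of code blocks; a self-contained sketch of the same bookkeeping using only the standard ast module (Python 3.8+ for end positions, independent of noWorkflow's own collector) looks like this:

import ast

source = "# script.py\ndef f():\n    'fdoc'\n    pass\n"
tree = ast.parse(source)
func = next(n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef))
# 1-based line numbers, 0-based columns -- the same 2 0 4 8 span asserted in the tests above
print(func.lineno, func.col_offset, func.end_lineno, func.end_col_offset)  # 2 0 4 8
print(ast.get_docstring(func))  # fdoc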
| repo: Textualize/textual | path: /src/textual/widgets/_radio_button.py | license: MIT (permissive) | language: Python | size: 1,022 bytes |
"""Provides a radio button widget."""
from __future__ import annotations
from ._toggle_button import ToggleButton
class RadioButton(ToggleButton):
"""A radio button widget that represents a boolean value.
Note:
A `RadioButton` is best used within a [RadioSet][textual.widgets.RadioSet].
"""
BUTTON_INNER = "\u25CF"
"""The character used for the inside of the button."""
class Changed(ToggleButton.Changed):
"""Posted when the value of the radio button changes.
This message can be handled using an `on_radio_button_changed` method.
"""
@property
def radio_button(self) -> RadioButton:
"""The radio button that was changed."""
assert isinstance(self._toggle_button, RadioButton)
return self._toggle_button
@property
def control(self) -> RadioButton:
"""Alias for [Changed.radio_button][textual.widgets.RadioButton.Changed.radio_button]."""
return self.radio_button
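A hedged usage sketch (not part of this module) of the pattern the docstrings describe: RadioButtons grouped inside a RadioSet, with the Changed message handled through an on_radio_button_changed method. The RadioSet import and the App scaffolding are assumed from textual's public API.

from textual.app import App, ComposeResult
from textual.widgets import RadioButton, RadioSet


class ChoiceApp(App):
    def compose(self) -> ComposeResult:
        # A RadioSet keeps its RadioButtons mutually exclusive.
        yield RadioSet(
            RadioButton("Red"),
            RadioButton("Green", value=True),
            RadioButton("Blue"),
        )

    def on_radio_button_changed(self, event: RadioButton.Changed) -> None:
        self.log(f"{event.radio_button.label} -> {event.value}")


if __name__ == "__main__":
    ChoiceApp().run()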
| repo: CiscoTestAutomation/genieparser | path: /src/genie/libs/parser/nxos/tests/ShowInterface/cli/equal/golden10_expected.py | license: Apache-2.0 (permissive) | language: Python | size: 2,717 bytes |
expected_output = {
"Ethernet1/1": {
"port_channel": {"port_channel_member": False},
"link_state": "down",
"oper_status": "down",
"enabled": True,
"dedicated_interface": True,
"types": "1000/10000 Ethernet",
"mac_address": "8c60.4fff.ea8f",
"phys_address": "8c60.4fff.ea8f",
"mtu": 1500,
"bandwidth": 10000000,
"delay": 10,
"reliability": "255/255",
"txload": "1/255",
"rxload": "1/255",
"encapsulations": {"encapsulation": "arpa"},
"medium": "broadcast",
"port_mode": "access",
"media_type": "1G",
"duplex_mode": "auto",
"port_speed": "10",
"port_speed_unit": "Gb/s",
"beacon": "off",
"flow_control": {"receive": False, "send": False},
"switchport_monitor": "off",
"ethertype": "0x8100",
"last_link_flapped": "never",
"last_clear_counters": "never",
"interface_reset": 0,
"counters": {
"rate": {
"load_interval": 30,
"in_rate": 0,
"in_rate_pkts": 0,
"out_rate": 0,
"out_rate_pkts": 0,
"in_rate_bps": 0,
"in_rate_pps": 0,
"out_rate_bps": 0,
"out_rate_pps": 0,
},
"rx": True,
"in_unicast_pkts": 0,
"in_multicast_pkts": 0,
"in_broadcast_pkts": 0,
"last_clear": "never",
"in_pkts": 0,
"in_octets": 0,
"in_storm_suppression_bytes": 0,
"in_jumbo_packets": 0,
"in_runts": 0,
"in_oversize_frame": 0,
"in_crc_errors": 0,
"in_no_buffer": 0,
"in_errors": 0,
"in_short_frame": 0,
"in_overrun": 0,
"in_underrun": 0,
"in_ignored": 0,
"in_watchdog": 0,
"in_bad_etype_drop": 0,
"in_unknown_protos": 0,
"in_if_down_drop": 0,
"in_with_dribble": 0,
"in_discard": 0,
"in_mac_pause_frames": 0,
"tx": True,
"out_unicast_pkts": 0,
"out_multicast_pkts": 0,
"out_broadcast_pkts": 0,
"out_pkts": 0,
"out_octets": 0,
"out_jumbo_packets": 0,
"out_errors": 0,
"out_collision": 0,
"out_deferred": 0,
"out_late_collision": 0,
"out_lost_carrier": 0,
"out_no_carrier": 0,
"out_babble": 0,
"out_discard": 0,
"out_mac_pause_frames": 0,
},
}
}
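A hedged sketch (not part of the golden files) of how an expected dict like this is usually compared against parsed CLI output in a genieparser unit test; the import path, the mocked device, and the parse() keyword argument are assumptions.

from unittest.mock import Mock

from genie.libs.parser.nxos.show_interface import ShowInterface  # assumed import path

device = Mock(**{"execute.return_value": "<raw 'show interface Ethernet1/1' output>"})
parser = ShowInterface(device=device)
parsed = parser.parse(interface="Ethernet1/1")  # keyword argument assumed
assert parsed == expected_output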
| repo: MODFLOW-USGS/modflow6 | path: /autotest/test_gwt_adv01_fmi.py | licenses: LicenseRef-scancode-warranty-disclaimer, CC0-1.0, LicenseRef-scancode-public-domain (permissive) | language: Python | size: 16,671 bytes |
"""
MODFLOW 6 Autotest
Test the advection schemes in the gwt advection package for a one-dimensional
model grid of square cells.
"""
import os
import flopy
import numpy as np
import pytest
from flopy.utils.binaryfile import write_budget, write_head
from flopy.utils.gridutil import uniform_flow_field
from framework import TestFramework
from simulation import TestSimulation
ex = ["adv01a_fmi", "adv01b_fmi", "adv01c_fmi"]
scheme = ["upstream", "central", "tvd"]
def build_model(idx, dir):
nlay, nrow, ncol = 1, 1, 100
nper = 1
perlen = [5.0]
nstp = [200]
tsmult = [1.0]
steady = [True]
delr = 1.0
delc = 1.0
top = 1.0
botm = [0.0]
strt = 1.0
hk = 1.0
laytyp = 0
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-6, 1e-6, 1.0
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(
sim, time_units="DAYS", nper=nper, perioddata=tdis_rc
)
# create gwt model
gwtname = "gwt_" + name
gwt = flopy.mf6.MFModel(
sim,
model_type="gwt6",
modelname=gwtname,
model_nam_file=f"{gwtname}.nam",
)
gwt.name_file.save_flows = True
# create iterative model solution and register the gwt model with it
imsgwt = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="NONE",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
filename=f"{gwtname}.ims",
)
sim.register_ims_package(imsgwt, [gwt.name])
dis = flopy.mf6.ModflowGwtdis(
gwt,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=1,
filename=f"{gwtname}.dis",
)
# initial conditions
ic = flopy.mf6.ModflowGwtic(gwt, strt=0.0, filename=f"{gwtname}.ic")
# advection
adv = flopy.mf6.ModflowGwtadv(
gwt, scheme=scheme[idx], filename=f"{gwtname}.adv"
)
# mass storage and transfer
mst = flopy.mf6.ModflowGwtmst(gwt, porosity=0.1)
# sources
sourcerecarray = [("WEL-1", "AUX", "CONCENTRATION")]
ssm = flopy.mf6.ModflowGwtssm(
gwt, sources=sourcerecarray, filename=f"{gwtname}.ssm"
)
# create a heads file with head equal top
fname = os.path.join(ws, "myhead.hds")
with open(fname, "wb") as fbin:
for kstp in range(1): # nstp[0]):
write_head(fbin, top * np.ones((nrow, ncol)), kstp=kstp + 1)
# create a budget file
qx = 1.0
qy = 0.0
qz = 0.0
shape = (nlay, nrow, ncol)
spdis, flowja = uniform_flow_field(qx, qy, qz, shape)
dt = np.dtype(
[
("ID1", np.int32),
("ID2", np.int32),
("FLOW", np.float64),
("CONCENTRATION", np.float64),
]
)
wel = np.array([(0 + 1, 0 + 1, 1.0, 1.0)], dtype=dt)
chd = np.array([(ncol - 1 + 1, 0 + 1, -1.0, 0.0)], dtype=dt)
dt = np.dtype(
[
("ID1", np.int32),
("ID2", np.int32),
("FLOW", np.float64),
("SATURATION", np.float64),
]
)
sat = np.array(
[(i, i, 0.0, 1.0) for i in range(nlay * nrow * ncol)], dtype=dt
)
fname = os.path.join(ws, "mybudget.bud")
with open(fname, "wb") as fbin:
for kstp in range(1): # nstp[0]):
write_budget(fbin, flowja, kstp=kstp + 1)
write_budget(
fbin, spdis, text=" DATA-SPDIS", imeth=6, kstp=kstp + 1
)
write_budget(
fbin, sat, text=" DATA-SAT", imeth=6, kstp=kstp + 1
)
write_budget(
fbin,
wel,
text=" WEL",
imeth=6,
text2id2=" WEL-1",
kstp=kstp + 1,
)
write_budget(
fbin,
chd,
text=" CHD",
imeth=6,
text2id2=" CHD-1",
kstp=kstp + 1,
)
fbin.close()
# flow model interface
packagedata = [
("GWFBUDGET", "mybudget.bud", None),
("GWFHEAD", "myhead.hds", None),
]
fmi = flopy.mf6.ModflowGwtfmi(gwt, packagedata=packagedata)
# output control
oc = flopy.mf6.ModflowGwtoc(
gwt,
budget_filerecord=f"{gwtname}.cbc",
concentration_filerecord=f"{gwtname}.ucn",
concentrationprintrecord=[
("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")
],
saverecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
printrecord=[("CONCENTRATION", "LAST"), ("BUDGET", "LAST")],
)
obs_data = {
"conc_obs.csv": [
("(1-1-10)", "CONCENTRATION", (0, 0, 9)),
("(1-1-50)", "CONCENTRATION", (0, 0, 49)),
],
"flow_obs.csv": [
("c10-c11", "FLOW-JA-FACE", (0, 0, 9), (0, 0, 10)),
("c50-c51", "FLOW-JA-FACE", (0, 0, 49), (0, 0, 50)),
("c99-c100", "FLOW-JA-FACE", (0, 0, 98), (0, 0, 99)),
],
}
obs_package = flopy.mf6.ModflowUtlobs(
gwt,
pname="conc_obs",
filename=f"{gwtname}.obs",
digits=10,
print_input=True,
continuous=obs_data,
)
return sim, None
def eval_transport(sim):
print("evaluating transport...")
name = ex[sim.idxsim]
gwtname = "gwt_" + name
fpth = os.path.join(sim.simpath, f"{gwtname}.ucn")
try:
cobj = flopy.utils.HeadFile(
fpth, precision="double", text="CONCENTRATION"
)
conc = cobj.get_data()
except:
assert False, f'could not load data from "{fpth}"'
# This is the answer to this problem. These concentrations are for
# time step 200.
cres1 = [
[
[
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
9.99999999e-01,
9.99999997e-01,
9.99999991e-01,
9.99999971e-01,
9.99999914e-01,
9.99999761e-01,
9.99999372e-01,
9.99998435e-01,
9.99996286e-01,
9.99991577e-01,
9.99981712e-01,
9.99961893e-01,
9.99923632e-01,
9.99852532e-01,
9.99725120e-01,
9.99504599e-01,
9.99135431e-01,
9.98536850e-01,
9.97595635e-01,
9.96158712e-01,
9.94026505e-01,
9.90948130e-01,
9.86619748e-01,
9.80687319e-01,
9.72754814e-01,
9.62398489e-01,
9.49187176e-01,
9.32707801e-01,
9.12594513e-01,
8.88559134e-01,
8.60420154e-01,
8.28127324e-01,
7.91779115e-01,
7.51630867e-01,
7.08092322e-01,
6.61714306e-01,
6.13165405e-01,
5.63200494e-01,
5.12623768e-01,
4.62249349e-01,
4.12862664e-01,
3.65185517e-01,
3.19847250e-01,
2.77363614e-01,
2.38124183e-01,
2.02388273e-01,
1.70288648e-01,
1.41841739e-01,
1.16962748e-01,
9.54838854e-02,
7.71740354e-02,
6.17583229e-02,
4.89363652e-02,
3.83983188e-02,
2.98381826e-02,
2.29641338e-02,
1.75059339e-02,
1.32196416e-02,
9.89000005e-03,
7.33093269e-03,
5.38459977e-03,
3.91944360e-03,
2.82760119e-03,
2.02199855e-03,
1.43337156e-03,
1.00739149e-03,
7.02013580e-04,
4.85116958e-04,
3.32465664e-04,
2.25991387e-04,
1.52379541e-04,
1.01928496e-04,
6.76460984e-05,
4.45462926e-05,
2.91101871e-05,
1.88792800e-05,
1.21527525e-05,
7.76522212e-06,
4.92565188e-06,
3.10201677e-06,
1.93969988e-06,
1.20440812e-06,
7.42676511e-07,
4.54831064e-07,
2.76669882e-07,
1.67174989e-07,
1.00349240e-07,
5.98446532e-08,
3.54600737e-08,
]
]
]
cres1 = np.array(cres1)
cres2 = [
[
[
9.99996617e-01,
1.00001184e00,
1.00000294e00,
9.99972914e-01,
9.99992627e-01,
1.00004237e00,
1.00002081e00,
9.99945149e-01,
9.99952654e-01,
1.00005669e00,
1.00008810e00,
9.99966402e-01,
9.99865541e-01,
9.99967791e-01,
1.00015792e00,
1.00014755e00,
9.99895530e-01,
9.99724106e-01,
9.99916592e-01,
1.00029941e00,
1.00038455e00,
9.99960678e-01,
9.99433053e-01,
9.99453350e-01,
1.00018163e00,
1.00097923e00,
1.00093550e00,
9.99790199e-01,
9.98371554e-01,
9.98054584e-01,
9.99598363e-01,
1.00229288e00,
1.00416575e00,
1.00323035e00,
9.98995210e-01,
9.93234271e-01,
9.89448228e-01,
9.91206357e-01,
1.00016889e00,
1.01473298e00,
1.02990960e00,
1.03846239e00,
1.03282855e00,
1.00710727e00,
9.58480908e-01,
8.87726436e-01,
7.98820097e-01,
6.97900399e-01,
5.91969549e-01,
4.87686471e-01,
3.90487541e-01,
3.04127133e-01,
2.30608327e-01,
1.70400015e-01,
1.22812141e-01,
8.64138068e-02,
5.94120233e-02,
3.99463958e-02,
2.62868102e-02,
1.69426845e-02,
1.07033555e-02,
6.63198283e-03,
4.03300421e-03,
2.40844447e-03,
1.41323306e-03,
8.15254552e-04,
4.62589305e-04,
2.58303233e-04,
1.42001900e-04,
7.68911977e-05,
4.10256980e-05,
2.15775541e-05,
1.11912143e-05,
5.72578796e-06,
2.89083689e-06,
1.44073067e-06,
7.09001789e-07,
3.44624235e-07,
1.65501321e-07,
7.85475047e-08,
3.68512253e-08,
1.70949923e-08,
7.84310280e-09,
3.55966819e-09,
1.59856594e-09,
7.10467596e-10,
3.12565151e-10,
1.36146377e-10,
5.87252052e-11,
2.50886169e-11,
1.06179506e-11,
4.45237718e-12,
1.85013624e-12,
7.61982955e-13,
3.11095972e-13,
1.25908830e-13,
5.05704707e-14,
2.00370648e-14,
8.15003576e-15,
2.57563506e-15,
]
]
]
cres2 = np.array(cres2)
cres3 = [
[
[
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
1.00000000e00,
9.99999999e-01,
9.99999997e-01,
9.99999991e-01,
9.99999975e-01,
9.99999926e-01,
9.99999789e-01,
9.99999407e-01,
9.99998374e-01,
9.99995665e-01,
9.99988785e-01,
9.99971918e-01,
9.99932078e-01,
9.99841550e-01,
9.99643930e-01,
9.99229970e-01,
9.98398720e-01,
9.96800070e-01,
9.93857995e-01,
9.88681096e-01,
9.79978744e-01,
9.66015902e-01,
9.44652308e-01,
9.13514114e-01,
8.70328697e-01,
8.13410724e-01,
7.42224214e-01,
6.57879960e-01,
5.63390876e-01,
4.63530320e-01,
3.64233335e-01,
2.71628522e-01,
1.90935412e-01,
1.25541007e-01,
7.65316248e-02,
4.28052252e-02,
2.16851758e-02,
9.78976172e-03,
3.85613094e-03,
1.28872611e-03,
3.52070089e-04,
7.49188445e-05,
1.17688715e-05,
1.33952025e-06,
1.08174095e-07,
-4.82019087e-08,
-5.67180537e-08,
-4.65251289e-08,
-3.25511455e-08,
-1.94644548e-08,
-9.78876693e-09,
-4.07380361e-09,
-1.38097809e-09,
-3.72934181e-10,
-7.83508455e-11,
-1.26040926e-11,
-1.48260453e-12,
4.10392230e-14,
2.44993743e-13,
2.46295025e-13,
1.90964563e-13,
1.03476379e-13,
3.96502895e-14,
1.04500247e-14,
2.00830327e-15,
4.70831032e-16,
3.38440506e-16,
2.49848438e-16,
1.83245111e-16,
1.32361223e-16,
9.39406563e-17,
6.54891851e-17,
4.48667613e-17,
3.02333440e-17,
2.00567815e-17,
1.31110206e-17,
8.45177289e-18,
5.37610069e-18,
3.37597383e-18,
]
]
]
cres3 = np.array(cres3)
creslist = [cres1, cres2, cres3]
assert np.allclose(
creslist[sim.idxsim], conc
), "simulated concentrations do not match with known solution."
@pytest.mark.parametrize(
"idx, name",
list(enumerate(ex)),
)
def test_mf6model(idx, name, function_tmpdir, targets):
ws = str(function_tmpdir)
test = TestFramework()
test.build(build_model, idx, ws)
test.run(
TestSimulation(
name=name, exe_dict=targets, exfunc=eval_transport, idxsim=idx
),
ws,
)
| repo: alisaifee/limits | path: /tests/conftest.py | license: MIT (permissive) | language: Python | size: 8,627 bytes |
import os
import platform
import socket
import time
import etcd3
import pymemcache
import pymemcache.client
import pymongo
import pytest
import redis
import redis.sentinel
def check_redis_cluster_ready(host, port):
try:
return redis.Redis(host, port).cluster("info")["cluster_state"] == "ok"
except Exception:
return False
def check_redis_auth_cluster_ready(host, port):
try:
return (
redis.Redis(host, port, password="sekret").cluster("info")["cluster_state"]
== "ok"
)
except Exception:
return False
def check_redis_ssl_cluster_ready(host, port):
storage_url = (
"rediss://localhost:8301/?ssl_cert_reqs=required"
"&ssl_keyfile=./tests/tls/client.key"
"&ssl_certfile=./tests/tls/client.crt"
"&ssl_ca_certs=./tests/tls/ca.crt"
)
try:
return (
redis.Redis.from_url(storage_url).cluster("info")["cluster_state"] == "ok"
)
except Exception:
return False
def check_sentinel_ready(host, port):
try:
return redis.sentinel.Sentinel([(host, port)]).master_for("mymaster").ping()
except: # noqa
return False
def check_sentinel_auth_ready(host, port):
try:
return (
redis.sentinel.Sentinel(
[(host, port)],
sentinel_kwargs={"password": "sekret"},
password="sekret",
)
.master_for("mymaster")
.ping()
)
except: # noqa
return False
def check_mongo_ready(host, port):
try:
pymongo.MongoClient("mongodb://localhost:37017").server_info()
return True
except: # noqa
return False
def check_etcd_ready(host, port):
try:
etcd3.client(host, port).status()
return True
except: # noqa
return False
@pytest.fixture(scope="session")
def host_ip_env():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(("10.255.255.255", 1))
ip = s.getsockname()[0]
except Exception:
ip = "127.0.0.1"
finally:
s.close()
os.environ["HOST_IP"] = str(ip)
@pytest.fixture(scope="session")
def docker_services(host_ip_env, docker_services):
return docker_services
@pytest.fixture(scope="session")
def etcd_client(docker_services):
docker_services.start("etcd")
docker_services.wait_for_service("etcd", 2379, check_etcd_ready)
if os.environ.get("CI") == "True":
time.sleep(5)
return etcd3.client()
@pytest.fixture(scope="session")
def redis_basic_client(docker_services):
docker_services.start("redis-basic")
return redis.StrictRedis("localhost", 7379)
@pytest.fixture(scope="session")
def redis_uds_client(docker_services):
if platform.system().lower() == "darwin":
pytest.skip("Fixture not supported on OSX")
docker_services.start("redis-uds")
return redis.from_url("unix:///tmp/limits.redis.sock")
@pytest.fixture(scope="session")
def redis_auth_client(docker_services):
docker_services.start("redis-auth")
return redis.from_url("redis://:sekret@localhost:7389")
@pytest.fixture(scope="session")
def redis_ssl_client(docker_services):
docker_services.start("redis-ssl")
storage_url = (
"rediss://localhost:8379/0?ssl_cert_reqs=required"
"&ssl_keyfile=./tests/tls/client.key"
"&ssl_certfile=./tests/tls/client.crt"
"&ssl_ca_certs=./tests/tls/ca.crt"
)
return redis.from_url(storage_url)
@pytest.fixture(scope="session")
def redis_cluster_client(docker_services):
docker_services.start("redis-cluster-init")
docker_services.wait_for_service("redis-cluster-6", 7006, check_redis_cluster_ready)
if os.environ.get("CI") == "True":
time.sleep(10)
return redis.cluster.RedisCluster("localhost", 7001)
@pytest.fixture(scope="session")
def redis_auth_cluster_client(docker_services):
docker_services.start("redis-cluster-auth-init")
docker_services.wait_for_service(
"redis-cluster-auth-3", 8402, check_redis_auth_cluster_ready
)
if os.environ.get("CI") == "True":
time.sleep(10)
return redis.cluster.RedisCluster("localhost", 8400, password="sekret")
@pytest.fixture(scope="session")
def redis_ssl_cluster_client(docker_services):
docker_services.start("redis-ssl-cluster-init")
docker_services.wait_for_service(
"redis-ssl-cluster-6", 8306, check_redis_ssl_cluster_ready
)
if os.environ.get("CI") == "True":
time.sleep(10)
storage_url = (
"rediss://localhost:8301/?ssl_cert_reqs=required"
"&ssl_keyfile=./tests/tls/client.key"
"&ssl_certfile=./tests/tls/client.crt"
"&ssl_ca_certs=./tests/tls/ca.crt"
)
return redis.cluster.RedisCluster.from_url(storage_url)
@pytest.fixture(scope="session")
def redis_sentinel_client(docker_services):
docker_services.start("redis-sentinel")
docker_services.wait_for_service("redis-sentinel", 26379, check_sentinel_ready)
return redis.sentinel.Sentinel([("localhost", 26379)])
@pytest.fixture(scope="session")
def redis_sentinel_auth_client(docker_services):
docker_services.start("redis-sentinel-auth")
docker_services.wait_for_service(
"redis-sentinel-auth", 26379, check_sentinel_auth_ready
)
return redis.sentinel.Sentinel(
[("localhost", 36379)],
sentinel_kwargs={"password": "sekret"},
password="sekret",
)
@pytest.fixture(scope="session")
def memcached_client(docker_services):
docker_services.start("memcached-1")
return pymemcache.Client(("localhost", 22122))
@pytest.fixture(scope="session")
def memcached_cluster_client(docker_services):
docker_services.start("memcached-1")
docker_services.start("memcached-2")
return pymemcache.client.HashClient([("localhost", 22122), ("localhost", 22123)])
@pytest.fixture(scope="session")
def memcached_uds_client(docker_services):
if platform.system().lower() == "darwin":
pytest.skip("Fixture not supported on OSX")
docker_services.start("memcached-uds")
return pymemcache.Client("/tmp/limits.memcached.sock")
@pytest.fixture(scope="session")
def mongodb_client(docker_services):
docker_services.start("mongodb")
docker_services.wait_for_service("mongodb", 27017, check_mongo_ready)
return pymongo.MongoClient("mongodb://localhost:37017")
@pytest.fixture
def memcached(memcached_client):
memcached_client.flush_all()
return memcached_client
@pytest.fixture
def memcached_uds(memcached_uds_client):
memcached_uds_client.flush_all()
return memcached_uds_client
@pytest.fixture
def memcached_cluster(memcached_cluster_client):
memcached_cluster_client.flush_all()
return memcached_cluster_client
@pytest.fixture
def redis_basic(redis_basic_client):
redis_basic_client.flushall()
    return redis_basic_client
@pytest.fixture
def redis_ssl(redis_ssl_client):
redis_ssl_client.flushall()
return redis_ssl_client
@pytest.fixture
def redis_auth(redis_auth_client):
redis_auth_client.flushall()
return redis_auth_client
@pytest.fixture
def redis_uds(redis_uds_client):
redis_uds_client.flushall()
return redis_uds_client
@pytest.fixture
def redis_cluster(redis_cluster_client):
redis_cluster_client.flushall()
return redis_cluster_client
@pytest.fixture
def redis_auth_cluster(redis_auth_cluster_client):
redis_auth_cluster_client.flushall()
return redis_auth_cluster_client
@pytest.fixture
def redis_ssl_cluster(redis_ssl_cluster_client):
redis_ssl_cluster_client.flushall()
return redis_ssl_cluster_client
@pytest.fixture
def redis_sentinel(redis_sentinel_client):
redis_sentinel_client.master_for("mymaster").flushall()
    return redis_sentinel_client
@pytest.fixture
def redis_sentinel_auth(redis_sentinel_auth_client):
redis_sentinel_auth_client.master_for("mymaster").flushall()
return redis_sentinel_auth_client
@pytest.fixture
def mongodb(mongodb_client):
mongodb_client.limits.windows.drop()
mongodb_client.limits.counters.drop()
return mongodb_client
@pytest.fixture
def etcd(etcd_client):
etcd_client.delete_prefix("limits/")
return etcd_client
@pytest.fixture(scope="session")
def docker_services_project_name():
return "limits"
@pytest.fixture(scope="session")
def docker_compose_files(pytestconfig):
"""Get the docker-compose.yml absolute path.
Override this fixture in your tests if you need a custom location.
"""
return ["docker-compose.yml"]
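A minimal sketch of the override mentioned in the docstring above: a downstream conftest.py can redefine the fixture so pytest-docker picks up a compose file elsewhere (the path below is hypothetical).

import os

import pytest


@pytest.fixture(scope="session")
def docker_compose_files(pytestconfig):
    # Hypothetical custom location, resolved against the pytest rootdir.
    return [os.path.join(str(pytestconfig.rootdir), "deploy", "docker-compose.test.yml")]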
| repo: vgrem/Office365-REST-Python-Client | path: /office365/sharepoint/base_entity.py | license: MIT (permissive) | language: Python | size: 1,343 bytes |
from office365.runtime.client_object import ClientObject
from office365.runtime.queries.delete_entity import DeleteEntityQuery
from office365.runtime.queries.update_entity import UpdateEntityQuery
class BaseEntity(ClientObject):
"""SharePoint specific entity"""
def with_credentials(self, credentials):
"""
:type self: T
:type credentials: UserCredential or ClientCredential
"""
self.context.with_credentials(credentials)
return self
def delete_object(self):
"""The recommended way to delete a SharePoint entity"""
qry = DeleteEntityQuery(self)
self.context.add_query(qry)
self.remove_from_parent_collection()
return self
def update(self, *args):
"""The recommended way to update a SharePoint entity"""
qry = UpdateEntityQuery(self)
self.context.add_query(qry)
return self
@property
def context(self):
"""
:rtype: office365.sharepoint.client_context.ClientContext
"""
return self._context
@property
def entity_type_name(self):
if self._entity_type_name is None:
self._entity_type_name = ".".join(["SP", type(self).__name__])
return self._entity_type_name
@property
def property_ref_name(self):
return "Id"
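A hedged usage sketch (not part of this module) of the update()/delete_object() pattern the docstrings recommend; the site URL, credentials, list title and item id are placeholders.

from office365.runtime.auth.user_credential import UserCredential
from office365.sharepoint.client_context import ClientContext

ctx = ClientContext("https://contoso.sharepoint.com/sites/demo").with_credentials(
    UserCredential("user@contoso.com", "password")  # placeholder credentials
)
item = ctx.web.lists.get_by_title("Tasks").get_item_by_id(1)  # placeholder list/item
item.set_property("Title", "Renamed task")
item.update()          # queues an UpdateEntityQuery
ctx.execute_query()

item.delete_object()   # queues a DeleteEntityQuery and detaches the item
ctx.execute_query()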
| repo: huaweicloud/huaweicloud-sdk-python-v3 | path: /huaweicloud-sdk-roma/huaweicloudsdkroma/v2/model/show_app_bound_app_quota_response.py | license: Apache-2.0 (permissive) | language: Python | size: 10,881 bytes |
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowAppBoundAppQuotaResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'app_quota_id': 'str',
'name': 'str',
'call_limits': 'int',
'time_unit': 'str',
'time_interval': 'int',
'remark': 'str',
'reset_time': 'str',
'create_time': 'datetime',
'bound_app_num': 'int'
}
attribute_map = {
'app_quota_id': 'app_quota_id',
'name': 'name',
'call_limits': 'call_limits',
'time_unit': 'time_unit',
'time_interval': 'time_interval',
'remark': 'remark',
'reset_time': 'reset_time',
'create_time': 'create_time',
'bound_app_num': 'bound_app_num'
}
def __init__(self, app_quota_id=None, name=None, call_limits=None, time_unit=None, time_interval=None, remark=None, reset_time=None, create_time=None, bound_app_num=None):
"""ShowAppBoundAppQuotaResponse
The model defined in huaweicloud sdk
        :param app_quota_id: Client quota ID
        :type app_quota_id: str
        :param name: Quota name. Supports Chinese characters, letters, digits, and underscores; must start with a letter or Chinese character; 3 to 255 characters
        :type name: str
        :param call_limits: Access count limit of the client quota
        :type call_limits: int
        :param time_unit: Time unit of the limit: SECOND, MINUTE, HOURE (hour), DAY
        :type time_unit: str
        :param time_interval: Length of the quota's limit period (in the unit given by time_unit)
        :type time_interval: int
        :param remark: Parameter description
        :type remark: str
        :param reset_time: Time of the first quota reset; if not configured, it is calculated from the time of the first call by default
        :type reset_time: str
        :param create_time: Creation time
        :type create_time: datetime
        :param bound_app_num: Number of applications bound to the quota policy
        :type bound_app_num: int
"""
super(ShowAppBoundAppQuotaResponse, self).__init__()
self._app_quota_id = None
self._name = None
self._call_limits = None
self._time_unit = None
self._time_interval = None
self._remark = None
self._reset_time = None
self._create_time = None
self._bound_app_num = None
self.discriminator = None
if app_quota_id is not None:
self.app_quota_id = app_quota_id
if name is not None:
self.name = name
if call_limits is not None:
self.call_limits = call_limits
if time_unit is not None:
self.time_unit = time_unit
if time_interval is not None:
self.time_interval = time_interval
if remark is not None:
self.remark = remark
if reset_time is not None:
self.reset_time = reset_time
if create_time is not None:
self.create_time = create_time
if bound_app_num is not None:
self.bound_app_num = bound_app_num
@property
def app_quota_id(self):
"""Gets the app_quota_id of this ShowAppBoundAppQuotaResponse.
        Client quota ID
:return: The app_quota_id of this ShowAppBoundAppQuotaResponse.
:rtype: str
"""
return self._app_quota_id
@app_quota_id.setter
def app_quota_id(self, app_quota_id):
"""Sets the app_quota_id of this ShowAppBoundAppQuotaResponse.
        Client quota ID
:param app_quota_id: The app_quota_id of this ShowAppBoundAppQuotaResponse.
:type app_quota_id: str
"""
self._app_quota_id = app_quota_id
@property
def name(self):
"""Gets the name of this ShowAppBoundAppQuotaResponse.
        Quota name. Supports Chinese characters, letters, digits, and underscores; must start with a letter or Chinese character; 3 to 255 characters
:return: The name of this ShowAppBoundAppQuotaResponse.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ShowAppBoundAppQuotaResponse.
        Quota name. Supports Chinese characters, letters, digits, and underscores; must start with a letter or Chinese character; 3 to 255 characters
:param name: The name of this ShowAppBoundAppQuotaResponse.
:type name: str
"""
self._name = name
@property
def call_limits(self):
"""Gets the call_limits of this ShowAppBoundAppQuotaResponse.
        Access count limit of the client quota
:return: The call_limits of this ShowAppBoundAppQuotaResponse.
:rtype: int
"""
return self._call_limits
@call_limits.setter
def call_limits(self, call_limits):
"""Sets the call_limits of this ShowAppBoundAppQuotaResponse.
        Access count limit of the client quota
:param call_limits: The call_limits of this ShowAppBoundAppQuotaResponse.
:type call_limits: int
"""
self._call_limits = call_limits
@property
def time_unit(self):
"""Gets the time_unit of this ShowAppBoundAppQuotaResponse.
        Time unit of the limit: SECOND, MINUTE, HOURE (hour), DAY
:return: The time_unit of this ShowAppBoundAppQuotaResponse.
:rtype: str
"""
return self._time_unit
@time_unit.setter
def time_unit(self, time_unit):
"""Sets the time_unit of this ShowAppBoundAppQuotaResponse.
        Time unit of the limit: SECOND, MINUTE, HOURE (hour), DAY
:param time_unit: The time_unit of this ShowAppBoundAppQuotaResponse.
:type time_unit: str
"""
self._time_unit = time_unit
@property
def time_interval(self):
"""Gets the time_interval of this ShowAppBoundAppQuotaResponse.
        Length of the quota's limit period (in the unit given by time_unit)
:return: The time_interval of this ShowAppBoundAppQuotaResponse.
:rtype: int
"""
return self._time_interval
@time_interval.setter
def time_interval(self, time_interval):
"""Sets the time_interval of this ShowAppBoundAppQuotaResponse.
        Length of the quota's limit period (in the unit given by time_unit)
:param time_interval: The time_interval of this ShowAppBoundAppQuotaResponse.
:type time_interval: int
"""
self._time_interval = time_interval
@property
def remark(self):
"""Gets the remark of this ShowAppBoundAppQuotaResponse.
        Parameter description
:return: The remark of this ShowAppBoundAppQuotaResponse.
:rtype: str
"""
return self._remark
@remark.setter
def remark(self, remark):
"""Sets the remark of this ShowAppBoundAppQuotaResponse.
        Parameter description
:param remark: The remark of this ShowAppBoundAppQuotaResponse.
:type remark: str
"""
self._remark = remark
@property
def reset_time(self):
"""Gets the reset_time of this ShowAppBoundAppQuotaResponse.
        Time of the first quota reset; if not configured, it is calculated from the time of the first call by default
:return: The reset_time of this ShowAppBoundAppQuotaResponse.
:rtype: str
"""
return self._reset_time
@reset_time.setter
def reset_time(self, reset_time):
"""Sets the reset_time of this ShowAppBoundAppQuotaResponse.
        Time of the first quota reset; if not configured, it is calculated from the time of the first call by default
:param reset_time: The reset_time of this ShowAppBoundAppQuotaResponse.
:type reset_time: str
"""
self._reset_time = reset_time
@property
def create_time(self):
"""Gets the create_time of this ShowAppBoundAppQuotaResponse.
        Creation time
:return: The create_time of this ShowAppBoundAppQuotaResponse.
:rtype: datetime
"""
return self._create_time
@create_time.setter
def create_time(self, create_time):
"""Sets the create_time of this ShowAppBoundAppQuotaResponse.
        Creation time
:param create_time: The create_time of this ShowAppBoundAppQuotaResponse.
:type create_time: datetime
"""
self._create_time = create_time
@property
def bound_app_num(self):
"""Gets the bound_app_num of this ShowAppBoundAppQuotaResponse.
        Number of applications bound to the quota policy
:return: The bound_app_num of this ShowAppBoundAppQuotaResponse.
:rtype: int
"""
return self._bound_app_num
@bound_app_num.setter
def bound_app_num(self, bound_app_num):
"""Sets the bound_app_num of this ShowAppBoundAppQuotaResponse.
        Number of applications bound to the quota policy
:param bound_app_num: The bound_app_num of this ShowAppBoundAppQuotaResponse.
:type bound_app_num: int
"""
self._bound_app_num = bound_app_num
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowAppBoundAppQuotaResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
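A small usage sketch (illustrative values only, not real API data) showing how the response model above serializes through to_dict() and to_str().

resp = ShowAppBoundAppQuotaResponse(
    app_quota_id="quota-001",
    name="demo_quota",
    call_limits=1000,
    time_unit="MINUTE",
    time_interval=5,
    bound_app_num=2,
)
print(resp.to_dict())  # plain dict keyed by attribute name
print(resp.to_str())   # JSON string rendered with simplejson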
| repo: allegroai/clearml | path: /clearml/utilities/process/mp.py | license: Apache-2.0 (permissive) | language: Python | size: 29,304 bytes |
import os
import pickle
import struct
import sys
from functools import partial
from multiprocessing import Process, Semaphore, Event as ProcessEvent
from threading import Thread, Event as TrEvent, RLock as ThreadRLock
from time import sleep, time
from typing import List, Dict, Optional
import psutil
from six.moves.queue import Empty, Queue as TrQueue
from ..py3_interop import AbstractContextManager
try:
from multiprocessing import SimpleQueue
except ImportError:
from multiprocessing.queues import SimpleQueue
# Windows/MacOS compatibility
try:
from multiprocessing.context import ForkContext # noqa
except ImportError:
ForkContext = None
# PY2 compatibility
try:
from multiprocessing import get_context
except ImportError:
def get_context(*args, **kwargs): # noqa
import multiprocessing
return multiprocessing
class _ForkSafeThreadSyncObject(object):
__process_lock = get_context("fork" if sys.platform == "linux" else "spawn").Lock()
@classmethod
def _inner_lock(cls):
try:
            # let's agree that 90 seconds should be enough to get a lock on such a short protected piece of code.
            # if we fail, we have to assume some deadlock happened
            # (Python is not very safe with regard to process locks)
cls.__process_lock.acquire(block=True, timeout=90)
except: # noqa
pass
@classmethod
def _inner_unlock(cls):
try:
cls.__process_lock.release()
except: # noqa
# if we fail to release we might not have locked it in the first place (see timeout)
pass
def __init__(self, functor):
self._sync = None
self._instance_pid = None
self._functor = functor
def _create(self):
# this part is not atomic, and there is not a lot we can do about it.
if self._instance_pid != os.getpid() or not self._sync:
            # Notice! This is NOT atomic: the first time this is accessed, two concurrent calls might
            # end up overwriting each other's object.
            # Even though that sounds horrible, the worst case in our usage scenario
            # is that the very first call is not "atomic".
            # Notice the order! We first create the object and THEN update the pid,
            # so that whatever happens we never try to use the old (pre-fork copy) of the synchronization object
try:
self._inner_lock()
                # we have to check again inside the protected locked area
if self._instance_pid != os.getpid() or not self._sync:
self._sync = self._functor()
self._instance_pid = os.getpid()
finally:
self._inner_unlock()
class ForkSafeRLock(_ForkSafeThreadSyncObject):
def __init__(self):
super(ForkSafeRLock, self).__init__(ThreadRLock)
def acquire(self, *args, **kwargs):
self._create()
return self._sync.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
if self._sync is None:
return None
self._create()
return self._sync.release(*args, **kwargs)
def __enter__(self):
"""Return `self` upon entering the runtime context."""
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
# Do whatever cleanup.
self.release()
def _is_owned(self):
self._create()
return self._sync._is_owned() # noqa
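# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# ForkSafeRLock lazily rebuilds its underlying threading.RLock the first time it is used
# in a new process, so a lock created before os.fork() stays usable in the child.
# The names below are hypothetical and only demonstrate the pattern.
_example_counter_lock = ForkSafeRLock()
_example_counter = {"value": 0}
def _example_bump_counter():
    # safe to call from the parent process and from forked children alike
    with _example_counter_lock:
        _example_counter["value"] += 1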
class ForkSemaphore(_ForkSafeThreadSyncObject):
def __init__(self, value=1):
super(ForkSemaphore, self).__init__(functor=partial(Semaphore, value))
def acquire(self, *args, **kwargs):
try:
self._create()
except BaseException: # noqa
return None
return self._sync.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
if self._sync is None:
return None
self._create()
return self._sync.release(*args, **kwargs)
def get_value(self):
self._create()
return self._sync.get_value()
def __enter__(self):
"""Return `self` upon entering the runtime context."""
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
# Do whatever cleanup.
self.release()
class ForkEvent(_ForkSafeThreadSyncObject):
def __init__(self):
super(ForkEvent, self).__init__(TrEvent)
def set(self):
self._create()
return self._sync.set()
def clear(self):
if self._sync is None:
return None
self._create()
return self._sync.clear()
def is_set(self):
self._create()
return self._sync.is_set()
def wait(self, *args, **kwargs):
self._create()
return self._sync.wait(*args, **kwargs)
class ForkQueue(_ForkSafeThreadSyncObject):
def __init__(self):
super(ForkQueue, self).__init__(TrQueue)
def get(self, *args, **kwargs):
self._create()
return self._sync.get(*args, **kwargs)
def put(self, *args, **kwargs):
self._create()
return self._sync.put(*args, **kwargs)
def empty(self):
if not self._sync:
return True
self._create()
return self._sync.empty()
def full(self):
if not self._sync:
return False
self._create()
return self._sync.full()
def close(self):
if not self._sync:
return
self._create()
return self._sync.close()
class ThreadCalls(object):
def __init__(self):
self._queue = ForkQueue()
self._thread = Thread(target=self._worker)
self._thread.daemon = True
self._thread.start()
def is_alive(self):
return bool(self._thread) and self._thread.is_alive()
def apply_async(self, func, args=None):
if not func:
return False
self._queue.put((func, args))
return True
def close(self, timeout=5.):
t = self._thread
if not t:
return
try:
# push something into queue so it knows this is the end
self._queue.put(None)
            # wait for the thread; it should not take long, so we have a 5 second timeout
# the background thread itself is doing nothing but push into a queue, so it should not take long
t.join(timeout=timeout)
except BaseException: # noqa
pass
# mark thread is done
self._thread = None
def _worker(self):
while True:
try:
request = self._queue.get(block=True, timeout=1.0)
if not request:
break
except Empty:
continue
# noinspection PyBroadException
try:
if request[1] is not None:
request[0](*request[1])
else:
request[0]()
except Exception:
pass
self._thread = None
class SingletonThreadPool(object):
__thread_pool = None
__thread_pool_pid = None
@classmethod
def get(cls):
if os.getpid() != cls.__thread_pool_pid:
cls.__thread_pool = ThreadCalls()
cls.__thread_pool_pid = os.getpid()
return cls.__thread_pool
@classmethod
def clear(cls):
if cls.__thread_pool:
cls.__thread_pool.close()
cls.__thread_pool = None
cls.__thread_pool_pid = None
@classmethod
def is_active(cls):
return cls.__thread_pool and cls.__thread_pool_pid == os.getpid() and cls.__thread_pool.is_alive()
class SafeQueue(object):
"""
Many writers Single Reader multiprocessing safe Queue
"""
__thread_pool = SingletonThreadPool()
def __init__(self, *args, **kwargs):
self._reader_thread = None
self._reader_thread_started = False
# Fix the python Queue and Use SimpleQueue write so it uses a single OS write,
# making it atomic message passing
self._q = SimpleQueue(*args, **kwargs)
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences,PyProtectedMember
self._q._writer._send_bytes = partial(SafeQueue._pipe_override_send_bytes, self._q._writer)
except Exception:
pass
self._internal_q = None
# Note we should Never! assign a new object to `self._q_size`, just work with the initial object
self._q_size = [] # list of PIDs we pushed, so this is atomic.
def empty(self):
return self._q.empty() and (not self._internal_q or self._internal_q.empty())
def is_pending(self):
# check if we have pending requests to be pushed (it does not mean they were pulled)
# only call from main put process
return self._get_q_size_len() > 0
def close(self, event, timeout=3.0):
# wait until all pending requests pushed
tic = time()
pid = os.getpid()
prev_q_size = self._get_q_size_len(pid)
while self.is_pending():
if event:
event.set()
if not self.__thread_pool.is_active():
break
sleep(0.1)
# timeout is for the maximum time to pull a single object from the queue,
# this way if we get stuck we notice quickly and abort
if timeout and (time()-tic) > timeout:
if prev_q_size == self._get_q_size_len(pid):
break
else:
prev_q_size = self._get_q_size_len(pid)
tic = time()
def get(self, *args, **kwargs):
return self._get_internal_queue(*args, **kwargs)
def batch_get(self, max_items=1000, timeout=0.2, throttle_sleep=0.1):
buffer = []
timeout_count = int(timeout/throttle_sleep)
empty_count = timeout_count
while len(buffer) < max_items:
while not self.empty() and len(buffer) < max_items:
try:
buffer.append(self._get_internal_queue(block=False))
empty_count = 0
except Empty:
break
empty_count += 1
if empty_count > timeout_count or len(buffer) >= max_items:
break
sleep(throttle_sleep)
return buffer
def put(self, obj):
# not atomic when forking for the first time
# GIL will make sure it is atomic
self._q_size.append(os.getpid())
try:
# make sure the block put is done in the thread pool i.e. in the background
obj = pickle.dumps(obj)
if BackgroundMonitor.get_at_exit_state():
self._q_put(obj)
return
self.__thread_pool.get().apply_async(self._q_put, args=(obj, False))
except: # noqa
pid = os.getpid()
p = None
while p != pid and self._q_size:
p = self._q_size.pop()
def _get_q_size_len(self, pid=None):
pid = pid or os.getpid()
return len([p for p in self._q_size if p == pid])
def _q_put(self, obj, allow_raise=True):
# noinspection PyBroadException
try:
self._q.put(obj)
except BaseException:
            # make sure we zero the _q_size if the process dies (i.e. queue put fails)
self._q_size.clear()
if allow_raise:
raise
return
pid = os.getpid()
# GIL will make sure it is atomic
# pop the First "counter" that is ours (i.e. pid == os.getpid())
p = None
while p != pid and self._q_size:
p = self._q_size.pop()
def _init_reader_thread(self):
if not self._internal_q:
self._internal_q = ForkQueue()
if not self._reader_thread or not self._reader_thread.is_alive():
# read before we start the thread
self._reader_thread = Thread(target=self._reader_daemon)
self._reader_thread.daemon = True
self._reader_thread.start()
# if we have waiting results
# wait until thread is up and pushed some results
while not self._reader_thread_started:
sleep(0.2)
# just in case make sure we pulled some stuff if we had any
# todo: wait until a queue is not empty, but for some reason that might fail
sleep(1.0)
def _get_internal_queue(self, *args, **kwargs):
self._init_reader_thread()
obj = self._internal_q.get(*args, **kwargs)
# deserialize
return pickle.loads(obj)
def _reader_daemon(self):
self._reader_thread_started = True
# pull from process queue and push into thread queue
while True:
# noinspection PyBroadException
try:
obj = self._q.get()
if obj is None:
break
except Exception:
break
self._internal_q.put(obj)
@staticmethod
def _pipe_override_send_bytes(self, buf):
n = len(buf)
# For wire compatibility with 3.2 and lower
header = struct.pack("!i", n)
# Issue #20540: concatenate before sending, to avoid delays due
# to Nagle's algorithm on a TCP socket.
# Also note we want to avoid sending a 0-length buffer separately,
# to avoid "broken pipe" errors if the other end closed the pipe.
self._send(header + buf)
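# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# SafeQueue is a many-writer / single-reader queue: any process may call put(), while a
# single reader drains it with get() or batch_get(). The snippet below is a hypothetical
# single-process sketch of that round trip (put() pickles and pushes in the background).
def _example_safe_queue_roundtrip():
    q = SafeQueue()
    q.put({"metric": "loss", "value": 0.25})  # in practice called from worker processes
    q.put({"metric": "loss", "value": 0.21})
    # the single reader (usually the reporting subprocess) pulls items in batches
    return q.batch_get(max_items=10, timeout=0.5)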
class SafeEvent(object):
__thread_pool = SingletonThreadPool()
def __init__(self):
self._event = ProcessEvent()
def is_set(self):
return self._event.is_set()
def set(self):
if not BackgroundMonitor.is_subprocess_enabled() or BackgroundMonitor.is_subprocess_alive():
self._event.set()
# SafeEvent.__thread_pool.get().apply_async(func=self._event.set, args=())
def clear(self):
return self._event.clear()
def wait(self, timeout=None):
return self._event.wait(timeout=timeout)
class SingletonLock(AbstractContextManager):
_instances = []
def __init__(self):
self._lock = None
SingletonLock._instances.append(self)
def acquire(self, *args, **kwargs):
self.create()
return self._lock.acquire(*args, **kwargs)
def release(self, *args, **kwargs):
if self._lock is None:
return None
return self._lock.release(*args, **kwargs)
def create(self):
if self._lock is None:
self._lock = ForkSafeRLock()
@classmethod
def instantiate(cls):
for i in cls._instances:
i.create()
def __enter__(self):
"""Return `self` upon entering the runtime context."""
self.acquire()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Raise any exception triggered within the runtime context."""
# Do whatever cleanup.
self.release()
class BackgroundMonitor(object):
# If we need multiple monitoring contexts (i.e. subprocesses) this will become a dict
_main_process = None
_main_process_proc_obj = None
_main_process_task_id = None
_parent_pid = None
_sub_process_started = None
_at_exit = False
_instances = {} # type: Dict[int, List[BackgroundMonitor]]
def __init__(self, task, wait_period, for_model=False):
self._event = ForkEvent()
self._done_ev = ForkEvent()
self._start_ev = ForkEvent()
self._task_pid = os.getpid()
self._thread = None
self._thread_pid = None
self._wait_timeout = wait_period
self._subprocess = None if not for_model and task.is_main_task() else False
self._task_id = task.id
self._task_obj_id = id(task.id)
def start(self):
if not self._thread:
self._thread = True
self._event.clear()
self._done_ev.clear()
if self._subprocess is False:
# start the thread we are in threading mode.
self._start()
else:
# append to instances
if self not in self._get_instances():
self._get_instances().append(self)
def wait(self, timeout=None):
if not self._done_ev:
return
if not self.is_subprocess_mode() or self.is_subprocess_mode_and_parent_process():
self._done_ev.wait(timeout=timeout)
def _start(self):
# if we already started do nothing
if isinstance(self._thread, Thread):
if self._thread_pid == os.getpid():
return
self._thread_pid = os.getpid()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
def stop(self):
if not self._thread:
return
if not self._is_subprocess_mode_and_not_parent_process() and (
not self.is_subprocess_mode() or self.is_subprocess_alive()):
self._event.set()
if isinstance(self._thread, Thread):
try:
self._get_instances().remove(self)
except ValueError:
pass
self._thread = False
def daemon(self):
while True:
if self._event.wait(self._wait_timeout):
break
self._daemon_step()
def _daemon(self):
self._start_ev.set()
try:
self.daemon()
finally:
self.post_execution()
self._thread = False
def post_execution(self):
self._done_ev.set()
def set_subprocess_mode(self):
# called just before launching the daemon in a subprocess
if not self._subprocess:
self._subprocess = True
if not isinstance(self._done_ev, SafeEvent):
self._done_ev = SafeEvent()
if not isinstance(self._start_ev, SafeEvent):
self._start_ev = SafeEvent()
if not isinstance(self._event, SafeEvent):
self._event = SafeEvent()
def _daemon_step(self):
pass
@classmethod
def start_all(cls, task, wait_for_subprocess=True):
# noinspection PyProtectedMember
execute_in_subprocess = task._report_subprocess_enabled
if not execute_in_subprocess:
for d in BackgroundMonitor._instances.get(id(task.id), []):
d._start()
elif not BackgroundMonitor._main_process:
cls._parent_pid = os.getpid()
cls._sub_process_started = SafeEvent()
cls._sub_process_started.clear()
cls._main_process_task_id = task.id
# setup
for d in BackgroundMonitor._instances.get(id(task.id), []):
d.set_subprocess_mode()
# ToDo: solve for standalone spawn subprocess
            # prefer os.fork, because multiprocessing.Process adds an atexit callback, which might later be invalid.
cls.__start_subprocess_os_fork(task_obj_id=id(task.id))
# if ForkContext is not None and isinstance(get_context(), ForkContext):
# cls.__start_subprocess_forkprocess(task_obj_id=id(task.id))
# else:
# cls.__start_subprocess_os_fork(task_obj_id=id(task.id))
# wait until subprocess is up
if wait_for_subprocess:
cls._sub_process_started.wait()
@classmethod
def __start_subprocess_os_fork(cls, task_obj_id):
process_args = (task_obj_id, cls._sub_process_started, os.getpid())
BackgroundMonitor._main_process = os.fork()
# check if we are the child process
if BackgroundMonitor._main_process == 0:
# update to the child process pid
BackgroundMonitor._main_process = os.getpid()
BackgroundMonitor._main_process_proc_obj = psutil.Process(BackgroundMonitor._main_process)
cls._background_process_start(*process_args)
# force to leave the subprocess
leave_process(0)
return
# update main process object (we are now in the parent process, and we update on the child's subprocess pid)
# noinspection PyBroadException
try:
BackgroundMonitor._main_process_proc_obj = psutil.Process(BackgroundMonitor._main_process)
except Exception:
# if we fail for some reason, do not crash, switch to thread mode when you can
BackgroundMonitor._main_process_proc_obj = None
@classmethod
def __start_subprocess_forkprocess(cls, task_obj_id):
_main_process = Process(
target=cls._background_process_start,
args=(task_obj_id, cls._sub_process_started, os.getpid())
)
_main_process.daemon = True
# Hack allow to create daemon subprocesses (even though python doesn't like it)
un_daemonize = False
# noinspection PyBroadException
try:
from multiprocessing import current_process
if current_process()._config.get('daemon'): # noqa
un_daemonize = current_process()._config.get('daemon') # noqa
current_process()._config['daemon'] = False # noqa
except BaseException:
pass
# try to start the background process, if we fail retry again, or crash
for i in range(4):
try:
_main_process.start()
break
except BaseException:
if i < 3:
sleep(1)
continue
raise
BackgroundMonitor._main_process = _main_process.pid
BackgroundMonitor._main_process_proc_obj = psutil.Process(BackgroundMonitor._main_process)
if un_daemonize:
# noinspection PyBroadException
try:
from multiprocessing import current_process
current_process()._config['daemon'] = un_daemonize # noqa
except BaseException:
pass
@classmethod
def _background_process_start(cls, task_obj_id, event_start=None, parent_pid=None):
# type: (int, Optional[SafeEvent], Optional[int]) -> None
is_debugger_running = bool(getattr(sys, 'gettrace', None) and sys.gettrace())
# make sure we update the pid to our own
cls._main_process = os.getpid()
cls._main_process_proc_obj = psutil.Process(cls._main_process)
# restore original signal, this will prevent any deadlocks
# Do not change the exception we need to catch base exception as well
# noinspection PyBroadException
try:
from ... import Task
# make sure we do not call Task.current_task() it will create a Task object for us on a subprocess!
# noinspection PyProtectedMember
if Task._has_current_task_obj():
# noinspection PyProtectedMember
Task.current_task()._remove_at_exit_callbacks()
except: # noqa
pass
# if a debugger is running, wait for it to attach to the subprocess
if is_debugger_running:
sleep(3)
instances = BackgroundMonitor._instances.get(task_obj_id, [])
# launch all the threads
for d in instances:
d._start()
if cls._sub_process_started:
cls._sub_process_started.set()
if event_start:
event_start.set()
# wait until we are signaled
for i in instances:
            # DO NOT CHANGE, we need to catch base exception in case the process gets killed
try:
while i._thread is None or (i._thread and i._thread.is_alive()):
# thread is still not up
if i._thread is None:
sleep(0.1)
continue
# noinspection PyBroadException
try:
p = psutil.Process(parent_pid)
parent_alive = p.is_running() and p.status() != psutil.STATUS_ZOMBIE
except Exception:
parent_alive = False
# if parent process is not here we should just leave!
if not parent_alive:
return
                    # DO NOT CHANGE, we need to catch base exception in case the process gets killed
try:
# timeout so we can detect if the parent process got killed.
i._thread.join(timeout=30.)
except: # noqa
break
except: # noqa
pass
# we are done, leave process
return
def is_alive(self):
if not self.is_subprocess_mode():
return isinstance(self._thread, Thread) and self._thread.is_alive()
if self.get_at_exit_state():
return self.is_subprocess_alive() and self._thread
return self.is_subprocess_alive() and \
self._thread and \
self._start_ev.is_set() and \
not self._done_ev.is_set()
@classmethod
def _fast_is_subprocess_alive(cls):
if not cls._main_process_proc_obj:
return False
# we have to assume the process actually exists, so we optimize for
# just getting the object and status.
# noinspection PyBroadException
try:
return cls._main_process_proc_obj.is_running() and \
cls._main_process_proc_obj.status() != psutil.STATUS_ZOMBIE
except Exception:
return False
@classmethod
def is_subprocess_alive(cls, task=None):
if not cls._main_process or (task and cls._main_process_task_id != task.id):
return False
# noinspection PyBroadException
try:
p = psutil.Process(cls._main_process)
return p.is_running() and p.status() != psutil.STATUS_ZOMBIE
except Exception:
current_pid = cls._main_process
if not current_pid:
return False
try:
parent = psutil.Process(cls._parent_pid)
except psutil.Error:
# could not find parent process id
                return False
for child in parent.children(recursive=True):
# kill ourselves last (if we need to)
if child.pid == current_pid:
return child.is_running() and child.status() != psutil.STATUS_ZOMBIE
return False
def is_subprocess_mode(self):
return self._subprocess is not False and \
bool(self._main_process) and self._task_id == self._main_process_task_id
def _get_instances(self):
return self._instances.setdefault(self._task_obj_id, [])
def _is_subprocess_mode_and_not_parent_process(self):
return self.is_subprocess_mode() and self._parent_pid != os.getpid()
def is_subprocess_mode_and_parent_process(self):
return self.is_subprocess_mode() and self._parent_pid == os.getpid()
def _is_thread_mode_and_not_main_process(self):
if self.is_subprocess_mode():
return False
from ... import Task
# noinspection PyProtectedMember
return Task._Task__is_subprocess()
@classmethod
def is_subprocess_enabled(cls, task=None):
return bool(cls._main_process) and (not task or task.id == cls._main_process_task_id)
@classmethod
def clear_main_process(cls, task):
if BackgroundMonitor._main_process_task_id != task.id:
return
cls.wait_for_sub_process(task)
BackgroundMonitor._main_process = None
BackgroundMonitor._main_process_proc_obj = None
BackgroundMonitor._main_process_task_id = None
BackgroundMonitor._parent_pid = None
BackgroundMonitor._sub_process_started = None
BackgroundMonitor._instances = {}
SingletonThreadPool.clear()
@classmethod
def wait_for_sub_process(cls, task, timeout=None):
if not cls.is_subprocess_enabled(task=task):
return
for d in BackgroundMonitor._instances.get(id(task.id), []):
d.stop()
tic = time()
while cls.is_subprocess_alive(task=task) and (not timeout or time()-tic < timeout):
sleep(0.03)
@classmethod
def set_at_exit_state(cls, state=True):
cls._at_exit = bool(state)
@classmethod
def get_at_exit_state(cls):
return cls._at_exit
def leave_process(status=0):
# type: (int) -> None
"""
Exit current process with status-code (status)
:param status: int exit code
"""
try:
sys.exit(status or 0)
except: # noqa
# ipython/jupyter notebook will not allow to call sys.exit
# we have to call the low level function
os._exit(status or 0) # noqa
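# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# BackgroundMonitor is meant to be subclassed: _daemon_step() runs every `wait_period`
# seconds, either in a thread or in the shared reporting subprocess. The subclass below is
# hypothetical and assumes `task` is a clearml Task-like object (exposing .id and
# .is_main_task()); it only shows the expected shape of an implementation.
class _ExampleHeartbeatMonitor(BackgroundMonitor):
    def __init__(self, task, wait_period=30.0):
        super(_ExampleHeartbeatMonitor, self).__init__(task=task, wait_period=wait_period)
    def _daemon_step(self):
        # called periodically until stop() is requested; keep this short and non-blocking
        sys.stderr.write("heartbeat from pid {}\n".format(os.getpid()))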
|
f8e0a532a300a637552c1d6efaa9d425f2f85786
|
0711bf74bec3c4e34101a9a76d1b55058ae295f0
|
/skgstat/plotting/variogram_plot.py
|
5b5f4b2beeb2b5713358c76e5e2c88ae9d3f215f
|
[
"MIT"
] |
permissive
|
mmaelicke/scikit-gstat
|
03282ac578c15491c8a78676266f7631d71e0946
|
01e13feea26b9d22312516d2e1167b3d9881ad00
|
refs/heads/main
| 2023-08-17T23:34:48.240130
| 2023-08-01T06:13:56
| 2023-08-01T06:13:56
| 98,853,365
| 196
| 54
|
MIT
| 2023-08-22T23:23:11
| 2017-07-31T05:59:40
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 5,433
|
py
|
variogram_plot.py
|
import numpy as np
import matplotlib.pyplot as plt
try:
from plotly.subplots import make_subplots
import plotly.graph_objects as go
except ImportError:
pass
def __calculate_plot_data(variogram):
# get the parameters
_bins = variogram.bins
_exp = variogram.experimental
x = np.linspace(0, np.nanmax(_bins), 100)
# apply the model
y = variogram.transform(x)
# handle the relative experimental variogram
if variogram.normalized:
_bins /= np.nanmax(_bins)
y /= np.max(_exp)
_exp /= np.nanmax(_exp)
x /= np.nanmax(x)
return x, y, _bins, _exp
def matplotlib_variogram_plot(
variogram,
axes=None,
grid=True,
show=True,
hist=True
):
# get the plotting data
x, y, _bins, _exp = __calculate_plot_data(variogram)
# do the plotting
if axes is None:
if hist:
fig = plt.figure(figsize=(8, 5))
ax1 = plt.subplot2grid((5, 1), (1, 0), rowspan=4)
ax2 = plt.subplot2grid((5, 1), (0, 0), sharex=ax1)
fig.subplots_adjust(hspace=0)
else:
fig, ax1 = plt.subplots(1, 1, figsize=(8, 4))
ax2 = None
elif isinstance(axes, (list, tuple, np.ndarray)):
ax1, ax2 = axes
fig = ax1.get_figure()
else:
ax1 = axes
ax2 = None
fig = ax1.get_figure()
# ------------------------
# plot Variograms model
ax1.plot(_bins, _exp, '.b')
ax1.plot(x, y, '-g')
# ax limits
if variogram.normalized:
ax1.set_xlim([0, 1.05])
ax1.set_ylim([0, 1.05])
# grid settings
if grid:
ax1.grid(False)
ax1.vlines(
_bins,
*ax1.axes.get_ybound(),
colors=(.85, .85, .85),
linestyles='dashed'
)
# always print error bars above grid
conf = variogram._experimental_conf_interval
if conf is not None:
lo = conf[:, 1] - conf[:, 0]
up = conf[:, 2] - conf[:, 1]
yerr = np.column_stack((lo, up)).T
ax1.errorbar(_bins, _exp, fmt='.b', yerr=yerr)
# annotation
ax1.axes.set_ylabel('semivariance (%s)' % variogram._estimator.__name__)
ax1.axes.set_xlabel('Lag (-)')
# ------------------------
# plot histogram
if ax2 is not None and hist:
# calc the histogram
_count = np.fromiter(
(g.size for g in variogram.lag_classes()), dtype=int
)
# set the sum of hist bar widths to 70% of the x-axis space
w = (np.max(_bins) * 0.7) / len(_count)
# plot
ax2.bar(_bins, _count, width=w, align='center', color='red')
# adjust
plt.setp(ax2.axes.get_xticklabels(), visible=False)
ax2.axes.set_yticks(ax2.axes.get_yticks()[1:])
# need a grid?
if grid: # pragma: no cover
ax2.grid(False)
ax2.vlines(
_bins,
*ax2.axes.get_ybound(),
colors=(.85, .85, .85),
linestyles='dashed'
)
        # annotate
ax2.axes.set_ylabel('N')
# show the figure
if show: # pragma: no cover
fig.show()
return fig
def plotly_variogram_plot(
variogram,
fig=None,
grid=True,
show=True,
hist=True
):
# get the plotting data
x, y, _bins, _exp = __calculate_plot_data(variogram)
# create the figure
if fig is None:
if hist:
fig = make_subplots(
rows=5, cols=1, shared_xaxes=True, vertical_spacing=0.0,
specs=[
[{}], [{'rowspan': 4}], [None], [None], [None]
]
)
else:
fig = make_subplots(rows=1, cols=1)
elif isinstance(fig, go.Figure):
pass
else:
raise ValueError('axes has to be None or a plotly.Figure.')
    # handle error bars on the experimental variogram
conf = variogram._experimental_conf_interval
if conf is not None:
        # match the matplotlib backend: `array` is the upper error, `arrayminus` the lower
        error_y = dict(
            type='data',
            symmetric=False,
            array=conf[:, 2] - conf[:, 1],
            arrayminus=conf[:, 1] - conf[:, 0]
        )
else:
error_y = None
# main plot
fig.add_trace(
go.Scatter(
x=_bins,
y=_exp,
error_y=error_y,
mode='markers',
marker=dict(color='blue'),
name='Experimental'
),
row=2 if hist else 1, col=1
)
fig.add_trace(
go.Scatter(
x=x,
y=y,
mode='lines',
marker=dict(color='green'),
name='%s model' % variogram.model.__name__
),
row=2 if hist else 1, col=1
)
# update axis title
fig.update_xaxes(title_text='Lag [-]', row=2 if hist else 1, col=1)
fig.update_yaxes(
title_text='semivariance (%s)' % variogram.estimator.__name__,
row=2 if hist else 1, col=1
)
# hist
if hist:
# calculate
_count = np.fromiter(
(g.size for g in variogram.lag_classes()),
dtype=int
)
fig.add_trace(
go.Bar(
x=_bins,
y=_count,
marker=dict(color='red'),
name='Histogram'
)
)
# title
fig.update_yaxes(title_text='# of pairs', row=1, col=1)
if show:
fig.show()
return fig
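# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# These backends are normally dispatched through Variogram.plot(), but they can also be
# called directly with a fitted Variogram. The data below is synthetic and the call is a
# sketch only; it assumes scikit-gstat's usual Variogram(coordinates, values, ...) API.
def _example_direct_backend_call():  # pragma: no cover
    from skgstat import Variogram
    rng = np.random.default_rng(42)
    coords = rng.uniform(0, 100, size=(150, 2))
    values = rng.normal(10.0, 2.0, size=150)
    vario = Variogram(coords, values, n_lags=15)
    # show=False returns the figure without displaying it (handy in scripts and tests)
    return matplotlib_variogram_plot(vario, grid=True, show=False, hist=True)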
|
e126b1cf172f3d1e3c1d87314edb84ffbaf7a738
|
dac3b9a30096080275c9de63f9c8c081305a837e
|
/src/utils/log.py
|
21c8ca402f1d5223245398d44702d89b64ecec54
|
[
"MIT"
] |
permissive
|
Karmenzind/fp-server
|
6d7caee8bc6b8f1fc092ce1f1f66d39e39a07c6b
|
931fca8fab9d7397c52cf9e76a76b1c60e190403
|
refs/heads/master
| 2023-05-31T05:06:22.985030
| 2021-12-15T06:10:04
| 2021-12-15T06:10:04
| 135,252,696
| 180
| 46
|
MIT
| 2023-05-25T17:17:45
| 2018-05-29T06:42:59
|
Python
|
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
log.py
|
# -*- coding:utf-8 -*-
"""
Logging helpers (log printing)
"""
import logging
import logging.handlers
import os
import sys
from tornado import log
from tornado.options import options
def initLogger(log_level='debug', log_path=None, logfile_name=None):
""" 初始化日志输出
@param log_level 日志级别 debug info
@param log_path 日志输出路径
@param logfile_name 日志文件名
"""
if log_level == 'info':
options.logging = 'info'
else:
options.logging = 'debug'
logger = logging.getLogger()
if logfile_name:
if not os.path.isdir(log_path):
os.makedirs(log_path)
logfile = os.path.join(log_path, logfile_name)
print('init logger ...:', logfile)
handler = logging.handlers.TimedRotatingFileHandler(
logfile, 'midnight')
else:
handler = logging.StreamHandler()
fmt_str = '%(levelname)1.1s [%(asctime)s] %(message)s'
fmt = log.LogFormatter(fmt=fmt_str, datefmt=None)
handler.setFormatter(fmt)
logger.addHandler(handler)
def info(*args, **kwargs):
    msg_header, kwargs = _log_msg_header(*args, **kwargs)
    logging.info(_log(msg_header, *args, **kwargs))
def warn(*args, **kwargs):
msg_header, kwargs = _log_msg_header(*args, **kwargs)
logging.warning(_log(msg_header, *args, **kwargs))
def debug(*args, **kwargs):
msg_header, kwargs = _log_msg_header(*args, **kwargs)
logging.debug(_log(msg_header, *args, **kwargs))
def error(*args, **kwargs):
logging.error('*' * 40)
msg_header, kwargs = _log_msg_header(*args, **kwargs)
logging.error(_log(msg_header, *args, **kwargs))
logging.error('*' * 40)
exception = error
def _log(msg_header, *args, **kwargs):
_log_msg = msg_header
for l in args:
if type(l) == tuple:
ps = str(l)
else:
try:
ps = '%r' % l
except:
ps = str(l)
if type(l) == str:
_log_msg += ps[1:-1] + ' '
else:
_log_msg += ps + ' '
if len(kwargs) > 0:
_log_msg += str(kwargs)
return _log_msg
def _log_msg_header(*args, **kwargs):
""" 打印日志的message头
@param kwargs['caller'] 调用的方法所属类对象
@param kwargs['session_id'] 调用的方法所带的session_id
* NOTE: logger.xxx(... caller=self) for instance method
logger.xxx(... caller=cls) for @classmethod
"""
cls_name = ''
func_name = sys._getframe().f_back.f_back.f_code.co_name
session_id = '-'
try:
_caller = kwargs.get('caller', None)
if _caller:
if not hasattr(_caller, '__name__'):
_caller = _caller.__class__
cls_name = _caller.__name__
del kwargs['caller']
session_id = kwargs.get('session_id', '-')
if session_id:
del kwargs['session_id']
except:
pass
finally:
msg_header = '[{cls_name}.{func_name}] [{session_id}] '.format(
cls_name=cls_name,
func_name=func_name,
session_id=session_id,
)
return msg_header, kwargs
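# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A hypothetical caller initializes the logger once, then uses the module-level helpers;
# passing caller=self (or caller=cls) fills the class name in the message header.
def _example_usage():  # illustrative only, never invoked by the server
    initLogger(log_level='debug')  # stream handler, debug level
    info('server started on port', 12345)
    class _Spider(object):
        def crawl(self, url):
            debug('fetching', url, caller=self)  # message header becomes "[_Spider.crawl] [-]"
            try:
                raise RuntimeError('boom')
            except Exception as e:
                error('crawl failed:', e, caller=self)
    _Spider().crawl('http://example.com')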
|
ccd3224867da89519fdf6a093ed9cd57933e1611
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/extensions/common/api/externs_checker_test.py
|
62623d97b2a21d5c0f73ccd25a2bcbb193ee66ae
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
externs_checker_test.py
|
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from externs_checker import ExternsChecker
sys.path.append(
os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', '..'))
from PRESUBMIT_test_mocks import (MockInputApi, MockOutputApi, MockFile,
MockChange)
class ExternsCheckerTest(unittest.TestCase):
API_PAIRS = {'a': '1', 'b': '2', 'c': '3'}
def _runChecks(self, files, exists=lambda f: True):
input_api = MockInputApi()
input_api.os_path.exists = exists
input_api.files = [MockFile(f, '') for f in files]
input_api.change = MockChange(input_api.files)
output_api = MockOutputApi()
checker = ExternsChecker(input_api, output_api, self.API_PAIRS)
return checker.RunChecks()
def testModifiedSourceWithoutModifiedExtern(self):
results = self._runChecks(['b', 'test', 'random'])
self.assertEquals(1, len(results))
self.assertEquals(1, len(results[0].items))
self.assertEquals('b', results[0].items[0])
self.assertEquals(
'To update the externs, run:\n'
' src/ $ python tools/json_schema_compiler/compiler.py b --root=. '
'--generator=externs > 2',
results[0].long_text)
def testModifiedSourceWithModifiedExtern(self):
results = self._runChecks(['b', '2', 'test', 'random'])
self.assertEquals(0, len(results))
def testModifiedMultipleSourcesWithNoModifiedExterns(self):
results = self._runChecks(['b', 'test', 'c', 'random'])
self.assertEquals(1, len(results))
self.assertEquals(2, len(results[0].items))
self.assertTrue('b' in results[0].items)
self.assertTrue('c' in results[0].items)
self.assertEquals(
'To update the externs, run:\n'
' src/ $ python tools/json_schema_compiler/compiler.py <source_file> '
'--root=. --generator=externs > <output_file>',
results[0].long_text)
def testModifiedMultipleSourcesWithOneModifiedExtern(self):
results = self._runChecks(['b', 'test', 'c', 'random', '2'])
self.assertEquals(1, len(results))
self.assertEquals(1, len(results[0].items))
self.assertEquals('c', results[0].items[0])
def testApiFileDoesNotExist(self):
exists = lambda f: f in ['a', 'b', 'c', '1', '2']
with self.assertRaises(OSError) as e:
self._runChecks(['a'], exists)
self.assertEqual('Path Not Found: 3', str(e.exception))
if __name__ == '__main__':
unittest.main()
|
8b902bfbc8da1f8f8433e696972e621fe15cfee3
|
aa4c2e6ba174ac8de4b0b053f54a26006f74d682
|
/cpp/transfer-learning/convert.py
|
59dbe4d8e7c4c3e50aa8dd80e37b33c92ee5c600
|
[
"BSD-3-Clause"
] |
permissive
|
pytorch/examples
|
cde8a193c6e25e918a16aabf049d887ce3e969e0
|
13009eff7a80ebcf6ae89ed217d5d176bd3e019d
|
refs/heads/main
| 2023-08-28T10:01:58.283401
| 2023-08-20T05:29:44
| 2023-08-20T05:29:44
| 66,424,871
| 22,842
| 11,294
|
BSD-3-Clause
| 2023-09-14T01:25:01
| 2016-08-24T03:12:48
|
Python
|
UTF-8
|
Python
| false
| false
| 556
|
py
|
convert.py
|
"""
This Python script converts the pre-trained network into a TorchScript ScriptModule
"""
import torch
from torchvision import models
# Download and load the pre-trained model
model = models.resnet18(pretrained=True)
# Freeze the pre-trained weights (no gradient updates)
for param in model.parameters():
param.requires_grad = False
# Keep every layer except the final FC layer
resnet18 = torch.nn.Sequential(*list(model.children())[:-1])
example_input = torch.rand(1, 3, 224, 224)
script_module = torch.jit.trace(resnet18, example_input)
script_module.save('resnet18_without_last_layer.pt')
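# --- Illustrative round-trip check (added for clarity; not part of the original script) ---
# The traced module can be reloaded here (or from C++ via torch::jit::load) and used as a
# fixed feature extractor; resnet18 without its final FC layer outputs a [1, 512, 1, 1] tensor.
loaded_module = torch.jit.load('resnet18_without_last_layer.pt')
features = loaded_module(example_input)
print('feature shape:', tuple(features.shape))  # expected: (1, 512, 1, 1)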
|
43f289bd6c29508b4765c06e6a31594eb1b0350e
|
82b05fc158acbb10263a9e2415caf31ed4ea1ff4
|
/graphbrain/__init__.py
|
2de606c233344faaaf3b42dea2f3711630fcc5ec
|
[
"MIT"
] |
permissive
|
graphbrain/graphbrain
|
e655de5c9f7d755b7a34649a461762d7def501ff
|
8cb019eeea4bfba036f66ca742f1b4c3fc2c9c6a
|
refs/heads/master
| 2023-09-04T04:07:04.985162
| 2023-07-19T12:41:20
| 2023-07-19T12:41:20
| 51,751,006
| 534
| 60
|
MIT
| 2023-03-10T21:32:47
| 2016-02-15T11:25:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,048
|
py
|
__init__.py
|
from contextlib import contextmanager
from graphbrain.hyperedge import hedge
import graphbrain.memory.leveldb
import graphbrain.memory.sqlite
def hgraph(locator_string):
"""Returns an instance of Hypergraph identified by the locator_string.
The hypergraph will be created if it does not exist.
    The locator_string can be the path to an SQLite3 file or LevelDB folder.
"""
filename_parts = locator_string.split('.')
if len(filename_parts) > 1:
extension = filename_parts[-1]
if extension in {'sqlite', 'sqlite3', 'db'}:
return graphbrain.memory.sqlite.SQLite(locator_string)
elif extension in {'leveldb', 'hg'}:
return graphbrain.memory.leveldb.LevelDB(locator_string)
raise RuntimeError('Unknown hypergraph database type.')
@contextmanager
def hopen(*args, **kwds):
hg = hgraph(*args, **kwds)
hg.begin_transaction()
hg.batch_mode = True
try:
yield hg
finally:
hg.batch_mode = False
hg.end_transaction()
hg.close()
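# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The file extension of the locator string selects the backend; hopen() wraps the
# hypergraph in a transaction with batch mode enabled. The file name and the edge strings
# below are hypothetical, assuming the usual Hypergraph.add() API.
#
#   hg = hgraph('example.db')  # SQLite backend ('sqlite', 'sqlite3' or 'db')
#   hg.add(hedge('(is/P graphbrain/C great/C)'))
#   hg.close()
#
#   with hopen('example.db') as hg:  # writes are batched inside a transaction
#       hg.add(hedge('(plays/P mary/C chess/C)'))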
|
9d18cad747ec04884b62304347be211f957c399f
|
e7efae2b83216d9621bd93390959d652de779c3d
|
/silk/tests/test_unit.py
|
283b1f9f09760fa26964436d80ee3d2d3bc4ed00
|
[
"BSD-3-Clause",
"MIT",
"BSD-3-Clause-Modification",
"Unlicense",
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"CC0-1.0"
] |
permissive
|
DataDog/integrations-core
|
ee1886cc7655972b2791e6ab8a1c62ab35afdb47
|
406072e4294edff5b46b513f0cdf7c2c00fac9d2
|
refs/heads/master
| 2023-08-31T04:08:06.243593
| 2023-08-30T18:22:10
| 2023-08-30T18:22:10
| 47,203,045
| 852
| 1,548
|
BSD-3-Clause
| 2023-09-14T16:39:54
| 2015-12-01T16:41:45
|
Python
|
UTF-8
|
Python
| false
| false
| 8,173
|
py
|
test_unit.py
|
# (C) Datadog, Inc. 2023-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
from copy import deepcopy
from itertools import chain
import mock
import pytest
from datadog_checks.base import ConfigurationError
from datadog_checks.silk import SilkCheck
from datadog_checks.silk.metrics import BLOCKSIZE_METRICS, METRICS, READ_WRITE_METRICS, Metric
from .common import HOST, mock_get_data
pytestmark = [pytest.mark.unit]
def test_submit_system_state_error(instance, caplog):
caplog.set_level(logging.DEBUG)
check = SilkCheck('silk', {}, [instance])
check._get_data = mock.MagicMock(side_effect=[(None, 404)])
check.submit_system_state()
assert (
"Could not access system state and version info, got response code `404` from endpoint `system/state`"
in caplog.text
)
@pytest.mark.parametrize(
'get_data_url, expected_metrics, metrics_to_collect',
[
pytest.param(
'system__bs_breakdown=True.json', # `?` had to be removed to pass windows CI
[
'silk.system.block_size.io_ops.avg',
'silk.system.block_size.latency.inner',
'silk.system.block_size.latency.outer',
'silk.system.block_size.throughput.avg',
],
{
'stats/system?__bs_breakdown=True': Metric(
**{
'prefix': 'system.block_size',
'metrics': {
'iops_avg': 'io_ops.avg',
'latency_inner': 'latency.inner',
'latency_outer': 'latency.outer',
'throughput_avg': 'throughput.avg',
},
'tags': {
'resolution': 'resolution',
'bs': 'block_size',
},
}
)
},
id="system bs metrics",
),
pytest.param(
'volumes__bs_breakdown=True.json',
[
'silk.volume.block_size.io_ops.avg',
'silk.volume.block_size.latency.inner',
'silk.volume.block_size.latency.outer',
'silk.volume.block_size.throughput.avg',
],
{
'stats/volumes?__bs_breakdown=True': Metric(
**{
'prefix': 'volume.block_size',
'metrics': {
'iops_avg': ('io_ops.avg', 'gauge'),
'latency_inner': 'latency.inner',
'latency_outer': 'latency.outer',
'throughput_avg': 'throughput.avg',
},
'tags': {
'peer_k2_name': 'peer_name',
'volume_name': 'volume_name',
'resolution': 'resolution',
'bs': 'block_size',
},
}
)
},
id="volume bs metrics",
),
pytest.param(
'volumes__rw_breakdown=True.json',
[
'silk.volume.read.io_ops.avg',
'silk.volume.read.latency.inner',
'silk.volume.read.latency.outer',
'silk.volume.read.throughput.avg',
'silk.volume.write.io_ops.avg',
'silk.volume.write.latency.inner',
'silk.volume.write.latency.outer',
'silk.volume.write.throughput.avg',
],
{
'stats/volumes?__rw_breakdown=True': Metric(
**{
'prefix': 'volume',
'metrics': {
'iops_avg': ('io_ops.avg', 'gauge'),
'latency_inner': 'latency.inner',
'latency_outer': 'latency.outer',
'throughput_avg': 'throughput.avg',
},
'tags': {
'peer_k2_name': 'peer_name',
'volume_name': 'volume_name',
'resolution': 'resolution',
},
'field_to_name': {
'rw': {
'r': 'read',
'w': 'write',
}
},
}
)
},
id="volume rw metrics",
),
pytest.param(
'system__rw_breakdown=True.json',
[
'silk.system.read.io_ops.avg',
'silk.system.read.latency.inner',
'silk.system.read.latency.outer',
'silk.system.read.throughput.avg',
'silk.system.write.io_ops.avg',
'silk.system.write.latency.inner',
'silk.system.write.latency.outer',
'silk.system.write.throughput.avg',
],
{
'stats/system?__rw_breakdown=True': Metric(
**{
'prefix': 'system',
'metrics': {
'iops_avg': 'io_ops.avg',
'latency_inner': 'latency.inner',
'latency_outer': 'latency.outer',
'throughput_avg': 'throughput.avg',
},
'tags': {
'resolution': 'resolution',
},
'field_to_name': {
'rw': {
'r': 'read',
'w': 'write',
}
},
}
)
},
id="system rw metrics",
),
],
)
def test_bs_rw_metrics(aggregator, instance, get_data_url, expected_metrics, metrics_to_collect):
check = SilkCheck('silk', {}, [instance])
check._get_data = mock.MagicMock(side_effect=mock_get_data(get_data_url))
check.metrics_to_collect = metrics_to_collect
base_tags = ['silk_host:localhost:80', 'system_id:5501', 'system_name:K2-5501', 'test:silk']
check.collect_metrics(base_tags)
for metric in expected_metrics:
aggregator.assert_metric(metric)
for tag in base_tags:
aggregator.assert_metric_has_tag(metric, tag)
@pytest.mark.parametrize(
'enable_rw, enable_bs, extra_metrics_to_collect',
[
pytest.param(False, False, {}, id="both disabled"),
pytest.param(True, True, dict(chain(BLOCKSIZE_METRICS.items(), READ_WRITE_METRICS.items())), id="both enabled"),
pytest.param(False, True, deepcopy(BLOCKSIZE_METRICS), id="bs enabled"),
pytest.param(True, False, deepcopy(READ_WRITE_METRICS), id="rw enabled"),
],
)
def test_metrics_to_collect(instance, enable_rw, enable_bs, extra_metrics_to_collect):
inst = deepcopy(instance)
inst['enable_read_write_statistics'] = enable_rw
inst['enable_blocksize_statistics'] = enable_bs
check = SilkCheck('silk', {}, [inst])
expected_metrics_to_collect = deepcopy(METRICS)
expected_metrics_to_collect.update(extra_metrics_to_collect)
assert sorted(check.metrics_to_collect.keys()) == sorted(expected_metrics_to_collect.keys())
def test_unreachable_endpoint(dd_run_check, aggregator):
invalid_instance = {'host_address': 'http://{}:81'.format(HOST)}
check = SilkCheck('silk', {}, [invalid_instance])
with pytest.raises(Exception):
dd_run_check(check)
aggregator.assert_service_check('silk.can_connect', SilkCheck.CRITICAL)
def test_incorrect_config(dd_run_check):
invalid_instance = {'host_addres': 'localhost'} # misspelled required parameter
with pytest.raises(ConfigurationError):
SilkCheck('silk', {}, [invalid_instance])
|
8eb3e4d98902d64e0ab9e39ec6e8741de91186d7
|
c4039d6c964407d74d8625d340d90586a611c3c7
|
/utils/data_loader.py
|
cae393b84b1ab6fa6e663e133b69363156a4ed26
|
[
"MIT"
] |
permissive
|
Zeleni9/pytorch-wgan
|
2874878a1c5947bfd94e83838f2f6c6f7394804e
|
d5b9b4db573f2efbfa56e115d46b28d1f0465312
|
refs/heads/master
| 2023-04-06T03:55:31.771605
| 2022-01-06T14:30:50
| 2022-01-06T14:30:50
| 122,645,948
| 612
| 155
|
MIT
| 2023-03-25T01:34:09
| 2018-02-23T16:32:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,211
|
py
|
data_loader.py
|
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.utils.data as data_utils
from utils.fashion_mnist import MNIST, FashionMNIST
def get_data_loader(args):
if args.dataset == 'mnist':
trans = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, ), (0.5, )),
])
train_dataset = MNIST(root=args.dataroot, train=True, download=args.download, transform=trans)
test_dataset = MNIST(root=args.dataroot, train=False, download=args.download, transform=trans)
elif args.dataset == 'fashion-mnist':
trans = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, ), (0.5, )),
])
train_dataset = FashionMNIST(root=args.dataroot, train=True, download=args.download, transform=trans)
test_dataset = FashionMNIST(root=args.dataroot, train=False, download=args.download, transform=trans)
elif args.dataset == 'cifar':
trans = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
train_dataset = dset.CIFAR10(root=args.dataroot, train=True, download=args.download, transform=trans)
test_dataset = dset.CIFAR10(root=args.dataroot, train=False, download=args.download, transform=trans)
elif args.dataset == 'stl10':
trans = transforms.Compose([
transforms.Resize(32),
transforms.ToTensor(),
])
train_dataset = dset.STL10(root=args.dataroot, split='train', download=args.download, transform=trans)
test_dataset = dset.STL10(root=args.dataroot, split='test', download=args.download, transform=trans)
# Check if everything is ok with loading datasets
assert train_dataset
assert test_dataset
train_dataloader = data_utils.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
test_dataloader = data_utils.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=True)
return train_dataloader, test_dataloader
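# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# get_data_loader only needs an object exposing `dataset`, `dataroot`, `download` and
# `batch_size`; an argparse.Namespace (as produced by the training scripts) works fine.
def _example_get_mnist_loaders():
    from argparse import Namespace
    args = Namespace(dataset='mnist', dataroot='./data', download=True, batch_size=64)
    train_loader, test_loader = get_data_loader(args)
    images, labels = next(iter(train_loader))
    return images.shape  # expected: torch.Size([64, 1, 32, 32])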
|
36e8826591d0ea93dfdeb63f273ec9eae2c0ebd8
|
4dadda8e524c1b745f68d8d9bb1fb2840c91bb84
|
/tools/legacy/trng_test.py
|
dc1770eaabc7ecec0a6c09777a60cefcd4a543ba
|
[
"Apache-2.0"
] |
permissive
|
betrusted-io/xous-core
|
74e684833378c44718c68a993357477812444607
|
f17ce555f7539b534b910fe832d273afe5ad27fc
|
refs/heads/main
| 2023-09-03T04:15:16.852337
| 2023-08-14T06:22:19
| 2023-08-14T06:22:19
| 246,203,193
| 439
| 72
| null | 2023-09-12T11:56:31
| 2020-03-10T03:52:58
|
Rust
|
UTF-8
|
Python
| false
| false
| 13,419
|
py
|
trng_test.py
|
#! /usr/bin/env python3
import argparse
import usb.core
import usb.util
import array
import sys
import hashlib
import csv
import time
from progressbar.bar import ProgressBar
class PrecursorUsb:
def __init__(self, dev):
self.dev = dev
self.RDSR = 0x05
self.RDSCUR = 0x2B
self.RDID = 0x9F
self.WREN = 0x06
self.WRDI = 0x04
self.SE4B = 0x21
self.BE4B = 0xDC
self.PP4B = 0x12
self.registers = {}
self.regions = {}
self.gitrev = ''
self.vexdbg_addr = None
def register(self, name):
return int(self.registers[name], 0)
def peek(self, addr, display=False):
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
for attempt in range(10):
try:
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
except Exception as e:
try:
self.dev.reset()
except usb.core.USBTimeoutError:
pass
time.sleep(3.0)
else:
break
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
if display == True:
sys.stderr.write("0x{:08x}\n".format(read_data))
return read_data
def poke(self, addr, wdata, check=False, display=False):
if check == True:
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
sys.stderr.write("before poke: 0x{:08x}\n".format(read_data))
data = array.array('B', wdata.to_bytes(4, 'little'))
for attempt in range(10):
try:
numwritten = self.dev.ctrl_transfer(bmRequestType=(0x00 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
except Exception as e:
sys.stderr.write("error; resetting device\n")
try:
self.dev.reset()
except usb.core.USBTimeoutError:
pass
time.sleep(3.0)
else:
break
if check == True:
_dummy_s = '\x00'.encode('utf-8')
data = array.array('B', _dummy_s * 4)
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(addr & 0xffff), wIndex=((addr >> 16) & 0xffff),
data_or_wLength=data, timeout=500)
read_data = int.from_bytes(data.tobytes(), byteorder='little', signed=False)
sys.stderr.write("after poke: 0x{:08x}\n".format(read_data))
if display == True:
sys.stderr.write("wrote 0x{:08x} to 0x{:08x}\n".format(wdata, addr))
def burst_read(self, addr, len):
_dummy_s = '\x00'.encode('utf-8')
maxlen = 4096
ret = bytearray()
packet_count = len // maxlen
if (len % maxlen) != 0:
packet_count += 1
time.sleep(0.2) # this improves system stability, somehow
for pkt_num in range(packet_count):
# sys.stderr.write('.', end='')
cur_addr = addr + pkt_num * maxlen
if pkt_num == packet_count - 1:
if len % maxlen != 0:
bufsize = len % maxlen
else:
bufsize = maxlen
else:
bufsize = maxlen
data = array.array('B', _dummy_s * bufsize)
if self.vexdbg_addr != None:
self.poke(self.vexdbg_addr, 0x00020000)
time.sleep(0.1)
for attempt in range(10):
try:
numread = self.dev.ctrl_transfer(bmRequestType=(0x80 | 0x43), bRequest=0,
wValue=(cur_addr & 0xffff), wIndex=((cur_addr >> 16) & 0xffff),
data_or_wLength=data, timeout=60000)
except Exception as e:
sys.stderr.write("error; resetting device\n")
try:
self.dev.reset()
except:
pass
time.sleep(2.2)
else:
break
else:
sys.stderr.write("Burst read failed\n")
#exit(1)
if self.vexdbg_addr != None:
self.poke(self.vexdbg_addr, 0x02000000)
time.sleep(0.1)
if numread != bufsize:
sys.stderr.write("Burst read error: {} bytes requested, {} bytes read at 0x{:08x}\n".format(bufsize, numread, cur_addr))
else:
ret = ret + data
return ret
def burst_write(self, addr, data):
if len(data) == 0:
return
maxlen = 4096
packet_count = len(data) // maxlen
if (len(data) % maxlen) != 0:
packet_count += 1
for pkt_num in range(packet_count):
cur_addr = addr + pkt_num * maxlen
if pkt_num == packet_count - 1:
if len(data) % maxlen != 0:
bufsize = len(data) % maxlen
else:
bufsize = maxlen
else:
bufsize = maxlen
wdata = array.array('B', data[(pkt_num * maxlen):(pkt_num * maxlen) + bufsize])
numwritten = self.dev.ctrl_transfer(bmRequestType=(0x00 | 0x43), bRequest=0,
wValue=(cur_addr & 0xffff), wIndex=((cur_addr >> 16) & 0xffff),
data_or_wLength=wdata, timeout=500)
if numwritten != bufsize:
sys.stderr.write("Burst write error: {} bytes requested, {} bytes written at 0x{:08x}".format(bufsize, numwritten, cur_addr))
exit(1)
def ping_wdt(self):
self.poke(self.register('wdt_watchdog'), 1, display=False)
self.poke(self.register('wdt_watchdog'), 1, display=False)
def load_csrs(self):
LOC_CSRCSV = 0x20277000 # this address shouldn't change because it's how we figure out our version number
csr_data = self.burst_read(LOC_CSRCSV, 0x8000)
hasher = hashlib.sha512()
hasher.update(csr_data[:0x7FC0])
digest = hasher.digest()
if digest != csr_data[0x7fc0:]:
sys.stderr.write("Could not find a valid csr.csv descriptor on the device, aborting!\n")
exit(1)
csr_len = int.from_bytes(csr_data[:4], 'little')
csr_extracted = csr_data[4:4+csr_len]
decoded = csr_extracted.decode('utf-8')
# strip comments
stripped = []
for line in decoded.split('\n'):
if line.startswith('#') == False:
stripped.append(line)
# create database
csr_db = csv.reader(stripped)
for row in csr_db:
if len(row) > 1:
if 'csr_register' in row[0]:
self.registers[row[1]] = row[2]
if 'memory_region' in row[0]:
self.regions[row[1]] = [row[2], row[3]]
if 'git_rev' in row[0]:
self.gitrev = row[1]
sys.stderr.write("Using SoC {} registers\n".format(self.gitrev))
def auto_int(x):
return int(x, 0)
def main():
    parser = argparse.ArgumentParser(description="Pipe TRNG data out of a Xous 0.8/0.9 Precursor that is configured to run the test server")
parser.add_argument(
"--peek", required=False, help="Inspect an address", type=auto_int, metavar=('ADDR')
)
parser.add_argument(
"--poke", required=False, help="Write to an address", type=auto_int, nargs=2, metavar=('ADDR', 'DATA')
)
parser.add_argument(
"--check-poke", required=False, action='store_true', help="Read data before and after the poke"
)
    parser.add_argument(
        "--config", required=False, help="Print the descriptor", action='store_true'
    )
    parser.add_argument(
        "--force", required=False, action='store_true',
        help="Assume v0.8 firmware offsets when the SoC revision is not recognized"
    )
    args = parser.parse_args()
dev = usb.core.find(idProduct=0x5bf0, idVendor=0x1209)
if dev is None:
raise ValueError('Precursor device not found')
dev.set_configuration()
if args.config:
cfg = dev.get_active_configuration()
sys.stderr.write(str(cfg))
sys.stderr.write("\n")
pc_usb = PrecursorUsb(dev)
if args.peek:
pc_usb.peek(args.peek, display=True)
# sys.stderr.write(burst_read(dev, args.peek, 256).hex())
exit(0)
if args.poke:
addr, data = args.poke
pc_usb.poke(addr, data, check=args.check_poke, display=True)
# import os
# d = bytearray(os.urandom(8000))
# burst_write(dev, addr, d)
# r = burst_read(dev, addr, 8000)
# sys.stderr.write(r.hex())
# if d != r:
# sys.stderr.write("mismatch")
# else:
# sys.stderr.write("match")
exit(0)
pc_usb.load_csrs() # prime the CSR values
if "v0.8" in pc_usb.gitrev:
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
elif "v0.9" in pc_usb.gitrev:
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
elif args.force == True:
# try the v0.8 offsets
LOC_SOC = 0x00000000
LOC_STAGING= 0x00280000
LOC_LOADER = 0x00500000
LOC_KERNEL = 0x00980000
LOC_WF200 = 0x07F80000
LOC_EC = 0x07FCE000
LOC_AUDIO = 0x06340000
LEN_AUDIO = 0x01C40000
else:
sys.stderr.write("SoC is from an unknow rev '{}', use --force to continue anyways with v0.8 firmware offsets".format(pc_usb.load_csrs()))
exit(1)
vexdbg_addr = int(pc_usb.regions['vexriscv_debug'][0], 0)
pc_usb.vexdbg_addr = vexdbg_addr
#pc_usb.ping_wdt()
#sys.stderr.write("Halting CPU.")
#pc_usb.poke(vexdbg_addr, 0x00020000)
messible2_in = pc_usb.register('messible2_in')
messible_out = pc_usb.register('messible_out')
RAM_A = 0x4080_0000
RAM_B = 0x4088_0000
BURST_LEN = 512 * 1024
TIMEOUT = 30.0
phase = 0
last_phase = 0
blocks = 0
while True:
start_time = time.time()
sys.stderr.write("at phase {}, waiting for next buffer\n".format(phase))
while True:
remote_phase = pc_usb.peek(messible_out)
if remote_phase > phase:
break
time.sleep(0.5)
if time.time() > (start_time + TIMEOUT):
try:
pc_usb.poke(pc_usb.register('reboot_soc_reset'), 0xac, display=False)
except usb.core.USBError:
pass # we expect an error because we reset the SOC and that includes the USB core
time.sleep(3.0)
dev = usb.core.find(idProduct=0x5bf0, idVendor=0x1209)
dev.set_configuration()
pc_usb = PrecursorUsb(dev)
pc_usb.load_csrs() # prime the CSR values
#pc_usb.poke(vexdbg_addr, 0x02000000) # maybe the CPU is still halted, try resuming it
sys.stderr.write("timeout & reset\n")
phase = 0
last_phase = 0
remote_phase = pc_usb.peek(messible_out)
break
phase = remote_phase
pc_usb.poke(messible2_in, phase)
if last_phase != phase:
if (phase % 2) == 1:
sys.stderr.write("phase {} fetching RAM_A\n".format(phase))
page = pc_usb.burst_read(RAM_A, BURST_LEN)
sys.stdout.buffer.write(page)
sys.stderr.write("got page A {}\n".format(len(page)))
else:
sys.stderr.write("phase {} fetching RAM_B\n".format(phase))
page = pc_usb.burst_read(RAM_B, BURST_LEN)
sys.stdout.buffer.write(page)
sys.stderr.write("got page B {}\n".format(len(page)))
blocks += 1
else:
sys.stderr.write("phase didn't increment, not transferring identical block")
sys.stderr.write("at block {}".format(blocks))
#sys.stderr.write("Resuming CPU.")
#pc_usb.poke(vexdbg_addr, 0x02000000)
#sys.stderr.write("Resetting SOC...")
#try:
# pc_usb.poke(pc_usb.register('reboot_soc_reset'), 0xac, display=False)
#except usb.core.USBError:
# pass # we expect an error because we reset the SOC and that includes the USB core
# sys.stderr.write("If you need to run more commands, please unplug and re-plug your device in, as the Precursor USB core was just reset")
if __name__ == "__main__":
main()
exit(0)
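# --- Illustrative usage note (added for clarity; not part of the original script) ---
# The script streams raw TRNG pages to stdout and logs progress to stderr, so a typical
# capture-and-test run (file names and downstream tools are hypothetical examples) is:
#
#   python3 trng_test.py > trng_dump.bin     # capture raw entropy over USB
#   rngtest < trng_dump.bin                  # or feed the dump into dieharder / NIST STS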
|
663e1b41bb2150055a61534464c3016787cb1752
|
d46844ac1c4230579d6c87d800e07fb41bc99592
|
/pwncat/modules/linux/enumerate/system/process.py
|
eb7b9c5d699856ff839fbc1d9fbae0051407c318
|
[
"MIT"
] |
permissive
|
calebstewart/pwncat
|
14ade3e424fb70ce3e62b8b5c5053959515799e7
|
37f04d4e16ff47c7fd70e95162f9fccd327cca7e
|
refs/heads/master
| 2023-08-14T04:27:04.773361
| 2022-03-21T20:35:00
| 2022-03-21T20:35:00
| 261,925,766
| 2,177
| 267
|
MIT
| 2023-05-19T04:33:17
| 2020-05-07T02:02:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,779
|
py
|
process.py
|
#!/usr/bin/env python3
import shlex
from typing import List
from pwncat.db import Fact
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class ProcessData(Fact):
"""A single process from the `ps` output"""
def __init__(self, source, uid, username, pid, ppid, argv):
super().__init__(source=source, types=["system.process"])
self.uid: int = uid
self.username: str = username
self.pid: int = pid
self.ppid: int = ppid
self.argv: List[str] = argv
def title(self, session):
if self.uid == 0:
color = "red"
elif self.uid < 1000:
color = "blue"
else:
color = "magenta"
# Color our current user differently
if self.uid == session.platform.getuid():
color = "lightblue"
result = f"[{color}]{self.username:>10s}[/{color}] "
result += f"[magenta]{self.pid:<7d}[/magenta] "
result += f"[lightblue]{self.ppid:<7d}[/lightblue] "
result += f"[cyan]{shlex.join(self.argv)}[/cyan]"
return result
class Module(EnumerateModule):
"""
Extract the currently running processes. This will parse the
process information and give you access to the user, parent
process, command line, etc as with the `ps` command.
This is only run once unless manually cleared.
"""
PROVIDES = ["system.process"]
PLATFORM = [Linux]
SCHEDULE = Schedule.ONCE
def enumerate(self, session):
try:
proc = session.platform.run(
"ps -eo pid,ppid,uid,user,command --no-header -ww",
capture_output=True,
text=True,
check=True,
)
if proc.stdout:
# Iterate over each process
for line in proc.stdout.split("\n"):
if line:
line = line.strip()
entities = line.split()
try:
pid, ppid, uid, username, *argv = entities
except ValueError:
# We couldn't parse some line for some reason?
continue
command = " ".join(argv)
# Kernel threads aren't helpful for us
if command.startswith("[") and command.endswith("]"):
continue
uid = int(uid)
pid = int(pid)
ppid = int(ppid)
yield ProcessData(self.name, uid, username, pid, ppid, argv)
except (FileNotFoundError, PermissionError):
return
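# --- Editor's sketch (not part of pwncat's API) --------------------------------
# The module above splits each `ps -eo pid,ppid,uid,user,command` line on
# whitespace and treats everything after the fourth field as the argv. The
# stand-alone helper below demonstrates that parsing on a plain string; the
# sample line in the comment is hypothetical.
def _parse_ps_line(line: str):
    pid, ppid, uid, username, *argv = line.strip().split()
    return int(pid), int(ppid), int(uid), username, argv

# Example:
#   _parse_ps_line("1234 1 1000 alice /usr/bin/python3 script.py")
#   -> (1234, 1, 1000, 'alice', ['/usr/bin/python3', 'script.py'])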
|
edb65d807cd8b4bd3d90b99a512a318e04700baa
|
5130754859e274cd06f63260439e5203c2000a11
|
/stubs/firebase_admin/__init__.pyi
|
51fa236c8f67fb736878e35affa48a5fc66585db
|
[
"Apache-2.0"
] |
permissive
|
oppia/oppia
|
8ebc9c7c7f2b336e9a79ce04533abe3956f48cbe
|
d16fdf23d790eafd63812bd7239532256e30a21d
|
refs/heads/develop
| 2023-09-04T07:50:13.661276
| 2023-09-03T09:21:32
| 2023-09-03T09:21:32
| 40,687,563
| 6,172
| 4,666
|
Apache-2.0
| 2023-09-14T18:25:11
| 2015-08-14T00:16:14
|
Python
|
UTF-8
|
Python
| false
| false
| 384
|
pyi
|
__init__.pyi
|
from typing import Any, Dict, Optional
class App:
name: str = ...
credential: Optional[object] = ...
options: Optional[Dict[str, Any]] = ...
project_id: str = ...
def get_app(name: Optional[str] = ...) -> App: ...
def initialize_app(
credential: Optional[object] = ...,
options: Optional[Dict[str, Any]] = ...,
name: str = ...
) -> App: ...
|
ecf7fd991af47247f37908d33b884c98894d4a3e
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Tensorflow/source/tensorflow/contrib/timeseries/python/timeseries/model.py
|
b32b5c5494ae14187954b900119678a5b53a3602
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 35,555
|
py
|
model.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for time series models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
from tensorflow.contrib import layers
from tensorflow.contrib.timeseries.python.timeseries import math_utils
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import PredictionFeatures
from tensorflow.contrib.timeseries.python.timeseries.feature_keys import TrainEvalFeatures
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
ModelOutputs = collections.namedtuple( # pylint: disable=invalid-name
typename="ModelOutputs",
field_names=[
"loss", # The scalar value to be minimized during training.
"end_state", # A nested tuple specifying the model's state after
# running on the specified data
"predictions", # A dictionary of predictions, each with shape prefixed
# by the shape of `prediction_times`.
"prediction_times" # A [batch size x window size] integer Tensor
# indicating times for which values in `predictions`
# were computed.
])
class TimeSeriesModel(object):
"""Base class for creating generative time series models."""
__metaclass__ = abc.ABCMeta
def __init__(self,
num_features,
exogenous_feature_columns=None,
dtype=dtypes.float32):
"""Constructor for generative models.
Args:
num_features: Number of features for the time series
exogenous_feature_columns: A list of tf.contrib.layers.FeatureColumn
objects (for example tf.contrib.layers.embedding_column) corresponding
to exogenous features which provide extra information to the model but
are not part of the series to be predicted. Passed to
tf.contrib.layers.input_from_feature_columns.
dtype: The floating point datatype to use.
"""
if exogenous_feature_columns:
self._exogenous_feature_columns = exogenous_feature_columns
else:
self._exogenous_feature_columns = []
self.num_features = num_features
self.dtype = dtype
self._input_statistics = None
self._graph_initialized = False
self._stats_means = None
self._stats_sigmas = None
# TODO(allenl): Move more of the generic machinery for generating and
# predicting into TimeSeriesModel, and possibly share it between generate()
# and predict()
def generate(self, number_of_series, series_length,
model_parameters=None, seed=None):
"""Sample synthetic data from model parameters, with optional substitutions.
Returns `number_of_series` possible sequences of future values, sampled from
the generative model with each conditioned on the previous. Samples are
based on trained parameters, except for those parameters explicitly
overridden in `model_parameters`.
For distributions over future observations, see predict().
Args:
number_of_series: Number of time series to create.
series_length: Length of each time series.
model_parameters: A dictionary mapping model parameters to values, which
replace trained parameters when generating data.
seed: If specified, return deterministic time series according to this
value.
Returns:
A dictionary with keys TrainEvalFeatures.TIMES (mapping to an array with
shape [number_of_series, series_length]) and TrainEvalFeatures.VALUES
(mapping to an array with shape [number_of_series, series_length,
num_features]).
"""
raise NotImplementedError("This model does not support generation.")
def initialize_graph(self, input_statistics=None):
"""Define ops for the model, not depending on any previously defined ops.
Args:
input_statistics: A math_utils.InputStatistics object containing input
statistics. If None, data-independent defaults are used, which may
result in longer or unstable training.
"""
self._graph_initialized = True
self._input_statistics = input_statistics
if self._input_statistics:
self._stats_means, variances = (
self._input_statistics.overall_feature_moments)
self._stats_sigmas = math_ops.sqrt(variances)
def _scale_data(self, data):
"""Scale data according to stats (input scale -> model scale)."""
if self._input_statistics is not None:
return (data - self._stats_means) / self._stats_sigmas
else:
return data
def _scale_variance(self, variance):
"""Scale variances according to stats (input scale -> model scale)."""
if self._input_statistics is not None:
return variance / self._input_statistics.overall_feature_moments.variance
else:
return variance
def _scale_back_data(self, data):
"""Scale back data according to stats (model scale -> input scale)."""
if self._input_statistics is not None:
return (data * self._stats_sigmas) + self._stats_means
else:
return data
def _scale_back_variance(self, variance):
"""Scale back variances according to stats (model scale -> input scale)."""
if self._input_statistics is not None:
return variance * self._input_statistics.overall_feature_moments.variance
else:
return variance
def _check_graph_initialized(self):
if not self._graph_initialized:
raise ValueError(
"TimeSeriesModels require initialize_graph() to be called before "
"use. This defines variables and ops in the default graph, and "
"allows Tensor-valued input statistics to be specified.")
def define_loss(self, features, mode):
"""Default loss definition with state replicated across a batch.
Time series passed to this model have a batch dimension, and each series in
a batch can be operated on in parallel. This loss definition assumes that
each element of the batch represents an independent sample conditioned on
the same initial state (i.e. it is simply replicated across the batch). A
batch size of one provides sequential operations on a single time series.
More complex processing may operate instead on get_start_state() and
get_batch_loss() directly.
Args:
features: A dictionary (such as is produced by a chunker) with at minimum
the following key/value pairs (others corresponding to the
`exogenous_feature_columns` argument to `__init__` may be included
representing exogenous regressors):
TrainEvalFeatures.TIMES: A [batch size x window size] integer Tensor
with times for each observation. If there is no artificial chunking,
the window size is simply the length of the time series.
TrainEvalFeatures.VALUES: A [batch size x window size x num features]
Tensor with values for each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL). For INFER,
see predict().
Returns:
A ModelOutputs object.
"""
self._check_graph_initialized()
start_state = math_utils.replicate_state(
start_state=self.get_start_state(),
batch_size=array_ops.shape(features[TrainEvalFeatures.TIMES])[0])
return self.get_batch_loss(features=features, mode=mode, state=start_state)
# TODO(vitalyk,allenl): Better documentation surrounding options for chunking,
# references to papers, etc.
@abc.abstractmethod
def get_start_state(self):
"""Returns a tuple of state for the start of the time series.
For example, a mean and covariance. State should not have a batch
dimension, and will often be TensorFlow Variables to be learned along with
the rest of the model parameters.
"""
pass
@abc.abstractmethod
def get_batch_loss(self, features, mode, state):
"""Return predictions, losses, and end state for a time series.
Args:
features: A dictionary with times, values, and (optionally) exogenous
regressors. See `define_loss`.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
state: Model-dependent state, each with size [batch size x ...]. The
number and type will typically be fixed by the model (for example a
mean and variance).
Returns:
A ModelOutputs object.
"""
pass
@abc.abstractmethod
def predict(self, features):
"""Returns predictions of future observations given an initial state.
Computes distributions for future observations. For sampled draws from the
model where each is conditioned on the previous, see generate().
Args:
features: A dictionary with at minimum the following key/value pairs
(others corresponding to the `exogenous_feature_columns` argument to
`__init__` may be included representing exogenous regressors):
PredictionFeatures.TIMES: A [batch size x window size] Tensor with
times to make predictions for. Times must be increasing within each
part of the batch, and must be greater than the last time `state` was
updated.
PredictionFeatures.STATE_TUPLE: Model-dependent state, each with size
[batch size x ...]. The number and type will typically be fixed by the
model (for example a mean and variance). Typically these will be the
end state returned by get_batch_loss, predicting beyond that data.
Returns:
A dictionary with model-dependent predictions corresponding to the
requested times. Keys indicate the type of prediction, and values have
shape [batch size x window size x ...]. For example state space models
return a "predicted_mean" and "predicted_covariance".
"""
pass
def _process_exogenous_features(self, times, features):
"""Create a single vector from exogenous features.
Args:
times: A [batch size, window size] vector of times for this batch,
primarily used to check the shape information of exogenous features.
features: A dictionary of exogenous features corresponding to the columns
in self._exogenous_feature_columns. Each value should have a shape
prefixed by [batch size, window size].
Returns:
A Tensor with shape [batch size, window size, exogenous dimension], where
the size of the exogenous dimension depends on the exogenous feature
columns passed to the model's constructor.
Raises:
ValueError: If an exogenous feature has an unknown rank.
"""
if self._exogenous_feature_columns:
exogenous_features_single_batch_dimension = {}
for name, tensor in features.items():
if tensor.get_shape().ndims is None:
# input_from_feature_columns does not support completely unknown
# feature shapes, so we save on a bit of logic and provide a better
# error message by checking that here.
raise ValueError(
("Features with unknown rank are not supported. Got shape {} for "
"feature {}.").format(tensor.get_shape(), name))
tensor_shape_dynamic = array_ops.shape(tensor)
tensor = array_ops.reshape(
tensor,
array_ops.concat([[tensor_shape_dynamic[0]
* tensor_shape_dynamic[1]],
tensor_shape_dynamic[2:]], axis=0))
# Avoid shape warnings when embedding "scalar" exogenous features (those
# with only batch and window dimensions); input_from_feature_columns
# expects input ranks to match the embedded rank.
if tensor.get_shape().ndims == 1:
exogenous_features_single_batch_dimension[name] = tensor[:, None]
else:
exogenous_features_single_batch_dimension[name] = tensor
embedded_exogenous_features_single_batch_dimension = (
layers.input_from_feature_columns(
columns_to_tensors=exogenous_features_single_batch_dimension,
feature_columns=self._exogenous_feature_columns,
trainable=True))
exogenous_regressors = array_ops.reshape(
embedded_exogenous_features_single_batch_dimension,
array_ops.concat(
[
array_ops.shape(times), array_ops.shape(
embedded_exogenous_features_single_batch_dimension)[1:]
],
axis=0))
exogenous_regressors.set_shape(times.get_shape().concatenate(
embedded_exogenous_features_single_batch_dimension.get_shape()[1:]))
exogenous_regressors = math_ops.cast(
exogenous_regressors, dtype=self.dtype)
else:
# Not having any exogenous features is a special case so that models can
# avoid superfluous updates, which may not be free of side effects due to
# bias terms in transformations.
exogenous_regressors = None
return exogenous_regressors
# TODO(allenl): Add a superclass of SequentialTimeSeriesModel which fuses
# filtering/prediction/exogenous into one step, and move looping constructs to
# that class.
class SequentialTimeSeriesModel(TimeSeriesModel):
"""Base class for recurrent generative models.
Models implementing this interface have three main functions, corresponding to
abstract methods:
_filtering_step: Updates state based on observations and computes a loss.
_prediction_step: Predicts a batch of observations and new model state.
_imputation_step: Updates model state across a gap.
_exogenous_input_step: Updates state to account for exogenous regressors.
Models may also specify a _window_initializer to prepare for a window of data.
See StateSpaceModel for a concrete example of a model implementing this
interface.
"""
def __init__(self,
train_output_names,
predict_output_names,
num_features,
normalize_features=False,
dtype=dtypes.float32,
exogenous_feature_columns=None,
exogenous_update_condition=None,
static_unrolling_window_size_threshold=None):
"""Initialize a SequentialTimeSeriesModel.
Args:
train_output_names: A list of products/predictions returned from
_filtering_step.
predict_output_names: A list of products/predictions returned from
_prediction_step.
num_features: Number of features for the time series
normalize_features: Boolean. If True, `values` are passed normalized to
the model (via self._scale_data). Scaling is done for the whole window
as a batch, which is slightly more efficient than scaling inside the
window loop. The model must then define _scale_back_predictions, which
may use _scale_back_data or _scale_back_variance to return predictions
to the input scale.
dtype: The floating point datatype to use.
exogenous_feature_columns: A list of tf.contrib.layers.FeatureColumn
objects. See `TimeSeriesModel`.
exogenous_update_condition: A function taking two Tensor arguments `times`
(shape [batch size]) and `features` (a dictionary mapping exogenous
feature keys to Tensors with shapes [batch size, ...]) and returning a
boolean Tensor with shape [batch size] indicating whether state should
be updated using exogenous features for each part of the batch. Where
it is False, no exogenous update is performed. If None (default),
exogenous updates are always performed. Useful for avoiding "leaky"
frequent exogenous updates when sparse updates are desired. Called
only during graph construction.
static_unrolling_window_size_threshold: Controls whether a `tf.while_loop`
is used when looping over a window of data. If
`static_unrolling_window_size_threshold` is None, a `tf.while_loop` is
always used. Otherwise it must be an integer, and the graph is
replicated for each step taken whenever the window size is less than
or equal to this value (if the window size is available in the static
shape information of the TrainEvalFeatures.TIMES feature). Static
unrolling generally decreases the per-step time for small window/batch
sizes, but increases graph construction time.
"""
super(SequentialTimeSeriesModel, self).__init__(
num_features=num_features, dtype=dtype,
exogenous_feature_columns=exogenous_feature_columns)
self._exogenous_update_condition = exogenous_update_condition
self._train_output_names = train_output_names
self._predict_output_names = predict_output_names
self._normalize_features = normalize_features
self._static_unrolling_window_size_threshold = (
static_unrolling_window_size_threshold)
def _scale_back_predictions(self, predictions):
"""Return a window of predictions to input scale.
Args:
predictions: A dictionary mapping from prediction names to Tensors.
Returns:
A dictionary with values corrected for input normalization (e.g. with
self._scale_back_mean and possibly self._scale_back_variance). May be a
mutated version of the argument.
"""
raise NotImplementedError(
"SequentialTimeSeriesModel normalized input data"
" (normalize_features=True), but no method was provided to transform "
"the predictions back to the input scale.")
@abc.abstractmethod
def _filtering_step(self, current_times, current_values, state, predictions):
"""Compute a single-step loss for a batch of data.
Args:
current_times: A [batch size] Tensor of times for each observation.
current_values: A [batch size] Tensor of values for each observation.
state: Model state, updated to current_times.
predictions: The outputs of _prediction_step
Returns:
A tuple of (updated state, outputs):
updated state: Model state taking current_values into account.
outputs: A dictionary of Tensors with keys corresponding to
self._train_output_names, plus a special "loss" key. The value
corresponding to "loss" is minimized during training. Other outputs
may include one-step-ahead predictions, for example a predicted
location and scale.
"""
pass
@abc.abstractmethod
def _prediction_step(self, current_times, state):
"""Compute a batch of single-step predictions.
Args:
current_times: A [batch size] Tensor of times for each observation.
state: Model state, imputed to one step before current_times.
Returns:
A tuple of (updated state, outputs):
updated state: Model state updated to current_times.
outputs: A dictionary of Tensors with keys corresponding to
self._predict_output_names.
"""
pass
@abc.abstractmethod
def _imputation_step(self, current_times, state):
"""Update model state across missing values.
Called to prepare model state for _filtering_step and _prediction_step.
Args:
current_times: A [batch size] Tensor; state will be imputed up to, but not
including, these timesteps.
state: The pre-imputation model state, Tensors with shape [batch size x
...].
Returns:
Updated/imputed model state, corresponding to `state`.
"""
pass
@abc.abstractmethod
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update state to account for exogenous regressors.
Args:
current_times: A [batch size] Tensor of times for the exogenous values
being input.
current_exogenous_regressors: A [batch size x exogenous input dimension]
Tensor of exogenous values for each part of the batch.
state: Model state, a possibly nested list of Tensors, each with shape
[batch size x ...].
Returns:
Updated model state, structure and shapes matching the `state` argument.
"""
pass
# TODO(allenl): Move regularization to a separate object (optional and
# configurable)
def _loss_additions(self, times, values, mode):
"""Additions to per-observation normalized loss, e.g. regularization.
Args:
times: A [batch size x window size] Tensor with times for each
observation.
values: A [batch size x window size x num features] Tensor with values for
each observation.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
Returns:
A scalar value to add to the per-observation normalized loss.
"""
del times, values, mode
return 0.
def _window_initializer(self, times, state):
"""Prepare for training or prediction on a window of data.
Args:
times: A [batch size x window size] Tensor with times for each
observation.
state: Model-dependent state, each with size [batch size x ...]. The
number and type will typically be fixed by the model (for example a
mean and variance).
Returns:
Nothing
"""
pass
def get_batch_loss(self, features, mode, state):
"""Calls self._filtering_step. See TimeSeriesModel.get_batch_loss."""
per_observation_loss, state, outputs = self.per_step_batch_loss(
features, mode, state)
# per_step_batch_loss returns [batch size, window size, ...] state, whereas
# get_batch_loss is expected to return [batch size, ...] state for the last
# element of a window
state = nest.pack_sequence_as(
state,
[state_element[:, -1] for state_element in nest.flatten(state)])
outputs["observed"] = features[TrainEvalFeatures.VALUES]
return ModelOutputs(
loss=per_observation_loss,
end_state=state,
predictions=outputs,
prediction_times=features[TrainEvalFeatures.TIMES])
def _apply_exogenous_update(
self, current_times, step_number, state, raw_features,
embedded_exogenous_regressors):
"""Performs a conditional state update based on exogenous features."""
if embedded_exogenous_regressors is None:
return state
else:
current_exogenous_regressors = embedded_exogenous_regressors[
:, step_number, :]
exogenous_updated_state = self._exogenous_input_step(
current_times=current_times,
current_exogenous_regressors=current_exogenous_regressors,
state=state)
if self._exogenous_update_condition is not None:
current_raw_exogenous_features = {
key: value[:, step_number] for key, value in raw_features.items()
if key not in [PredictionFeatures.STATE_TUPLE,
TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES]}
conditionally_updated_state_flat = []
for updated_state_element, original_state_element in zip(
nest.flatten(exogenous_updated_state),
nest.flatten(state)):
conditionally_updated_state_flat.append(
array_ops.where(
self._exogenous_update_condition(
times=current_times,
features=current_raw_exogenous_features),
updated_state_element,
original_state_element))
return nest.pack_sequence_as(state, conditionally_updated_state_flat)
else:
return exogenous_updated_state
def per_step_batch_loss(self, features, mode, state):
"""Computes predictions, losses, and intermediate model states.
Args:
features: A dictionary with times, values, and (optionally) exogenous
regressors. See `define_loss`.
mode: The tf.estimator.ModeKeys mode to use (TRAIN, EVAL, INFER).
state: Model-dependent state, each with size [batch size x ...]. The
number and type will typically be fixed by the model (for example a
mean and variance).
Returns:
A tuple of (loss, filtered_states, predictions)
loss: Average loss values across the batch.
filtered_states: For each Tensor in `state` with shape [batch size x
...], `filtered_states` has a Tensor with shape [batch size x window
size x ...] with filtered state for each part of the batch and
window.
predictions: A dictionary with model-dependent one-step-ahead (or
at-least-one-step-ahead with missing values) predictions, with keys
indicating the type of prediction and values having shape [batch
size x window size x ...]. For example state space models provide
"mean", "covariance", and "log_likelihood".
"""
self._check_graph_initialized()
times = math_ops.cast(features[TrainEvalFeatures.TIMES], dtype=dtypes.int64)
values = math_ops.cast(features[TrainEvalFeatures.VALUES], dtype=self.dtype)
if self._normalize_features:
values = self._scale_data(values)
exogenous_regressors = self._process_exogenous_features(
times=times,
features={key: value for key, value in features.items()
if key not in [TrainEvalFeatures.TIMES,
TrainEvalFeatures.VALUES]})
def _batch_loss_filtering_step(step_number, current_times, state):
"""Make a prediction and update it based on data."""
current_values = values[:, step_number, :]
state = self._apply_exogenous_update(
step_number=step_number, current_times=current_times, state=state,
raw_features=features,
embedded_exogenous_regressors=exogenous_regressors)
predicted_state, predictions = self._prediction_step(
current_times=current_times,
state=state)
filtered_state, outputs = self._filtering_step(
current_times=current_times,
current_values=current_values,
state=predicted_state,
predictions=predictions)
return filtered_state, outputs
state, outputs = self._state_update_loop(
times=times, state=state, state_update_fn=_batch_loss_filtering_step,
outputs=["loss"] + self._train_output_names)
outputs["loss"].set_shape(times.get_shape())
loss_sum = math_ops.reduce_sum(outputs["loss"])
per_observation_loss = (loss_sum / math_ops.cast(
math_ops.reduce_prod(array_ops.shape(times)), dtype=self.dtype))
per_observation_loss += self._loss_additions(times, values, mode)
# Since we have window-level additions to the loss, its per-step value is
# misleading, so we avoid returning it.
del outputs["loss"]
if self._normalize_features:
outputs = self._scale_back_predictions(outputs)
return per_observation_loss, state, outputs
def predict(self, features):
"""Calls self._prediction_step in a loop. See TimeSeriesModel.predict."""
predict_times = ops.convert_to_tensor(features[PredictionFeatures.TIMES],
dtypes.int64)
start_state = features[PredictionFeatures.STATE_TUPLE]
exogenous_regressors = self._process_exogenous_features(
times=predict_times,
features={
key: value
for key, value in features.items()
if key not in
[PredictionFeatures.TIMES, PredictionFeatures.STATE_TUPLE]
})
def _call_prediction_step(step_number, current_times, state):
state = self._apply_exogenous_update(
step_number=step_number, current_times=current_times, state=state,
raw_features=features,
embedded_exogenous_regressors=exogenous_regressors)
state, outputs = self._prediction_step(
current_times=current_times, state=state)
return state, outputs
_, predictions = self._state_update_loop(
times=predict_times, state=start_state,
state_update_fn=_call_prediction_step,
outputs=self._predict_output_names)
if self._normalize_features:
predictions = self._scale_back_predictions(predictions)
return predictions
class _FakeTensorArray(object):
"""An interface for Python lists that is similar to TensorArray.
Used for easy switching between static and dynamic looping.
"""
def __init__(self):
self.values = []
def write(self, unused_position, value):
del unused_position
self.values.append(value)
return self
def _state_update_loop(self, times, state, state_update_fn, outputs):
"""Iterates over `times`, calling `state_update_fn` to collect outputs.
Args:
times: A [batch size x window size] Tensor of integers to iterate over.
state: A list of model-specific state Tensors, each with shape [batch size
x ...].
state_update_fn: A callback taking the following arguments
step_number; A scalar integer Tensor indicating the current position
in the window.
current_times; A [batch size] vector of Integers indicating times
for each part of the batch.
state; Current model state.
It returns a tuple of (updated state, output_values), output_values
being a dictionary of Tensors with keys corresponding to `outputs`.
outputs: A list of strings indicating values which will be saved while
iterating. Must match the keys of the dictionary returned by
state_update_fn.
Returns:
A tuple of (state, output_dict)
state: The final model state.
output_dict: A dictionary of outputs corresponding to those specified in
`outputs` and computed in state_update_fn.
"""
times = ops.convert_to_tensor(times, dtype=dtypes.int64)
window_static_shape = times.get_shape()[1].value
if self._static_unrolling_window_size_threshold is None:
static_unroll = False
else:
# The user has specified a threshold for static loop unrolling.
if window_static_shape is None:
# We don't have static shape information for the window size, so dynamic
# looping is our only option.
static_unroll = False
elif window_static_shape <= self._static_unrolling_window_size_threshold:
# The threshold is satisfied; unroll statically
static_unroll = True
else:
# A threshold was set but not satisfied
static_unroll = False
self._window_initializer(times, state)
def _run_condition(step_number, *unused):
del unused # not part of while loop run condition
return math_ops.less(step_number, window_size)
def _state_update_step(
step_number, state, state_accumulators, output_accumulators,
reuse=False):
"""Impute, then take one state_update_fn step, accumulating outputs."""
with variable_scope.variable_scope("state_update_step", reuse=reuse):
current_times = times[:, step_number]
state = self._imputation_step(current_times=current_times, state=state)
output_accumulators_dict = {
accumulator_key: accumulator
for accumulator_key, accumulator
in zip(outputs, output_accumulators)}
step_state, output_values = state_update_fn(
step_number=step_number,
current_times=current_times,
state=state)
assert set(output_values.keys()) == set(outputs)
new_output_accumulators = []
for output_key in outputs:
accumulator = output_accumulators_dict[output_key]
output_value = output_values[output_key]
new_output_accumulators.append(
accumulator.write(step_number, output_value))
flat_step_state = nest.flatten(step_state)
assert len(state_accumulators) == len(flat_step_state)
new_state_accumulators = []
new_state_flat = []
for step_state_value, state_accumulator, original_state in zip(
flat_step_state, state_accumulators, nest.flatten(state)):
# Make sure the static shape information is complete so while_loop
# does not complain about shape information changing.
step_state_value.set_shape(original_state.get_shape())
new_state_flat.append(step_state_value)
new_state_accumulators.append(state_accumulator.write(
step_number, step_state_value))
step_state = nest.pack_sequence_as(state, new_state_flat)
return (step_number + 1, step_state,
new_state_accumulators, new_output_accumulators)
window_size = array_ops.shape(times)[1]
def _window_size_tensor_array(dtype):
if static_unroll:
return self._FakeTensorArray()
else:
return tensor_array_ops.TensorArray(
dtype=dtype, size=window_size, dynamic_size=False)
initial_loop_arguments = [
array_ops.zeros([], dtypes.int32),
state,
[_window_size_tensor_array(element.dtype)
for element in nest.flatten(state)],
[_window_size_tensor_array(self.dtype) for _ in outputs]]
if static_unroll:
arguments = initial_loop_arguments
for step_number in range(times.get_shape()[1].value):
arguments = _state_update_step(
array_ops.constant(step_number, dtypes.int32), *arguments[1:],
reuse=(step_number > 0)) # Variable sharing between steps
else:
arguments = control_flow_ops.while_loop(
cond=_run_condition,
body=_state_update_step,
loop_vars=initial_loop_arguments)
(_, _, state_loop_result, outputs_loop_result) = arguments
def _stack_and_transpose(tensor_array):
"""Stack and re-order the dimensions of a TensorArray."""
if static_unroll:
return array_ops.stack(tensor_array.values, axis=1)
else:
# TensorArrays from while_loop stack with window size as the first
# dimension, so this function swaps it and the batch dimension to
# maintain the [batch x window size x ...] convention used elsewhere.
stacked = tensor_array.stack()
return array_ops.transpose(
stacked,
perm=array_ops.concat([[1, 0], math_ops.range(
2, array_ops.rank(stacked))], 0))
outputs_dict = {output_key: _stack_and_transpose(output)
for output_key, output
in zip(outputs, outputs_loop_result)}
full_state = nest.pack_sequence_as(
state,
[_stack_and_transpose(state_element)
for state_element in state_loop_result])
return full_state, outputs_dict
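# --- Editor's sketch (plain NumPy, not TensorFlow) -----------------------------
# `_state_update_loop` above walks the window dimension of a [batch, window]
# array, threading model state through a per-step callback and stacking the
# per-step outputs back into [batch, window, ...]. The fragment below mimics
# that control flow for a toy "model" whose state is a per-row running mean.
# None of these names exist in the module above; it is a conceptual sketch of
# the looping pattern only.
import numpy as np


def _toy_state_update_loop(values, state_update_fn, initial_state):
  """Iterate over the window axis, returning (final_state, [batch, window, ...])."""
  _, window_size = values.shape[:2]
  state = initial_state
  outputs = []
  for step in range(window_size):
    state, step_output = state_update_fn(values[:, step], state)
    outputs.append(step_output)
  return state, np.stack(outputs, axis=1)


def _running_mean_step(current_values, state):
  """Toy per-step update: state is (running mean, count), output is squared error."""
  mean, count = state
  new_mean = (mean * count + current_values) / (count + 1)
  return (new_mean, count + 1), (current_values - mean) ** 2

# Example with a [batch=2, window=3] array:
#   values = np.arange(6.0).reshape(2, 3)
#   state, per_step = _toy_state_update_loop(values, _running_mean_step,
#                                            (np.zeros(2), 0))
#   per_step.shape == (2, 3); state[0] equals each row's mean, i.e. [1., 4.].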
|
e27395482725a67e5c5b5d64b5a3e0c2f7c0be9a
|
b8cc7f50628eb2f6b4fdbd66eae5a1aa502cc309
|
/locale/Locale.py
|
1c7c9b7c629a81f5a8b7de8413f30cc190a52cd2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-scintilla",
"MIT",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
zufuliu/notepad2
|
ad82f2baaf60cd9efcbf3af57d2e2f381f961f0b
|
6296f4a8567d2c22690be0cafcdf91a0fea60cc7
|
refs/heads/main
| 2023-08-29T03:45:42.292846
| 2023-08-28T10:33:46
| 2023-08-28T10:33:46
| 79,987,996
| 2,154
| 227
|
NOASSERTION
| 2023-08-11T23:59:02
| 2017-01-25T06:07:49
|
C++
|
UTF-8
|
Python
| false
| false
| 14,852
|
py
|
Locale.py
|
#!/usr/bin/env python3
import sys
import os.path
import re
import shutil
import uuid
import subprocess
app = os.path.basename(__file__)
localeDir = os.getcwd()
notepad2_src = os.path.abspath('../src/Notepad2.rc')
metapath_src = os.path.abspath('../metapath/src/metapath.rc')
def get_available_locales():
result = []
with os.scandir(localeDir) as it:
for entry in it:
if entry.is_dir() and entry.name[:2].islower():
result.append(entry.name)
result.sort()
return result
def get_project_guid():
return '{' + str(uuid.uuid4()).upper() + '}'
def patch_vc_project_file(path, src_lang, language):
with open(path, encoding='utf-8', newline='\n') as fd:
doc = fd.read()
# change output folder
doc = doc.replace(f'\\{src_lang}\\', f'\\{language}\\')
# change project GUID
guid = get_project_guid()
doc = re.sub(r'(<ProjectGuid>)(.+)(</ProjectGuid>)', r'\1' + guid + r'\3', doc)
with open(path, 'w', encoding='utf-8', newline='\n') as fd:
fd.write(doc)
def update_resource_include_path(path, metapath):
with open(path, encoding='utf-8', newline='\n') as fd:
doc = fd.read()
if metapath:
# resource path
doc = doc.replace(r'..\\res', r'..\\..\\metapath\\res')
# include path
doc = re.sub(r'^(#include\s+")(.+)(")', r'\1../../metapath/src/\2\3', doc, flags=re.MULTILINE)
else:
# resource path
doc = doc.replace(r'..\\res', r'..\\..\\res')
# include path
doc = re.sub(r'^(#include\s+")(.+)(")', r'\1../../src/\2\3', doc, flags=re.MULTILINE)
# string table for lexers and styles
doc = re.sub(r'^//(#include\s+")(.+)(")', r'\1../../src/\2\3', doc, flags=re.MULTILINE)
doc = re.sub(r'^#if\s+0\s*//\s*(NP2_ENABLE_LOCALIZE\w+)', r'#if \1', doc, flags=re.MULTILINE)
with open(path, 'w', encoding='utf-8', newline='\n') as fd:
fd.write(doc)
def make_new_localization(language):
print(f'{app}: make new localization for {language}.')
target = os.path.join(localeDir, language)
if not os.path.exists(target):
os.makedirs(target)
metapath_dest = os.path.join(target, 'metapath.rc')
notepad2_dest = os.path.join(target, 'Notepad2.rc')
shutil.copyfile(metapath_src, metapath_dest)
shutil.copyfile(notepad2_src, notepad2_dest)
update_resource_include_path(metapath_dest, True)
update_resource_include_path(notepad2_dest, False)
src_lang = 'zh-Hans'
src_folder = os.path.join(localeDir, src_lang)
metapath_vcxproj_src = os.path.join(src_folder, f'metapath({src_lang}).vcxproj')
notepad2_vcxproj_src = os.path.join(src_folder, f'Notepad2({src_lang}).vcxproj')
metapath_vcxproj = os.path.join(target, f'metapath({language}).vcxproj')
notepad2_vcxproj = os.path.join(target, f'Notepad2({language}).vcxproj')
shutil.copyfile(metapath_vcxproj_src, metapath_vcxproj)
shutil.copyfile(metapath_vcxproj_src + '.filters', metapath_vcxproj + '.filters')
shutil.copyfile(notepad2_vcxproj_src, notepad2_vcxproj)
shutil.copyfile(notepad2_vcxproj_src + '.filters', notepad2_vcxproj + '.filters')
patch_vc_project_file(metapath_vcxproj, src_lang, language)
patch_vc_project_file(notepad2_vcxproj, src_lang, language)
metapath_dest = os.path.basename(metapath_dest)
notepad2_dest = os.path.basename(notepad2_dest)
metapath_vcxproj = os.path.basename(metapath_vcxproj)
notepad2_vcxproj = os.path.basename(notepad2_vcxproj)
print(f"""{app}: resources and projects added for {language}.
Please manually update language tags in {metapath_dest} and {notepad2_dest},
and open Locale.sln with Visual Studio to add project {metapath_vcxproj} and {notepad2_vcxproj}.""")
def restore_resource_include_path(path, metapath):
with open(path, encoding='utf-8', newline='\n') as fd:
doc = fd.read()
if metapath:
# include path
doc = doc.replace('../../metapath/src/', '')
# resource path
doc = doc.replace(r'..\\metapath\\', '')
else:
# include path
doc = doc.replace('../../src/', '')
# resource path
doc = doc.replace(r'..\\..\\res', r'..\\res')
with open(path, 'w', encoding='utf-8', newline='\n') as fd:
fd.write(doc)
def copy_back_localized_resources(language):
print(f'{app}: copy back localized resources for {language}.')
backupDir = os.path.join(localeDir, 'en')
if os.path.exists(backupDir):
if language != 'en':
print(f"""{app}: please delete the backup folder {backupDir}
on first run to ensure proper backup for English resources.""")
else:
os.makedirs(backupDir)
shutil.copyfile(metapath_src, os.path.join(backupDir, 'metapath.rc'))
shutil.copyfile(notepad2_src, os.path.join(backupDir, 'Notepad2.rc'))
folder = os.path.join(localeDir, language)
shutil.copyfile(os.path.join(folder, 'metapath.rc'), metapath_src)
shutil.copyfile(os.path.join(folder, 'Notepad2.rc'), notepad2_src)
restore_resource_include_path(metapath_src, True)
restore_resource_include_path(notepad2_src, False)
print(f"""{app}: resources for building standalone localized program for {language} are ready.
you can copy English resources back by running: {app} back en""")
class StringExtractor:
def reset(self, path, reversion):
self.path = path
self.reversion = reversion
self.changed_lines = set()
if reversion:
self.find_changed_lines(reversion)
if not self.changed_lines:
return False
with open(path, encoding='utf-8', newline='\n') as fd:
doc = fd.read()
self.lines = doc.splitlines()
return len(self.lines)
def find_changed_lines(self, reversion):
cmd = ['git', 'diff', '--no-color', '--unified=0', '--text', reversion, self.path]
result = subprocess.run(cmd, capture_output=True, check=True, encoding='utf-8')
if result.stderr:
print(result.stderr, file=sys.stderr)
items = re.findall(r'^@@\s+\-\d+(,\d+)?\s+\+(\d+)(,\d+)?\s+@@', result.stdout, re.MULTILINE)
if not items:
return
for item in items:
_, line, count = item
line = int(line)
count = int(count[1:]) if count else 0
if count:
self.changed_lines.update(range(line, line + count))
else:
self.changed_lines.add(line)
# reversion time
cmd =['git', 'show', '--no-patch', '--no-notes', "--pretty='%ci'", reversion]
result = subprocess.run(cmd, capture_output=True, check=True, encoding='utf-8')
if result.stderr:
print(result.stderr, file=sys.stderr)
items = result.stdout.replace("'", '').split()[:2]
self.reversion = f"{reversion} {' '.join(items)}".strip()
def is_line_changed(self, start, end):
if not self.changed_lines:
return True
while start <= end:
if start in self.changed_lines:
return True
start += 1
return False
def match_line(self, line, word):
if line.startswith(word):
ch = ' ' if len(line) == len(word) else line[len(word)]
return ch.isspace() or ch == '/'
return False
def scan_string(self, line, escape_sequence, format_specifier, access_key, start):
index = 0
if start:
# identifier "string"
index = line.find('"')
if index <= 0:
return '', 0, 0, False
length = len(line)
begin = index
if start:
index += 1
stop = False
while index < length:
ch = line[index]
index += 1
if ch == '\\':
ch = line[index] if index < length else ''
end = index + 1
if ch and ch in 'abfnrtvxu':
if ch == 'x':
end += 2
elif ch == 'u':
end += 4
sequence = line[index - 1:end]
escape_sequence.add(sequence)
index = end
elif ch == '&':
ch = line[index]
if ch == '&':
index += 1
elif not ch.isspace():
access_key.append(ch.upper())
elif ch == '%':
ch = line[index]
# we only use '%s' in resource files
if ch == 's':
format_specifier.add('%' + ch)
elif ch == '"':
if index < length and line[index] == ch:
index += 1
else:
stop = True
break
value = line[begin:index]
return value, begin, index, stop
def build_hint(self, escape_sequence, format_specifier, access_key):
hint = ''
if access_key:
hint += ', access key: ' + ', '.join(access_key)
if escape_sequence:
hint += ', escape sequence: ' + ', '.join(sorted(escape_sequence))
if format_specifier:
hint += ', format specifier: ' + ', '.join(sorted(format_specifier))
return hint
def parse_resource_item(self, lineno, line, block_items):
if not self.is_line_changed(lineno, lineno):
return None
escape_sequence = set()
format_specifier = set()
access_key = []
value, begin, index, _ = self.scan_string(line, escape_sequence, format_specifier, access_key, True)
if not any(ch.isalpha() for ch in value):
return None
# type "string", id
word = line[:begin].strip()
if not word.isidentifier():
return None
rcid = ''
begin = line.find(',', index)
if begin > 0:
index = line.find(',', begin + 1)
if index > 0:
rcid = line[begin + 1:index].strip()
else:
rcid = line[begin + 1:].strip()
assert rcid.isidentifier()
if word == 'CAPTION':
return f'{word} {value}'
if len(access_key) > 1:
print(f'multiple access keys {lineno} {word} {rcid}', access_key)
comment = f'// {lineno} {word} {rcid}'.strip()
comment += self.build_hint(escape_sequence, format_specifier, access_key)
block_items.append({
'value': value,
'comment': comment
})
return None
def parse_string_table_item(self, lineno, line, block_items):
# id "multiline string"
escape_sequence = set()
format_specifier = set()
access_key = []
value, begin, index, stop = self.scan_string(line, escape_sequence, format_specifier, access_key, True)
rcid = line[:begin].strip()
assert rcid.isidentifier()
result = [value]
start = lineno
while not stop:
line = self.lines[lineno]
lineno += 1
value, begin, index, stop = self.scan_string(line, escape_sequence, format_specifier, access_key, False)
result.append(value)
if not self.is_line_changed(start, lineno):
return lineno
value = '\n'.join(result)
if not any(ch.isalpha() for ch in value):
return lineno
if len(access_key) > 1:
print(f'multiple access keys {start} {rcid}', access_key)
comment = f'// {start} {rcid}'
comment += self.build_hint(escape_sequence, format_specifier, access_key)
block_items.append({
'value': value,
'comment': comment
})
return lineno
def extract(self, path, reversion, out_path=None):
if not self.reset(path, reversion):
return
Block_None = 0
Block_Menu = 1
Block_DialogEx = 2
Block_StringTable = 3
Block_Ignore = 4
block_type = Block_None
block_name = ''
block_caption = ''
begin = 0
block_begin = 0
block_items = []
lineno = 0
line_count = len(self.lines)
string_list = []
while lineno < line_count:
line = self.lines[lineno]
line = line.strip()
lineno += 1
if not line or line.startswith('//') or line.startswith('#'):
continue
if block_type == Block_None:
begin = 0
if self.match_line(line, 'STRINGTABLE'):
block_type = Block_StringTable
block_name = 'STRINGTABLE'
else:
items = line.split()
if len(items) >= 2:
word = items[1]
if word == 'MENU':
block_type = Block_Menu
block_name = ' '.join(items[:2])
elif word == 'DIALOGEX':
block_type = Block_DialogEx
block_name = ' '.join(items[:2])
elif word in ('ACCELERATORS', 'DESIGNINFO', 'TEXTINCLUDE'):
block_type = Block_Ignore
if block_type != Block_None:
block_begin = lineno
block_items = []
block_caption = ''
elif self.match_line(line, 'BEGIN'):
begin += 1
elif self.match_line(line, 'END'):
begin -= 1
if begin <= 0:
block_type = Block_None
if block_items:
string_list.append({
'name': block_name,
'comment': f'// line {block_begin} - {lineno}',
'caption': block_caption,
'items': block_items
})
block_items = []
elif block_type != Block_Ignore:
try:
if block_type in (Block_Menu, Block_DialogEx):
caption = self.parse_resource_item(lineno, line, block_items)
if caption:
block_caption = caption
elif block_type == Block_StringTable:
lineno = self.parse_string_table_item(lineno, line, block_items)
except Exception:
print(f'parse {block_type} {block_name} fail at {lineno} for {self.path}')
raise
if string_list:
self.save(string_list, out_path)
def save(self, string_list, out_path=None):
if not out_path:
path, ext = os.path.splitext(self.path)
out_path = path + '-string' + ext
print('save:', out_path)
with open(out_path, 'w', encoding='utf-8') as fd:
fd.write("//! Ignore line starts with //, it's a comment line.\n")
fd.write("//! Please don't translate escape sequence or format specifiers.\n")
if self.reversion:
fd.write("//! Updated strings since: " + self.reversion + '\n')
fd.write('\n')
for block in string_list:
fd.write(block['comment'] + '\n')
fd.write(block['name'] + '\n')
caption = block['caption']
if caption:
fd.write(caption + '\n')
fd.write('BEGIN' + '\n')
for item in block['items']:
fd.write('\t' + item['comment'] + '\n')
fd.write('\t' + item['value'] + '\n')
fd.write('END' + '\n\n')
def extract_resource_string(language, reversion):
print(f'{app}: extract updated string for {language} since {reversion}.')
extractor = StringExtractor()
if language == 'en':
extractor.extract(metapath_src, reversion)
extractor.extract(notepad2_src, reversion)
else:
folder = os.path.join(localeDir, language)
path = os.path.join(folder, 'metapath.rc')
extractor.extract(path, reversion)
path = os.path.join(folder, 'Notepad2.rc')
extractor.extract(path, reversion)
def show_help():
print(f"""Usage: {app} action language [reversion]
action:
new create a new localization for a specific language.
back prepare to build a standalone localized program for a specific language,
copy back localized resources to overwrite English resources.
English resources are copied into the en folder when the folder does not exist.
string extract all resource strings or strings updated since a specific reversion.
""")
def main(argv):
if len(argv) < 3:
show_help()
return
action = argv[1]
language = argv[2]
availableLocales = get_available_locales()
if action == 'new':
if language in availableLocales:
print(f'{app}: language {language} already localized.')
return
make_new_localization(language)
elif action == 'back':
if language not in availableLocales:
print(f'{app}: language {language} not localized [{", ".join(availableLocales)}].')
return
copy_back_localized_resources(language)
elif action == 'string':
if language != 'en' and language not in availableLocales:
print(f'{app}: language {language} not localized [{", ".join(availableLocales)}].')
return
reversion = argv[3] if len(argv) > 3 else ''
extract_resource_string(language, reversion)
else:
show_help()
if __name__ == '__main__':
main(sys.argv)
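# --- Editor's sketch (not part of the tool's interface) -------------------------
# `StringExtractor.find_changed_lines` derives the set of changed line numbers
# from the `@@ -a,b +c,d @@` hunk headers emitted by `git diff --unified=0`.
# The helper below reproduces that parsing on a plain diff string so the regex
# can be exercised without invoking git (`re` is already imported at the top of
# this script). It is an illustrative sketch, not used by the commands above.
def _changed_lines_from_diff(diff_text):
    changed = set()
    for _, line, count in re.findall(r'^@@\s+\-\d+(,\d+)?\s+\+(\d+)(,\d+)?\s+@@', diff_text, re.MULTILINE):
        start = int(line)
        span = int(count[1:]) if count else 0
        changed.update(range(start, start + span) if span else (start,))
    return changed

# Example: _changed_lines_from_diff("@@ -10,2 +12,3 @@") -> {12, 13, 14}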
|
7863dde21dfbaa8f6b213507664d96841ae7c5ac
|
cb6d0a660cfcb28ee9e8a1c0266925f8f541edfb
|
/tests/python/test_lpm_trie.py
|
02d9d83baa99fa3b6d61d3d4c8d6fa4846dc207e
|
[
"LicenseRef-scancode-proprietary-license",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
iovisor/bcc
|
0e002769364523caeb731216021b0a3c881a723f
|
ec49363e2e9daec026ee6cae4c5fc316f8fab0ff
|
refs/heads/master
| 2023-09-03T22:37:47.238198
| 2023-08-31T14:44:55
| 2023-09-01T11:21:30
| 34,921,116
| 18,467
| 3,907
|
Apache-2.0
| 2023-09-13T21:22:53
| 2015-05-01T19:52:32
|
C
|
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
test_lpm_trie.py
|
#!/usr/bin/env python3
# Copyright (c) 2017 Facebook, Inc.
# Licensed under the Apache License, Version 2.0 (the "License")
import ctypes as ct
import os
from unittest import main, skipUnless, TestCase
from utils import kernel_version_ge
from bcc import BPF
from netaddr import IPAddress
class KeyV4(ct.Structure):
_fields_ = [("prefixlen", ct.c_uint),
("data", ct.c_ubyte * 4)]
class KeyV6(ct.Structure):
_fields_ = [("prefixlen", ct.c_uint),
("data", ct.c_ushort * 8)]
@skipUnless(kernel_version_ge(4, 11), "requires kernel >= 4.11")
class TestLpmTrie(TestCase):
def test_lpm_trie_v4(self):
test_prog1 = b"""
struct key_v4 {
u32 prefixlen;
u32 data[4];
};
BPF_LPM_TRIE(trie, struct key_v4, int, 16);
"""
b = BPF(text=test_prog1)
t = b[b"trie"]
k1 = KeyV4(24, (192, 168, 0, 0))
v1 = ct.c_int(24)
t[k1] = v1
k2 = KeyV4(28, (192, 168, 0, 0))
v2 = ct.c_int(28)
t[k2] = v2
k = KeyV4(32, (192, 168, 0, 15))
self.assertEqual(t[k].value, 28)
k = KeyV4(32, (192, 168, 0, 127))
self.assertEqual(t[k].value, 24)
with self.assertRaises(KeyError):
k = KeyV4(32, (172, 16, 1, 127))
v = t[k]
def test_lpm_trie_v6(self):
test_prog1 = b"""
struct key_v6 {
u32 prefixlen;
u32 data[4];
};
BPF_LPM_TRIE(trie, struct key_v6, int, 16);
"""
b = BPF(text=test_prog1)
t = b[b"trie"]
k1 = KeyV6(64, IPAddress('2a00:1450:4001:814:200e::').words)
v1 = ct.c_int(64)
t[k1] = v1
k2 = KeyV6(96, IPAddress('2a00:1450:4001:814::200e').words)
v2 = ct.c_int(96)
t[k2] = v2
k = KeyV6(128, IPAddress('2a00:1450:4001:814::1024').words)
self.assertEqual(t[k].value, 96)
k = KeyV6(128, IPAddress('2a00:1450:4001:814:2046::').words)
self.assertEqual(t[k].value, 64)
with self.assertRaises(KeyError):
k = KeyV6(128, IPAddress('2a00:ffff::').words)
v = t[k]
if __name__ == "__main__":
main()
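# --- Editor's sketch (pure Python, not part of the test) ------------------------
# The assertions above rely on longest-prefix-match semantics: a lookup returns
# the value of the most specific prefix covering the key, so 192.168.0.15 hits
# the /28 entry while 192.168.0.127 falls back to the /24. The helper below
# reproduces that selection with the standard `ipaddress` module; it sketches
# the semantics only, not the kernel's LPM trie implementation.
import ipaddress

def _lpm_lookup(table, address):
    """table maps ip_network objects to values; return the longest-prefix match."""
    addr = ipaddress.ip_address(address)
    matches = [net for net in table if addr in net]
    if not matches:
        raise KeyError(address)
    return table[max(matches, key=lambda net: net.prefixlen)]

# Example:
#   table = {ipaddress.ip_network("192.168.0.0/24"): 24,
#            ipaddress.ip_network("192.168.0.0/28"): 28}
#   _lpm_lookup(table, "192.168.0.15") -> 28
#   _lpm_lookup(table, "192.168.0.127") -> 24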
|
1a002de22b81fffb0bc6a3e65df640fc47a446fa
|
e8b38b8dfa348ff006eb197a7906ca8e491a23dc
|
/tests/epyccel/modules/call_user_defined_funcs.py
|
0773f5e116fbaa16aa5e42d8196bf2420589f5d1
|
[
"MIT"
] |
permissive
|
pyccel/pyccel
|
d79a81dbdff1172839a6a1227abfcc1f97e6c97b
|
1896b761ba662c90b14c195bbb6eb5cddc57cbfc
|
refs/heads/devel
| 2023-08-30T12:15:25.244401
| 2023-08-28T09:31:32
| 2023-08-28T09:31:32
| 100,463,736
| 307
| 39
|
MIT
| 2023-09-14T19:29:26
| 2017-08-16T07:59:14
|
Python
|
UTF-8
|
Python
| false
| false
| 788
|
py
|
call_user_defined_funcs.py
|
# pylint: disable=missing-function-docstring, missing-module-docstring
# This module test call user defined functions
# through nested functions
def do_nothing():
x = 0
x *= 0
def not_change(s : 'float'):
s *= s
def my_div(a : 'float', b : 'float'):
return a / b
def my_mult(a : 'float', b : 'float'):
return a * b
def my_pi():
return 3.141592653589793
def my_cub(r : 'float'):
return r * r * r
def circle_volume(radius : 'float'):
do_nothing()
volume = my_mult(my_mult(my_div(3. , 4.), my_pi()), my_cub(radius))
not_change(volume)
return volume
def arr_mult_scalar(T: 'int[:]', t: int = 13):
x = T * t
return x
def alias(T: 'int[:]', t: int):
x = arr_mult_scalar(T, t=t)
y = arr_mult_scalar(t=t, T=T)
return x, y
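# --- Editor's note (illustrative, not part of the test module) ------------------
# `circle_volume` composes the helpers above, so circle_volume(r) evaluates to
# (3/4) * pi * r**3 (the test's own formula, not the 4/3*pi*r**3 sphere volume),
# and `not_change` leaves the result untouched because the float is passed by
# value. A quick check in plain Python:
#
#     import math
#     assert abs(circle_volume(2.0) - 0.75 * math.pi * 8.0) < 1e-12
#
# `alias` calls `arr_mult_scalar` with positional and keyword argument orders
# swapped, so both returned arrays equal T * t.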
|
8ddd1dad90b3d618e546d388ec887c9cd36b5708
|
b60f84142c6a704621a49522ed1364439a38be19
|
/litedram/core/multiplexer.py
|
a0e9df6760689f186ab3c8dea67b52ecdf7f574f
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
enjoy-digital/litedram
|
cf0ad16a6a7b22b5b5257af313ed59f3f2bba477
|
ed81c8cc861ae14e44b2df83adce3ff39ca7f7ac
|
refs/heads/master
| 2023-09-04T06:27:33.897454
| 2023-08-29T14:48:10
| 2023-08-29T14:48:10
| 31,147,284
| 321
| 111
|
NOASSERTION
| 2023-07-07T06:53:26
| 2015-02-22T00:43:35
|
Python
|
UTF-8
|
Python
| false
| false
| 16,755
|
py
|
multiplexer.py
|
#
# This file is part of LiteDRAM.
#
# Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
# Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2018 John Sully <john@csquare.ca>
# SPDX-License-Identifier: BSD-2-Clause
"""LiteDRAM Multiplexer."""
import math
from functools import reduce
from operator import or_, and_
from migen import *
from migen.genlib.roundrobin import *
from migen.genlib.coding import Decoder
from litex.soc.interconnect import stream
from litex.soc.interconnect.csr import AutoCSR
from litedram.common import *
from litedram.core.bandwidth import Bandwidth
# _CommandChooser ----------------------------------------------------------------------------------
class _CommandChooser(Module):
"""Arbitrates between requests, filtering them based on their type
Uses RoundRobin to choose current request, filters requests based on
`want_*` signals.
Parameters
----------
requests : [Endpoint(cmd_request_rw_layout), ...]
Request streams to consider for arbitration
Attributes
----------
want_reads : Signal, in
Consider read requests
want_writes : Signal, in
Consider write requests
want_cmds : Signal, in
Consider command requests (without ACT)
want_activates : Signal, in
Also consider ACT commands
cmd : Endpoint(cmd_request_rw_layout)
Currently selected request stream (when ~cmd.valid, cas/ras/we are 0)
"""
def __init__(self, requests):
self.want_reads = Signal()
self.want_writes = Signal()
self.want_cmds = Signal()
self.want_activates = Signal()
a = len(requests[0].a)
ba = len(requests[0].ba)
# cas/ras/we are 0 when valid is inactive
self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))
# # #
n = len(requests)
valids = Signal(n)
for i, request in enumerate(requests):
is_act_cmd = request.ras & ~request.cas & ~request.we
command = request.is_cmd & self.want_cmds & (~is_act_cmd | self.want_activates)
read = request.is_read == self.want_reads
write = request.is_write == self.want_writes
self.comb += valids[i].eq(request.valid & (command | (read & write)))
arbiter = RoundRobin(n, SP_CE)
self.submodules += arbiter
choices = Array(valids[i] for i in range(n))
self.comb += [
arbiter.request.eq(valids),
cmd.valid.eq(choices[arbiter.grant])
]
for name in ["a", "ba", "is_read", "is_write", "is_cmd"]:
choices = Array(getattr(req, name) for req in requests)
self.comb += getattr(cmd, name).eq(choices[arbiter.grant])
for name in ["cas", "ras", "we"]:
# we should only assert those signals when valid is 1
choices = Array(getattr(req, name) for req in requests)
self.comb += \
If(cmd.valid,
getattr(cmd, name).eq(choices[arbiter.grant])
)
for i, request in enumerate(requests):
self.comb += \
If(cmd.valid & cmd.ready & (arbiter.grant == i),
request.ready.eq(1)
)
# Arbitrate if a command is being accepted or if the command is not valid to ensure a valid
# command is selected when cmd.ready goes high.
self.comb += arbiter.ce.eq(cmd.ready | ~cmd.valid)
# helpers
def accept(self):
return self.cmd.valid & self.cmd.ready
def activate(self):
return self.cmd.ras & ~self.cmd.cas & ~self.cmd.we
def write(self):
return self.cmd.is_write
def read(self):
return self.cmd.is_read
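# --- Editor's sketch (plain Python, not Migen) ---------------------------------
# _CommandChooser only forwards a request when its type matches the want_* mask:
# plain commands need want_cmds (and want_activates if the command is an ACT),
# while read/write requests must agree with want_reads/want_writes. The helper
# below restates that combinational predicate as ordinary booleans; it is an
# illustrative sketch, not used by the core.
def _request_is_wanted(is_cmd, is_act, is_read, is_write,
                       want_cmds, want_activates, want_reads, want_writes):
    command = is_cmd and want_cmds and (not is_act or want_activates)
    return command or ((is_read == want_reads) and (is_write == want_writes))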
# _Steerer -----------------------------------------------------------------------------------------
(STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH) = range(4)
class _Steerer(Module):
"""Connects selected request to DFI interface
cas/ras/we/is_write/is_read are connected only when `cmd.valid & cmd.ready`.
Rank bits are decoded and used to drive cs_n in multi-rank systems,
STEER_REFRESH always enables all ranks.
Parameters
----------
commands : [Endpoint(cmd_request_rw_layout), ...]
Command streams to choose from. Must be of len=4 in the order:
NOP, CMD, REQ, REFRESH
NOP can be of type Record(cmd_request_rw_layout) instead, so that it is
always considered invalid (because of lack of the `valid` attribute).
dfi : dfi.Interface
DFI interface connected to PHY
Attributes
----------
sel : [Signal(max=len(commands)), ...], in
Signals for selecting which request gets connected to the corresponding
DFI phase. The signals should take one of the values from STEER_* to
select given source.
"""
def __init__(self, commands, dfi):
ncmd = len(commands)
nph = len(dfi.phases)
self.sel = [Signal(max=ncmd) for i in range(nph)]
# # #
def valid_and(cmd, attr):
if not hasattr(cmd, "valid"):
return 0
else:
return cmd.valid & cmd.ready & getattr(cmd, attr)
for i, (phase, sel) in enumerate(zip(dfi.phases, self.sel)):
nranks = len(phase.cs_n)
rankbits = log2_int(nranks)
if hasattr(phase, "reset_n"):
self.comb += phase.reset_n.eq(1)
self.comb += phase.cke.eq(Replicate(Signal(reset=1), nranks))
if hasattr(phase, "odt"):
# FIXME: add dynamic drive for multi-rank (will be needed for high frequencies)
self.comb += phase.odt.eq(Replicate(Signal(reset=1), nranks))
if rankbits:
rank_decoder = Decoder(nranks)
self.submodules += rank_decoder
self.comb += rank_decoder.i.eq((Array(cmd.ba[-rankbits:] for cmd in commands)[sel]))
if i == 0: # Select all ranks on refresh.
self.sync += If(sel == STEER_REFRESH, phase.cs_n.eq(0)).Else(phase.cs_n.eq(~rank_decoder.o))
else:
self.sync += phase.cs_n.eq(~rank_decoder.o)
self.sync += phase.bank.eq(Array(cmd.ba[:-rankbits] for cmd in commands)[sel])
else:
self.sync += phase.cs_n.eq(0)
self.sync += phase.bank.eq(Array(cmd.ba[:] for cmd in commands)[sel])
self.sync += [
phase.address.eq(Array(cmd.a for cmd in commands)[sel]),
phase.cas_n.eq(~Array(valid_and(cmd, "cas") for cmd in commands)[sel]),
phase.ras_n.eq(~Array(valid_and(cmd, "ras") for cmd in commands)[sel]),
phase.we_n.eq(~Array(valid_and(cmd, "we") for cmd in commands)[sel])
]
rddata_ens = Array(valid_and(cmd, "is_read") for cmd in commands)
wrdata_ens = Array(valid_and(cmd, "is_write") for cmd in commands)
self.sync += [
phase.rddata_en.eq(rddata_ens[sel]),
phase.wrdata_en.eq(wrdata_ens[sel])
]
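# Note (added for clarity): the control FSM in the Multiplexer below drives
# `steerer.sel[phase]` with one of the STEER_* constants each cycle; STEER_NOP
# keeps a phase idle, while STEER_REFRESH on phase 0 additionally selects all
# ranks, as implemented above.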
# Multiplexer --------------------------------------------------------------------------------------
class Multiplexer(Module, AutoCSR):
"""Multplexes requets from BankMachines to DFI
This module multiplexes requests from BankMachines (and Refresher) and
connects them to DFI. Refresh commands are coordinated between the Refresher
and BankMachines to ensure there are no conflicts. Enforces required timings
between commands (some timings are enforced by BankMachines).
Parameters
----------
settings : ControllerSettings
Controller settings (with .phy, .geom and .timing settings)
bank_machines : [BankMachine, ...]
Bank machines that generate command requests to the Multiplexer
refresher : Refresher
Generates REFRESH command requests
dfi : dfi.Interface
DFI connected to the PHY
interface : LiteDRAMInterface
Data interface connected directly to LiteDRAMCrossbar
"""
def __init__(self,
settings,
bank_machines,
refresher,
dfi,
interface):
assert(settings.phy.nphases == len(dfi.phases))
ras_allowed = Signal(reset=1)
cas_allowed = Signal(reset=1)
# Read/Write Cmd/Dat phases ----------------------------------------------------------------
nphases = settings.phy.nphases
rdphase = settings.phy.rdphase
wrphase = settings.phy.wrphase
if isinstance(rdphase, Signal):
rdcmdphase = Signal.like(rdphase)
self.comb += rdcmdphase.eq(rdphase - 1) # Implicit %nphases.
else:
rdcmdphase = (rdphase - 1)%nphases
        if isinstance(wrphase, Signal):
wrcmdphase = Signal.like(wrphase)
self.comb += wrcmdphase.eq(wrphase - 1) # Implicit %nphases.
else:
wrcmdphase = (wrphase - 1)%nphases
# Command choosing -------------------------------------------------------------------------
requests = [bm.cmd for bm in bank_machines]
self.submodules.choose_cmd = choose_cmd = _CommandChooser(requests)
self.submodules.choose_req = choose_req = _CommandChooser(requests)
if settings.phy.nphases == 1:
# When only 1 phase, use choose_req for all requests
choose_cmd = choose_req
self.comb += choose_req.want_cmds.eq(1)
self.comb += choose_req.want_activates.eq(ras_allowed)
# Command steering -------------------------------------------------------------------------
nop = Record(cmd_request_layout(settings.geom.addressbits,
log2_int(len(bank_machines))))
# nop must be 1st
commands = [nop, choose_cmd.cmd, choose_req.cmd, refresher.cmd]
steerer = _Steerer(commands, dfi)
self.submodules += steerer
# tRRD timing (Row to Row delay) -----------------------------------------------------------
self.submodules.trrdcon = trrdcon = tXXDController(settings.timing.tRRD)
self.comb += trrdcon.valid.eq(choose_cmd.accept() & choose_cmd.activate())
# tFAW timing (Four Activate Window) -------------------------------------------------------
self.submodules.tfawcon = tfawcon = tFAWController(settings.timing.tFAW)
self.comb += tfawcon.valid.eq(choose_cmd.accept() & choose_cmd.activate())
# RAS control ------------------------------------------------------------------------------
self.comb += ras_allowed.eq(trrdcon.ready & tfawcon.ready)
# tCCD timing (Column to Column delay) -----------------------------------------------------
self.submodules.tccdcon = tccdcon = tXXDController(settings.timing.tCCD)
self.comb += tccdcon.valid.eq(choose_req.accept() & (choose_req.write() | choose_req.read()))
# CAS control ------------------------------------------------------------------------------
self.comb += cas_allowed.eq(tccdcon.ready)
# tWTR timing (Write to Read delay) --------------------------------------------------------
write_latency = math.ceil(settings.phy.cwl / settings.phy.nphases)
        self.submodules.twtrcon = twtrcon = tXXDController(
            settings.timing.tWTR + write_latency +
            # tCCD must be added since tWTR begins after the transfer is complete
            (settings.timing.tCCD if settings.timing.tCCD is not None else 0))
self.comb += twtrcon.valid.eq(choose_req.accept() & choose_req.write())
# Read/write turnaround --------------------------------------------------------------------
read_available = Signal()
write_available = Signal()
reads = [req.valid & req.is_read for req in requests]
writes = [req.valid & req.is_write for req in requests]
self.comb += [
read_available.eq(reduce(or_, reads)),
write_available.eq(reduce(or_, writes))
]
# Anti Starvation --------------------------------------------------------------------------
def anti_starvation(timeout):
en = Signal()
max_time = Signal()
if timeout:
t = timeout - 1
time = Signal(max=t+1)
self.comb += max_time.eq(time == 0)
self.sync += If(~en,
time.eq(t)
).Elif(~max_time,
time.eq(time - 1)
)
else:
self.comb += max_time.eq(0)
return en, max_time
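        # Illustrative note (added for clarity): with e.g. settings.read_time == 32,
        # `time` reloads to 31 whenever the corresponding state is inactive (en == 0);
        # while the state is active it counts down once per cycle, so max_time asserts
        # when it reaches zero (after timeout - 1 cycles in that state), letting the
        # FSM switch away if the other access type is pending.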
read_time_en, max_read_time = anti_starvation(settings.read_time)
write_time_en, max_write_time = anti_starvation(settings.write_time)
# Refresh ----------------------------------------------------------------------------------
self.comb += [bm.refresh_req.eq(refresher.cmd.valid) for bm in bank_machines]
go_to_refresh = Signal()
bm_refresh_gnts = [bm.refresh_gnt for bm in bank_machines]
self.comb += go_to_refresh.eq(reduce(and_, bm_refresh_gnts))
# Datapath ---------------------------------------------------------------------------------
all_rddata = [p.rddata for p in dfi.phases]
all_wrdata = [p.wrdata for p in dfi.phases]
all_wrdata_mask = [p.wrdata_mask for p in dfi.phases]
self.comb += [
interface.rdata.eq(Cat(*all_rddata)),
Cat(*all_wrdata).eq(interface.wdata),
Cat(*all_wrdata_mask).eq(~interface.wdata_we)
]
def steerer_sel(steerer, access):
assert access in ["read", "write"]
r = []
for i in range(nphases):
r.append(steerer.sel[i].eq(STEER_NOP))
if access == "read":
r.append(If(i == rdphase, steerer.sel[i].eq(STEER_REQ)))
r.append(If(i == rdcmdphase, steerer.sel[i].eq(STEER_CMD)))
if access == "write":
r.append(If(i == wrphase, steerer.sel[i].eq(STEER_REQ)))
r.append(If(i == wrcmdphase, steerer.sel[i].eq(STEER_CMD)))
return r
# Control FSM ------------------------------------------------------------------------------
self.submodules.fsm = fsm = FSM()
fsm.act("READ",
read_time_en.eq(1),
choose_req.want_reads.eq(1),
If(settings.phy.nphases == 1,
choose_req.cmd.ready.eq(cas_allowed & (~choose_req.activate() | ras_allowed))
).Else(
choose_cmd.want_activates.eq(ras_allowed),
choose_cmd.cmd.ready.eq(~choose_cmd.activate() | ras_allowed),
choose_req.cmd.ready.eq(cas_allowed)
),
steerer_sel(steerer, access="read"),
If(write_available,
# TODO: switch only after several cycles of ~read_available?
If(~read_available | max_read_time,
NextState("RTW")
)
),
If(go_to_refresh,
NextState("REFRESH")
)
)
fsm.act("WRITE",
write_time_en.eq(1),
choose_req.want_writes.eq(1),
If(settings.phy.nphases == 1,
choose_req.cmd.ready.eq(cas_allowed & (~choose_req.activate() | ras_allowed))
).Else(
choose_cmd.want_activates.eq(ras_allowed),
choose_cmd.cmd.ready.eq(~choose_cmd.activate() | ras_allowed),
choose_req.cmd.ready.eq(cas_allowed),
),
steerer_sel(steerer, access="write"),
If(read_available,
If(~write_available | max_write_time,
NextState("WTR")
)
),
If(go_to_refresh,
NextState("REFRESH")
)
)
fsm.act("REFRESH",
steerer.sel[0].eq(STEER_REFRESH),
refresher.cmd.ready.eq(1),
If(refresher.cmd.last,
NextState("READ")
)
)
fsm.act("WTR",
If(twtrcon.ready,
NextState("READ")
)
)
# TODO: reduce this, actual limit is around (cl+1)/nphases
fsm.delayed_enter("RTW", "WRITE", settings.phy.read_latency-1)
if settings.with_bandwidth:
data_width = settings.phy.dfi_databits*settings.phy.nphases
self.submodules.bandwidth = Bandwidth(self.choose_req.cmd, data_width)
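# Illustrative wiring sketch (not part of this file): inside a LiteDRAM
# controller the Multiplexer is typically instantiated after the Refresher and
# the per-bank BankMachines, roughly as below. Names are assumptions for
# illustration only; see litedram/core/controller.py for the actual integration.
#
#   self.submodules.multiplexer = Multiplexer(
#       settings      = controller_settings,
#       bank_machines = bank_machines,
#       refresher     = self.refresher,
#       dfi           = dfi,
#       interface     = self.interface)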
|
c4b19cb4ce7d002fd3b9503c92e62f8ec9df3f05
|
c61a4403e4925fb9c80447c982ee7f597eaf4862
|
/visual/visual_human.py
|
96bc6c43c97bef13190b969ed3cbc7cb17a965c6
|
[
"MIT"
] |
permissive
|
williamSYSU/TextGAN-PyTorch
|
f7c149fcc4032e73050acb8f6b0384ad054fd5ec
|
7ff8909e92fe01160275a6d62a34ffdfcc018bca
|
refs/heads/master
| 2023-07-20T13:33:17.573605
| 2023-02-02T02:54:25
| 2023-02-02T02:54:25
| 178,318,753
| 865
| 224
|
MIT
| 2023-07-06T13:04:26
| 2019-03-29T02:36:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
visual_human.py
|
import matplotlib.pyplot as plt
import numpy as np
n_groups = 2
# a=(2.751,,,,)
SeqGAN = 2.588
RankGAN = 0.449
LeakGAN = 3.011
RelGAN = 3.407
means_men = 20
means_women = 25
SentiGAN = 3.117
CSGAN = 2.017
CatGAN_s = 3.601
CatGAN_m = 3.411
# plt.figure(figsize=(10, 100))
fig, ax = plt.subplots(figsize=(6, 3))
index = np.arange(n_groups)
bar_width = 0.5
opacity = 1.0
error_config = {'ecolor': '0'}
rects1 = ax.bar(0, CSGAN, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#8e44ad', error_kw=error_config,
label='CSGAN')
rects2 = ax.bar(bar_width, SentiGAN, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#27ae60', error_kw=error_config,
label='SentiGAN')
rects3 = ax.bar(0 + 2 * bar_width, CatGAN_m, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#d35400', error_kw=error_config,
label='CatGAN ($k=2$)')
gap = 1.2
rects4 = ax.bar(3 * bar_width + gap, SeqGAN, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#fd79a8', error_kw=error_config,
label='SeqGAN')
rects5 = ax.bar(4 * bar_width + gap, RankGAN, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#34495e', error_kw=error_config,
label='RankGAN')
rects6 = ax.bar(0 + 5 * bar_width + gap, LeakGAN, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#f1c40f', error_kw=error_config,
label='LeakGAN')
rects7 = ax.bar(6 * bar_width + gap, RelGAN, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#2980b9', error_kw=error_config,
label='RelGAN')
rects8 = ax.bar(7 * bar_width + gap, CatGAN_s, bar_width, linestyle='-', linewidth=1, edgecolor='black',
alpha=opacity, color='#c0392b', error_kw=error_config,
label='CatGAN ($k=1$)')
ax.set_xlabel('Dataset')
ax.set_ylabel('Human Score')
# ax.set_title('Scores by group and gender')
xtick_positions = ((0 + 3 * bar_width) / 3, 3 * bar_width + gap + 2 * bar_width)
ax.set_xticks(xtick_positions)
ax.set_xticklabels(('AR', 'EN'))
ax.legend(bbox_to_anchor=(1, 0), loc=3, borderaxespad=0.2)
# plt.legend()
fig.tight_layout()
plt.savefig('savefig/human.pdf')
plt.show()
# plt.savefig('C:/1123.pdf')
|
6eceac8975f585189168179ec02dd7f730a0c2d7
|
7a6b4705293709e32a6927ad4f76eb0549f3bea9
|
/orchestra/tests/helpers/notifications.py
|
793a2eb9a82852e104ecd2a223f8131579c3e8c6
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
b12io/orchestra
|
a71941d80d1eeddb73f301da8f601b2c31a4b279
|
ee8a29122a3491feae1e1c2c4699142726ae6c21
|
refs/heads/main
| 2023-08-20T17:46:36.360755
| 2023-06-27T13:32:46
| 2023-06-27T13:32:46
| 42,593,972
| 459
| 66
|
Apache-2.0
| 2023-06-27T13:32:48
| 2015-09-16T14:55:16
|
Python
|
UTF-8
|
Python
| false
| false
| 384
|
py
|
notifications.py
|
class MockMail:
def __init__(self):
self.inbox = []
def send_mail(self, recipient_list, **kwargs):
for recipient in recipient_list:
self.inbox.append({
'recipient': recipient,
'subject': kwargs['subject'],
'message': kwargs['message']
})
def clear(self):
self.inbox[:] = []
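# Minimal usage sketch (not part of the original helper): tests typically
# substitute a MockMail instance for the real mail-sending function and then
# assert on its inbox. The direct calls below only exercise the helper itself.
if __name__ == '__main__':
    mock_mail = MockMail()
    mock_mail.send_mail(['worker@example.com'],
                        subject='Task ready',
                        message='A new task is available.')
    assert mock_mail.inbox == [{'recipient': 'worker@example.com',
                                'subject': 'Task ready',
                                'message': 'A new task is available.'}]
    mock_mail.clear()
    assert mock_mail.inbox == []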
|
414aaf398e3e4a435718c3923a2ab3bd1ee72ccd
|
fbbe424559f64e9a94116a07eaaa555a01b0a7bb
|
/Pandas_numpy/source/pandas/core/indexes/accessors.py
|
2176338574304a21752ee30843aeb1037b391f54
|
[
"MIT"
] |
permissive
|
ryfeus/lambda-packs
|
6544adb4dec19b8e71d75c24d8ed789b785b0369
|
cabf6e4f1970dc14302f87414f170de19944bac2
|
refs/heads/master
| 2022-12-07T16:18:52.475504
| 2022-11-29T13:35:35
| 2022-11-29T13:35:35
| 71,386,735
| 1,283
| 263
|
MIT
| 2022-11-26T05:02:14
| 2016-10-19T18:22:39
|
Python
|
UTF-8
|
Python
| false
| false
| 7,815
|
py
|
accessors.py
|
"""
datetimelike delegation
"""
import numpy as np
from pandas.core.dtypes.common import (
is_period_arraylike,
is_datetime_arraylike, is_integer_dtype,
is_datetime64_dtype, is_datetime64tz_dtype,
is_timedelta64_dtype, is_categorical_dtype,
is_list_like)
from pandas.core.accessor import PandasDelegate
from pandas.core.base import NoNewAttributesMixin, PandasObject
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas._libs.period import IncompatibleFrequency # noqa
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.core.algorithms import take_1d
def is_datetimelike(data):
"""
return a boolean if we can be successfully converted to a datetimelike
"""
try:
maybe_to_datetimelike(data)
return True
except (Exception):
pass
return False
def maybe_to_datetimelike(data, copy=False):
"""
return a DelegatedClass of a Series that is datetimelike
(e.g. datetime64[ns],timedelta64[ns] dtype or a Series of Periods)
raise TypeError if this is not possible.
Parameters
----------
data : Series
copy : boolean, default False
copy the input data
Returns
-------
DelegatedClass
"""
from pandas import Series
if not isinstance(data, Series):
raise TypeError("cannot convert an object of type {0} to a "
"datetimelike index".format(type(data)))
index = data.index
name = data.name
orig = data if is_categorical_dtype(data) else None
if orig is not None:
data = orig.values.categories
if is_datetime64_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy),
index, name=name, orig=orig)
elif is_datetime64tz_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy),
index, data.name, orig=orig)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaProperties(TimedeltaIndex(data, copy=copy), index,
name=name, orig=orig)
else:
if is_period_arraylike(data):
return PeriodProperties(PeriodIndex(data, copy=copy), index,
name=name, orig=orig)
if is_datetime_arraylike(data):
return DatetimeProperties(DatetimeIndex(data, copy=copy), index,
name=name, orig=orig)
raise TypeError("cannot convert an object of type {0} to a "
"datetimelike index".format(type(data)))
class Properties(PandasDelegate, PandasObject, NoNewAttributesMixin):
def __init__(self, values, index, name, orig=None):
self.values = values
self.index = index
self.name = name
self.orig = orig
self._freeze()
def _delegate_property_get(self, name):
from pandas import Series
result = getattr(self.values, name)
# maybe need to upcast (ints)
if isinstance(result, np.ndarray):
if is_integer_dtype(result):
result = result.astype('int64')
elif not is_list_like(result):
return result
result = np.asarray(result)
# blow up if we operate on categories
if self.orig is not None:
result = take_1d(result, self.orig.cat.codes)
# return the result as a Series, which is by definition a copy
result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a property of a datetimelike "
"object are not supported and are discarded. "
"Change values on the original.")
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
raise ValueError("modifications to a property of a datetimelike "
"object are not supported. Change values on the "
"original.")
def _delegate_method(self, name, *args, **kwargs):
from pandas import Series
method = getattr(self.values, name)
result = method(*args, **kwargs)
if not is_list_like(result):
return result
result = Series(result, index=self.index, name=self.name)
# setting this object will show a SettingWithCopyWarning/Error
result.is_copy = ("modifications to a method of a datetimelike object "
"are not supported and are discarded. Change "
"values on the original.")
return result
class DatetimeProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pydatetime(self):
return self.values.to_pydatetime()
@property
def freq(self):
return self.values.inferred_freq
DatetimeProperties._add_delegate_accessors(
delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_ops,
typ='property')
DatetimeProperties._add_delegate_accessors(
delegate=DatetimeIndex,
accessors=DatetimeIndex._datetimelike_methods,
typ='method')
class TimedeltaProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hours
>>> s.dt.seconds
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
def to_pytimedelta(self):
return self.values.to_pytimedelta()
@property
def components(self):
"""
Return a dataframe of the components (days, hours, minutes,
seconds, milliseconds, microseconds, nanoseconds) of the Timedeltas.
Returns
-------
a DataFrame
"""
return self.values.components.set_index(self.index)
@property
def freq(self):
return self.values.inferred_freq
TimedeltaProperties._add_delegate_accessors(
delegate=TimedeltaIndex,
accessors=TimedeltaIndex._datetimelike_ops,
typ='property')
TimedeltaProperties._add_delegate_accessors(
delegate=TimedeltaIndex,
accessors=TimedeltaIndex._datetimelike_methods,
typ='method')
class PeriodProperties(Properties):
"""
Accessor object for datetimelike properties of the Series values.
Examples
--------
>>> s.dt.hour
>>> s.dt.second
>>> s.dt.quarter
Returns a Series indexed like the original Series.
Raises TypeError if the Series does not contain datetimelike values.
"""
PeriodProperties._add_delegate_accessors(
delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_ops,
typ='property')
PeriodProperties._add_delegate_accessors(
delegate=PeriodIndex,
accessors=PeriodIndex._datetimelike_methods,
typ='method')
class CombinedDatetimelikeProperties(DatetimeProperties, TimedeltaProperties):
# This class is never instantiated, and exists solely for the benefit of
# the Series.dt class property. For Series objects, .dt will always be one
# of the more specific classes above.
__doc__ = DatetimeProperties.__doc__
@classmethod
def _make_accessor(cls, data):
try:
return maybe_to_datetimelike(data)
except Exception:
raise AttributeError("Can only use .dt accessor with "
"datetimelike values")
|
8b2fe18a12eafa35398360f6e569a220e3450582
|
975b2d421d3661e6770b601929d5f11d981d8985
|
/msgraph/generated/models/item_activity_stat.py
|
8b52a48e3a4ec6af655905103d13a92cdf32380a
|
[
"MIT"
] |
permissive
|
microsoftgraph/msgraph-sdk-python
|
a7c551b85daadeebf76ec4ae12668664ea639b42
|
27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949
|
refs/heads/main
| 2023-09-03T21:45:27.989672
| 2023-08-31T06:22:18
| 2023-08-31T06:22:18
| 534,665,999
| 135
| 18
|
MIT
| 2023-09-14T11:04:11
| 2022-09-09T14:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 4,958
|
py
|
item_activity_stat.py
|
from __future__ import annotations
import datetime
from dataclasses import dataclass, field
from kiota_abstractions.serialization import Parsable, ParseNode, SerializationWriter
from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union
if TYPE_CHECKING:
from .entity import Entity
from .incomplete_data import IncompleteData
from .item_action_stat import ItemActionStat
from .item_activity import ItemActivity
from .entity import Entity
@dataclass
class ItemActivityStat(Entity):
# Statistics about the access actions in this interval. Read-only.
access: Optional[ItemActionStat] = None
# Exposes the itemActivities represented in this itemActivityStat resource.
activities: Optional[List[ItemActivity]] = None
# Statistics about the create actions in this interval. Read-only.
create: Optional[ItemActionStat] = None
# Statistics about the delete actions in this interval. Read-only.
delete: Optional[ItemActionStat] = None
# Statistics about the edit actions in this interval. Read-only.
edit: Optional[ItemActionStat] = None
# When the interval ends. Read-only.
end_date_time: Optional[datetime.datetime] = None
# Indicates that the statistics in this interval are based on incomplete data. Read-only.
incomplete_data: Optional[IncompleteData] = None
# Indicates whether the item is 'trending.' Read-only.
is_trending: Optional[bool] = None
# Statistics about the move actions in this interval. Read-only.
move: Optional[ItemActionStat] = None
# The OdataType property
odata_type: Optional[str] = None
# When the interval starts. Read-only.
start_date_time: Optional[datetime.datetime] = None
@staticmethod
def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> ItemActivityStat:
"""
Creates a new instance of the appropriate class based on discriminator value
Args:
parse_node: The parse node to use to read the discriminator value and create the object
Returns: ItemActivityStat
"""
if not parse_node:
raise TypeError("parse_node cannot be null.")
return ItemActivityStat()
def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:
"""
The deserialization information for the current model
Returns: Dict[str, Callable[[ParseNode], None]]
"""
        from .entity import Entity
        from .incomplete_data import IncompleteData
        from .item_action_stat import ItemActionStat
        from .item_activity import ItemActivity
fields: Dict[str, Callable[[Any], None]] = {
"access": lambda n : setattr(self, 'access', n.get_object_value(ItemActionStat)),
"activities": lambda n : setattr(self, 'activities', n.get_collection_of_object_values(ItemActivity)),
"create": lambda n : setattr(self, 'create', n.get_object_value(ItemActionStat)),
"delete": lambda n : setattr(self, 'delete', n.get_object_value(ItemActionStat)),
"edit": lambda n : setattr(self, 'edit', n.get_object_value(ItemActionStat)),
"endDateTime": lambda n : setattr(self, 'end_date_time', n.get_datetime_value()),
"incompleteData": lambda n : setattr(self, 'incomplete_data', n.get_object_value(IncompleteData)),
"isTrending": lambda n : setattr(self, 'is_trending', n.get_bool_value()),
"move": lambda n : setattr(self, 'move', n.get_object_value(ItemActionStat)),
"startDateTime": lambda n : setattr(self, 'start_date_time', n.get_datetime_value()),
}
super_fields = super().get_field_deserializers()
fields.update(super_fields)
return fields
def serialize(self,writer: SerializationWriter) -> None:
"""
Serializes information the current object
Args:
writer: Serialization writer to use to serialize this model
"""
if not writer:
raise TypeError("writer cannot be null.")
super().serialize(writer)
writer.write_object_value("access", self.access)
writer.write_collection_of_object_values("activities", self.activities)
writer.write_object_value("create", self.create)
writer.write_object_value("delete", self.delete)
writer.write_object_value("edit", self.edit)
writer.write_datetime_value("endDateTime", self.end_date_time)
writer.write_object_value("incompleteData", self.incomplete_data)
writer.write_bool_value("isTrending", self.is_trending)
writer.write_object_value("move", self.move)
writer.write_datetime_value("startDateTime", self.start_date_time)
|
57bf1740ef5fd46ce70773388281a1dd05c0c37f
|
767c07db1fb131047af3d9b0a065b8fdc8aac9ab
|
/73-mpl/plot-Links.py
|
63faed472990f6f6a2b2feb352c342a47abda652
|
[] |
no_license
|
DUanalytics/pyAnalytics
|
e52c5469da30a5f436ec0f3120d9f15fb82fd9b3
|
107a08bebe46ea51afccfeae4a666213bb405d41
|
refs/heads/master
| 2023-07-08T04:32:54.758902
| 2023-07-03T14:37:04
| 2023-07-03T14:37:04
| 202,094,535
| 394
| 31
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 147
|
py
|
plot-Links.py
|
# Plots Links
#-----------------------------
#%
# https://www.machinelearningplus.com/plots/matplotlib-tutorial-complete-guide-python-plot-examples/
|
0586ce8886ee5214ad685e82afcc2eec46cf03ec
|
1ed25da5d1e27cd49fb4a02acfe99aadcf2fae57
|
/pygeoapi/process/manager/__init__.py
|
2019a54b7e7860562fd29858f2289ce8aa21926c
|
[
"MIT"
] |
permissive
|
geopython/pygeoapi
|
6d2a7b0e8fe75d0c454a0b2fc3599a0b88c7567f
|
2d3ec88320cf5e1ed47b4b794f40b453bad487e2
|
refs/heads/master
| 2023-09-04T04:30:59.768950
| 2023-09-03T02:00:23
| 2023-09-03T02:00:23
| 121,585,259
| 391
| 245
|
MIT
| 2023-09-13T18:13:00
| 2018-02-15T02:46:27
|
Python
|
UTF-8
|
Python
| false
| false
| 2,357
|
py
|
__init__.py
|
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Ricardo Garcia Silva <ricardo.garcia.silva@geobeyond.it>
#
# Copyright (c) 2019 Tom Kralidis
# (c) 2023 Ricardo Garcia Silva
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
from typing import Dict
from pygeoapi.plugin import load_plugin
from pygeoapi.process.manager.base import BaseManager
LOGGER = logging.getLogger(__name__)
def get_manager(config: Dict) -> BaseManager:
"""Instantiate process manager from the supplied configuration.
:param config: pygeoapi configuration
:returns: The pygeoapi process manager object
"""
manager_conf = config.get('server', {}).get(
'manager',
{
'name': 'Dummy',
'connection': None,
'output_dir': None
}
)
processes_conf = {}
for id_, resource_conf in config.get('resources', {}).items():
if resource_conf.get('type') == 'process':
processes_conf[id_] = resource_conf
manager_conf['processes'] = processes_conf
    if manager_conf.get('name') == 'Dummy':
        LOGGER.info('Starting dummy manager')
    return load_plugin('process_manager', manager_conf)
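# Illustrative usage sketch (not part of this module), assuming a pygeoapi
# install where the default 'Dummy' manager plugin is available; the resource
# names below are placeholders:
#
#   config = {
#       'server': {},
#       'resources': {
#           'hello-world': {'type': 'process',
#                           'processor': {'name': 'HelloWorld'}}
#       }
#   }
#   manager = get_manager(config)
#   # -> a BaseManager subclass configured with the 'hello-world' process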
|
88e00c6beee1aa2bf4962ac7df5f6c20b0d6c321
|
56d6257e932e1397ab03b1e7ccc6231378665b04
|
/APC40/TransportComponent.py
|
fe8d5a18ccb63a88ced14942861546d146de03cb
|
[] |
no_license
|
gluon/AbletonLive10.1_MIDIRemoteScripts
|
e6c8dc4956cff9630aaa36f3667994387ad1d0cf
|
2468b51eba7e5082b06f9e381b3e72027c5f272c
|
refs/heads/master
| 2023-01-10T18:37:46.504180
| 2022-12-23T09:21:48
| 2022-12-23T09:21:48
| 213,423,555
| 205
| 59
| null | 2021-02-12T16:15:01
| 2019-10-07T15:44:52
|
Python
|
UTF-8
|
Python
| false
| false
| 2,020
|
py
|
TransportComponent.py
|
#Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/APC40/TransportComponent.py
from __future__ import absolute_import, print_function, unicode_literals
import Live
from _Framework.Control import ButtonControl
from _Framework.TransportComponent import TransportComponent as TransportComponentBase
from _Framework.SubjectSlot import subject_slot
class TransportComponent(TransportComponentBase):
u""" TransportComponent that only uses certain buttons if a shift button is pressed """
rec_quantization_button = ButtonControl()
def __init__(self, *a, **k):
super(TransportComponent, self).__init__(*a, **k)
self._last_quant_value = Live.Song.RecordingQuantization.rec_q_eight
self._on_quantization_changed.subject = self.song()
self._update_quantization_state()
self.set_quant_toggle_button = self.rec_quantization_button.set_control_element
@rec_quantization_button.pressed
def rec_quantization_button(self, value):
assert self._last_quant_value != Live.Song.RecordingQuantization.rec_q_no_q
quant_value = self.song().midi_recording_quantization
if quant_value != Live.Song.RecordingQuantization.rec_q_no_q:
self._last_quant_value = quant_value
self.song().midi_recording_quantization = Live.Song.RecordingQuantization.rec_q_no_q
else:
self.song().midi_recording_quantization = self._last_quant_value
@subject_slot(u'midi_recording_quantization')
def _on_quantization_changed(self):
if self.is_enabled():
self._update_quantization_state()
def _update_quantization_state(self):
quant_value = self.song().midi_recording_quantization
quant_on = quant_value != Live.Song.RecordingQuantization.rec_q_no_q
if quant_on:
self._last_quant_value = quant_value
self.rec_quantization_button.color = u'DefaultButton.On' if quant_on else u'DefaultButton.Off'
|
2edc98bdfcbac7f70eb220579798d1079ce95147
|
902eb0e21020148d164e5a49694980315213188b
|
/anymail/backends/test.py
|
3a9d5353657ca17a1abb0b5e4785540fd88249ea
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
anymail/django-anymail
|
4ca8dfff50ea2a2987b389693e46748e7717367d
|
42dbfcf2c45f38a004a1f576dad38e7ad5d554cb
|
refs/heads/main
| 2023-09-01T06:52:35.147257
| 2023-08-25T19:19:05
| 2023-08-25T19:35:08
| 53,549,881
| 1,556
| 150
|
NOASSERTION
| 2023-08-25T19:35:09
| 2016-03-10T02:55:09
|
Python
|
UTF-8
|
Python
| false
| false
| 5,663
|
py
|
test.py
|
from django.core import mail
from ..exceptions import AnymailAPIError
from ..message import AnymailRecipientStatus
from .base import AnymailBaseBackend, BasePayload
class EmailBackend(AnymailBaseBackend):
"""
Anymail backend that simulates sending messages, useful for testing.
Sent messages are collected in django.core.mail.outbox
(as with Django's locmem backend).
In addition:
* Anymail send params parsed from the message will be attached
to the outbox message as a dict in the attr `anymail_test_params`
* If the caller supplies an `anymail_test_response` attr on the message,
that will be used instead of the default "sent" response. It can be either
an AnymailRecipientStatus or an instance of AnymailAPIError (or a subclass)
to raise an exception.
"""
esp_name = "Test"
def __init__(self, *args, **kwargs):
# Allow replacing the payload, for testing.
# (Real backends would generally not implement this option.)
self._payload_class = kwargs.pop("payload_class", TestPayload)
super().__init__(*args, **kwargs)
if not hasattr(mail, "outbox"):
mail.outbox = [] # see django.core.mail.backends.locmem
def get_esp_message_id(self, message):
# Get a unique ID for the message. The message must have been added to
# the outbox first.
return mail.outbox.index(message)
def build_message_payload(self, message, defaults):
return self._payload_class(backend=self, message=message, defaults=defaults)
def post_to_esp(self, payload, message):
# Keep track of the sent messages and params (for test cases)
message.anymail_test_params = payload.get_params()
mail.outbox.append(message)
try:
# Tests can supply their own message.test_response:
response = message.anymail_test_response
if isinstance(response, AnymailAPIError):
raise response
except AttributeError:
# Default is to return 'sent' for each recipient
status = AnymailRecipientStatus(
message_id=self.get_esp_message_id(message), status="sent"
)
response = {
"recipient_status": {
email: status for email in payload.recipient_emails
}
}
return response
def parse_recipient_status(self, response, payload, message):
try:
return response["recipient_status"]
except KeyError as err:
raise AnymailAPIError("Unparsable test response") from err
class TestPayload(BasePayload):
# For test purposes, just keep a dict of the params we've received.
# (This approach is also useful for native API backends -- think of
# payload.params as collecting kwargs for esp_native_api.send().)
def init_payload(self):
self.params = {}
self.recipient_emails = []
def get_params(self):
# Test backend callers can check message.anymail_test_params['is_batch_send']
# to verify whether Anymail thought the message should use batch send logic.
self.params["is_batch_send"] = self.is_batch()
return self.params
def set_from_email(self, email):
self.params["from"] = email
def set_envelope_sender(self, email):
self.params["envelope_sender"] = email.addr_spec
def set_to(self, emails):
self.params["to"] = emails
self.recipient_emails += [email.addr_spec for email in emails]
def set_cc(self, emails):
self.params["cc"] = emails
self.recipient_emails += [email.addr_spec for email in emails]
def set_bcc(self, emails):
self.params["bcc"] = emails
self.recipient_emails += [email.addr_spec for email in emails]
def set_subject(self, subject):
self.params["subject"] = subject
def set_reply_to(self, emails):
self.params["reply_to"] = emails
def set_extra_headers(self, headers):
self.params["extra_headers"] = headers
def set_text_body(self, body):
self.params["text_body"] = body
def set_html_body(self, body):
self.params["html_body"] = body
def add_alternative(self, content, mimetype):
# For testing purposes, we allow all "text/*" alternatives,
# but not any other mimetypes.
if mimetype.startswith("text"):
self.params.setdefault("alternatives", []).append((content, mimetype))
else:
self.unsupported_feature("alternative part with type '%s'" % mimetype)
def add_attachment(self, attachment):
self.params.setdefault("attachments", []).append(attachment)
def set_metadata(self, metadata):
self.params["metadata"] = metadata
def set_send_at(self, send_at):
self.params["send_at"] = send_at
def set_tags(self, tags):
self.params["tags"] = tags
def set_track_clicks(self, track_clicks):
self.params["track_clicks"] = track_clicks
def set_track_opens(self, track_opens):
self.params["track_opens"] = track_opens
def set_template_id(self, template_id):
self.params["template_id"] = template_id
def set_merge_data(self, merge_data):
self.params["merge_data"] = merge_data
def set_merge_metadata(self, merge_metadata):
self.params["merge_metadata"] = merge_metadata
def set_merge_global_data(self, merge_global_data):
self.params["merge_global_data"] = merge_global_data
def set_esp_extra(self, extra):
# Merge extra into params
self.params.update(extra)
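# Illustrative usage sketch (not part of this module): in a Django test run
# with EMAIL_BACKEND = "anymail.backends.test.EmailBackend", sent messages land
# in django.core.mail.outbox and expose the parsed send params, e.g.:
#
#   from django.core import mail
#   from django.core.mail import send_mail
#   send_mail("Subject", "Body", "from@example.com", ["to@example.com"])
#   params = mail.outbox[0].anymail_test_params
#   assert params["subject"] == "Subject"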
|