function_name: format_excitation_indices
docstring:
    Consistent formatting of excitation indices
    idx = [(p0,q0),(p1,q1),...,(pn,qn)]
    sorted as: p0<p1<...<pn and pi<qi
    :param idx: list of index tuples describing a single(!) fermionic excitation
    :return: tuple-list of index tuples
file_content (the masked format_excitation_indices function is marked inside the code below):
import os
from dataclasses import dataclass
from tequila import TequilaException, BitString, TequilaWarning
from tequila.hamiltonian import QubitHamiltonian
from tequila.wavefunction import QubitWaveFunction
from tequila.hamiltonian.paulis import Sp, Sm, Qp, Qm
from tequila.circuit import QCircuit, gates, _gates_impl
from tequila.objective.objective import Variable, Variables, ExpectationValue
from tequila.simulators.simulator_api import simulate
from tequila.utils import to_float
from tequila.objective import assign_variable
from .encodings import known_encodings
import typing, numpy, numbers, copy
from itertools import product
# if you are experiencing import errors you need to update openfermion
# required is version >= 1.0
# otherwise replace with from openfermion.hamiltonians import MolecularData
import openfermion
from openfermion.chem import MolecularData
import warnings
@dataclass
class ActiveSpaceData:
active_orbitals: list # active orbitals (spatial, c1)
reference_orbitals: list # reference orbitals (spatial, c1)
def __str__(self):
result = "Active Space Data:\n"
result += "{key:15} : {value:15} \n".format(key="active_orbitals", value=str(self.active_orbitals))
result += "{key:15} : {value:15} \n".format(key="reference_orbitals",
value=str(self.reference_orbitals))
result += "{key:15} : {value:15} \n".format(key="frozen_docc", value=str(self.frozen_docc))
result += "{key:15} : {value:15} \n".format(key="frozen_uocc", value=str(self.frozen_uocc))
return result
@property
def frozen_reference_orbitals(self):
return [i for i in self.reference_orbitals if i not in self.active_orbitals]
@property
def active_reference_orbitals(self):
return [i for i in self.reference_orbitals if i in self.active_orbitals]
class FermionicGateImpl(gates.QubitExcitationImpl):
# keep the overview in circuits
def __init__(self, generator, p0, transformation, *args, **kwargs):
super().__init__(generator=generator, target=generator.qubits, p0=p0, *args, **kwargs)
self._name = "FermionicExcitation"
self.transformation=transformation
def compile(self):
return gates.Trotterized(generator=self.generator, control=self.control, angle=self.parameter, steps=1)
def prepare_product_state(state: BitString) -> QCircuit:
"""Small convenience function
Parameters
----------
state :
product state encoded into a bitstring
state: BitString :
Returns
-------
type
unitary circuit which prepares the product state
"""
result = QCircuit()
for i, v in enumerate(state.array):
if v == 1:
result += gates.X(target=i)
return result
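# Usage sketch (added for illustration, not part of the original source); assumes BitString.from_array
# accepts a list of 0/1 occupations, as it is used later in prepare_reference:
#   ref = BitString.from_array([1, 1, 0, 0])
#   U = prepare_product_state(ref)   # circuit with X gates on qubits 0 and 1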
@dataclass
class ParametersQC:
"""Specialization of ParametersHamiltonian"""
basis_set: str = None # Quantum chemistry basis set
geometry: str = None # geometry of the underlying molecule (units: Angstrom!),
# this can be a filename leading to an .xyz file or the geometry given as a string
description: str = ""
multiplicity: int = 1
charge: int = 0
name: str = None
@property
def n_electrons(self, *args, **kwargs):
return self.get_nuc_charge() - self.charge
def get_nuc_charge(self):
return sum(self.get_atom_number(name=atom) for atom in self.get_atoms())
def get_atom_number(self, name):
atom_numbers={"h":1, "he":2, "li":3, "be":4, "b":5, "c":6, "n":7, "o":8, "f":9, "ne":10, "na":11, "mg":12, "al":13, "si":14, "ph":15, "s":16, "cl":17, "ar":18}
if name.lower() in atom_numbers:
return atom_numbers[name.lower()]
try:
import periodictable as pt
atom = name.capitalize()
element = pt.elements.symbol(atom)
return element.number
except Exception:
raise TequilaException("can not assign atomic number to element {}\npip install periodictable will fix it".format(name))
def get_atoms(self):
return [x[0] for x in self.get_geometry()]
def __post_init__(self,*args, **kwargs):
if self.name is None and self.geometry is None:
raise TequilaException("no geometry or name given to molecule\nprovide geometry=filename.xyz or geometry=`h 0.0 0.0 0.0\\n...`\nor name=whatever with file whatever.xyz being present")
# auto naming
if self.name is None:
if ".xyz" in self.geometry:
self.name=self.geometry.split(".xyz")[0]
if self.description is None:
coord, description = self.read_xyz_from_file()
self.description=description
else:
atoms=self.get_atoms()
atom_names=sorted(list(set(atoms)), key=lambda x: self.get_atom_number(x), reverse=True)
if self.name is None:
drop_ones=lambda x: "" if x==1 else x
self.name="".join(["{}{}".format(x,drop_ones(atoms.count(x))) for x in atom_names])
self.name = self.name.lower()
if self.geometry is None:
self.geometry=self.name+".xyz"
if ".xyz" in self.geometry and not os.path.isfile(self.geometry):
raise TequilaException("could not find file for molecular coordinates {}".format(self.geometry))
@property
def filename(self):
""" """
return "{}_{}".format(self.name, self.basis_set)
@property
def molecular_data_param(self) -> dict:
""":return: Give back all parameters for the MolecularData format from openfermion as dictionary"""
return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,
'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename
}
@staticmethod
def format_element_name(string):
"""OpenFermion uses case sensitive hash tables for chemical elements
I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work
this convenience function does the naming
:return: first letter converted to upper rest to lower
Parameters
----------
string :
Returns
-------
"""
assert (len(string) > 0)
assert (isinstance(string, str))
fstring = string[0].upper() + string[1:].lower()
return fstring
@staticmethod
def convert_to_list(geometry):
"""Convert a molecular structure given as a string into a list suitable for openfermion
Parameters
----------
geometry :
a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0\n h 0.0 0.0 1.0"
Returns
-------
type
A list in the format openfermion expects, e.g. [('H', (0.0, 0.0, 0.0)), ...]
"""
result = []
# Remove blank lines
lines = [l for l in geometry.split("\n") if l]
for line in lines:
words = line.split()
# Pad coordinates
if len(words) < 4:
words += [0.0] * (4 - len(words))
try:
tmp = (ParametersQC.format_element_name(words[0]),
(float(words[1]), float(words[2]), float(words[3])))
result.append(tmp)
except ValueError:
print("get_geometry list unknown line:\n ", line, "\n proceed with caution!")
return result
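# Usage sketch (added for illustration, not part of the original source):
#   ParametersQC.convert_to_list("h 0.0 0.0 0.0\nh 0.0 0.0 0.7")
#   -> [('H', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 0.7))]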
def get_geometry_string(self) -> str:
"""returns the geometry as a string
:return: geometry string
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if comment is not None:
self.description = comment
return geomstring
else:
return self.geometry
def get_geometry(self):
"""Returns the geometry
If a xyz filename was given the file is read out
otherwise it is assumed that the geometry was given as string
which is then reformatted as a list usable as input for openfermion
:return: geometry as list
e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
Units: Angstrom!
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if self.description == '':
self.description = comment
return self.convert_to_list(geomstring)
elif self.geometry is not None:
return self.convert_to_list(self.geometry)
else:
raise Exception("Parameters.qc.geometry is None")
@staticmethod
def read_xyz_from_file(filename):
"""Read XYZ filetype for molecular structures
https://en.wikipedia.org/wiki/XYZ_file_format
Units: Angstrom!
Parameters
----------
filename :
return:
Returns
-------
"""
with open(filename, 'r') as file:
content = file.readlines()
natoms = int(content[0])
comment = str(content[1]).strip('\n')
coord = ''
for i in range(natoms):
coord += content[2 + i]
return coord, comment
@dataclass
class ClosedShellAmplitudes:
""" """
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Returns
-------
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(nocc + A, I, nocc + B, J)] = value
if self.tIA is not None:
nocc = self.tIA.shape[0]
for (I, A), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(A + nocc, I)] = value
return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))
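# Usage sketch (added for illustration, not part of the original source; `t2` and `t1` are
# hypothetical numpy arrays of shape (nocc, nocc, nvirt, nvirt) and (nocc, nvirt)):
#   amps = ClosedShellAmplitudes(tIjAb=t2, tIA=t1)
#   variables = amps.make_parameter_dictionary(threshold=1e-6)
#   # keys follow the (a, i, b, j) convention, virtual indices are offset by nocc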
@dataclass
class Amplitudes:
"""Coupled-Cluster Amplitudes
We adopt the Psi4 notation for consistency
I,A for alpha
i,a for beta
Parameters
----------
Returns
-------
"""
@classmethod
def from_closed_shell(cls, cs: ClosedShellAmplitudes):
"""
Initialize from closed-shell Amplitude structure
Parameters
----------
cs: ClosedShellAmplitudes :
Returns
-------
"""
tijab = cs.tIjAb - numpy.einsum("ijab -> ijba", cs.tIjAb, optimize='greedy')
return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
tiJaB: numpy.ndarray = None
tijab: numpy.ndarray = None
tIJAB: numpy.ndarray = None
tia: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Neglect amplitudes below the threshold
Returns
-------
Dictionary of tequila variables (hash is in the style of (a,i,b,j))
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
# spin-orbital convention: alpha spin-orbitals are even (2*p), beta spin-orbitals are odd (2*p+1)
for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, 2 * j + 1)] = value
for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), 2 * J)] = value
for (i, j, a, b), value in numpy.ndenumerate(self.tijab):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, 2 * j + 1)] = value
for (I, J, A, B), value in numpy.ndenumerate(self.tIJAB):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), 2 * J)] = value
if self.tIA is not None:
nocc = self.tIA.shape[0]
assert (self.tia.shape[0] == nocc)
for (I, A), value in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (A + nocc), 2 * I)] = value
for (i, a), value in numpy.ndenumerate(self.tia):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value
return variables
class NBodyTensor:
""" Convenience class for handling N-body tensors """
class Ordering:
def __init__(self, scheme):
if hasattr(scheme, "_scheme"):
scheme = scheme._scheme
elif hasattr(scheme, "scheme"):
scheme = scheme.scheme
self._scheme = self.assign_scheme(scheme)
def assign_scheme(self, scheme):
if scheme is None:
return "chem"
else:
scheme = str(scheme)
if scheme.lower() in ["mulliken", "chem", "c", "1122"]:
return "chem"
elif scheme.lower() in ["dirac", "phys", "p", "1212"]:
return "phys"
elif scheme.lower() in ["openfermion", "of", "o", "1221"]:
return "of"
else:
raise TequilaException(
"Unknown two-body tensor scheme {}. Supported are dirac, mulliken, and openfermion".format(scheme))
def is_phys(self):
return self._scheme == "phys"
def is_chem(self):
return self._scheme == "chem"
def is_of(self):
return self._scheme == "of"
def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, ordering: str = None,
size_full: int = None):
"""
Parameters
----------
elems: Tensor data as numpy array
active_indices: List of active indices in total ordering
ordering: Ordering scheme for two body tensors
"dirac" or "phys": <12|g|12>
.. math::
g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
"mulliken" or "chem": (11|g|22)
.. math::
g_{pqrs} = \\int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
"openfermion":
.. math:: [12|g|21]
g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
size_full
"""
# Set elements
self.elems = elems
# Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible
# representations
if active_indices is not None:
self.active_indices = active_indices
self._passive_indices = None
self._full_indices = None
self._indices_set: bool = False
# Determine order of tensor
# Assume, that tensor is entered in desired shape, not as flat array.
self.order = len(self.elems.shape)
# Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well
if size_full is None:
self._size_full = self.elems.shape[0]
else:
self._size_full = size_full
# 2-body tensors (<=> order 4) currently allow reordering
if self.order == 4:
self.ordering = self.Ordering(ordering)
else:
if ordering is not None:
raise Exception("Ordering only implemented for tensors of order 4 / 2-body tensors.")
self.ordering = None
def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:
"""
Get subspace of tensor by a set of index lists
according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]
This essentially is an implementation of a non-contiguous slicing using numpy.take
Parameters
----------
idx_lists :
List of lists, each defining the desired subspace per axis
Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
# Check if index list has correct size
if len(idx_lists) != self.order:
raise Exception("Need to pass an index list for each dimension!" +
" Length of idx_lists needs to match order of tensor.")
# Perform slicing via numpy.take
out = self.elems
for ax in range(self.order):
if idx_lists[ax] is not None: # None means, we want the full space in this direction
out = numpy.take(out, idx_lists[ax], axis=ax)
return out
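# Usage sketch (added for illustration, not part of the original source):
#   h = NBodyTensor(elems=numpy.arange(16).reshape(4, 4), active_indices=[0, 1])
#   block = h.sub_lists(idx_lists=[[0, 1], None])   # shape (2, 4); None keeps the full axis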
def set_index_lists(self):
""" Set passive and full index lists based on class inputs """
tmp_size = self._size_full
if self._size_full is None:
tmp_size = self.elems.shape[0]
self._passive_indices = [i for i in range(tmp_size)
if i not in self.active_indices]
self._full_indices = [i for i in range(tmp_size)]
def sub_str(self, name: str) -> numpy.ndarray:
"""
Get subspace of tensor by a string
Currently able to resolve an active space, named 'a', the full space 'f', and the complement 'p' = 'f' - 'a'.
Full space in this context may also be smaller than the actual tensor dimension.
The specification of the active space here only allows picking a set of orbitals from a list,
it cannot resolve an active space from irreducible representations.
Example for a one-body tensor:
hPQ.sub_str(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]
Parameters
----------
name :
String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
if not self._indices_set:
self.set_index_lists()
self._indices_set = True
if name is None:
raise Exception("No name specified.")
if len(name) != self.order:
raise Exception("Name does not match order of the tensor.")
if self.active_indices is None:
raise Exception("Need to set an active space in order to call this function.")
idx_lists = []
# Parse name as string of space indices
for char in name:
if char.lower() == 'a':
idx_lists.append(self.active_indices)
elif char.lower() == 'p':
idx_lists.append(self._passive_indices)
elif char.lower() == 'f':
if self._size_full is None:
idx_lists.append(None)
else:
idx_lists.append(self._full_indices)
else:
raise Exception("Need to specify a valid letter (a,p,f).")
out = self.sub_lists(idx_lists)
return out
def reorder(self, to: str = 'of'):
"""
Function to reorder tensors according to some convention.
Parameters
----------
to :
Ordering scheme of choice.
'openfermion', 'of' (default) :
openfermion - ordering, corresponds to integrals of the type
h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1)
with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
currently needed for dependencies on openfermion-library
'chem', 'c' :
quantum chemistry ordering, collect particle terms,
more convenient for real-space methods
h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
This is output by psi4
'phys', 'p' :
typical physics ordering, integrals of type
h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
with operators a^pq_rs = a^p a^q a_s a_r
Returns
-------
"""
if self.order != 4:
raise Exception('Reordering currently only implemented for two-body tensors.')
to = self.Ordering(to)
if self.ordering == to:
return self
elif self.ordering.is_chem():
if to.is_of():
self.elems = numpy.einsum("psqr -> pqrs", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("prqs -> pqrs", self.elems, optimize='greedy')
elif self.ordering.is_of():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> psqr", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("pqrs -> pqsr", self.elems, optimize='greedy')
elif self.ordering.is_phys():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> prqs", self.elems, optimize='greedy')
elif to.is_of():
self.elems = numpy.einsum("pqsr -> pqrs", self.elems, optimize='greedy')
return self
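# Usage sketch (added for illustration, not part of the original source; `g_chem` is a
# hypothetical chemist-ordered (11|22) numpy array):
#   g = NBodyTensor(elems=g_chem, ordering="mulliken")
#   g.reorder(to="openfermion")
#   g_of = g.elems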
class QuantumChemistryBase:
def __init__(self, parameters: ParametersQC,
transformation: typing.Union[str, typing.Callable] = None,
active_orbitals: list = None,
*args,
**kwargs):
self.parameters = parameters
if "molecule" in kwargs:
self.molecule = kwargs["molecule"]
else:
self.molecule = self.make_molecule(*args, **kwargs)
assert (parameters.basis_set.lower() == self.molecule.basis.lower())
assert (parameters.multiplicity == self.molecule.multiplicity)
assert (parameters.charge == self.molecule.charge)
self.active_space = None
if active_orbitals is not None:
self.active_space = self._make_active_space_data(active_orbitals=active_orbitals)
self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)
self._rdm1 = None
self._rdm2 = None
def _initialize_transformation(self, transformation=None, *args, **kwargs):
if transformation is None:
transformation = "JordanWigner"
# filter out arguments to the transformation
trafo_args = {k.split("__")[1]: v for k, v in kwargs.items() if
(hasattr(k, "lower") and "transformation__" in k.lower())}
trafo_args["n_electrons"] = self.n_electrons
trafo_args["n_orbitals"] = self.n_orbitals
if hasattr(transformation, "upper"):
# format to conventions
transformation = transformation.replace("_", "").replace("-", "").upper()
encodings = known_encodings()
if transformation in encodings:
transformation = encodings[transformation](**trafo_args)
else:
raise TequilaException(
"Unkown Fermion-to-Qubit encoding {}. Try something like: {}".format(transformation,
list(encodings.keys())))
return transformation
def _make_active_space_data(self, active_orbitals, reference=None):
"""
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
"""
if active_orbitals is None:
return None
if reference is None:
# auto assignment only for closed-shell
assert (self.n_electrons % 2 == 0)
reference = sorted([i for i in range(self.n_electrons // 2)])
return ActiveSpaceData(active_orbitals=sorted(active_orbitals),
reference_orbitals=sorted(reference))
@classmethod
def from_openfermion(cls, molecule: openfermion.MolecularData,
transformation: typing.Union[str, typing.Callable] = None,
*args,
**kwargs):
"""
Initialize directly from an openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
"""
parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,
description=molecule.description, multiplicity=molecule.multiplicity,
charge=molecule.charge)
return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)
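# Usage sketch (added for illustration, not part of the original source; `of_molecule` is a
# hypothetical, already computed openfermion.MolecularData instance):
#   mol = QuantumChemistryBase.from_openfermion(of_molecule, transformation="JordanWigner")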
def make_excitation_generator(self,
indices: typing.Iterable[typing.Tuple[int, int]],
form: str = None,
remove_constant_term: bool = True) -> QubitHamiltonian:
"""
Notes
----------
Creates the transformed hermitian generator of UCC type unitaries:
M(a^\dagger_{a_0} a_{i_0} a^\dagger_{a_1} a_{i_1} ... - h.c.)
where the qubit map M depends on self.transformation
Parameters
----------
indices : typing.Iterable[typing.Tuple[int, int]] :
List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha: even numbers, beta: odd numbers)
can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
form : str : (Default value None):
Manipulate the generator to involution or projector
set form='involution' or 'projector'
the default is no manipulation which gives the standard fermionic excitation operator back
remove_constant_term: bool: (Default value True):
by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
if the unitary is controlled this might not be true!
Returns
-------
type
1j*Transformed qubit excitation operator, depends on self.transformation
"""
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet")
# check indices and convert to list of tuples if necessary
if len(indices) == 0:
raise TequilaException("make_excitation_operator: no indices given")
elif not isinstance(indices[0], typing.Iterable):
if len(indices) % 2 != 0:
raise TequilaException("make_excitation_generator: unexpected input format of indices\n"
"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\n"
"or list as [a_0, i_0, a_1, i_1, ... ]\n"
"you gave: {}".format(indices))
converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]
else:
converted = indices
# convert everything to native python int
# otherwise openfermion will complain
converted = [(int(pair[0]), int(pair[1])) for pair in converted]
# convert to openfermion input format
ofi = []
dag = []
for pair in converted:
assert (len(pair) == 2)
ofi += [(int(pair[0]), 1),
(int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64
dag += [(int(pair[0]), 0), (int(pair[1]), 1)]
op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian
op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)
if isinstance(form, str) and form.lower() != 'fermionic':
# indices for all the Na operators
Na = [x for pair in converted for x in [(pair[0], 1), (pair[0], 0)]]
# indices for all the Ma operators (Ma = 1 - Na)
Ma = [x for pair in converted for x in [(pair[0], 0), (pair[0], 1)]]
# indices for all the Ni operators
Ni = [x for pair in converted for x in [(pair[1], 1), (pair[1], 0)]]
# indices for all the Mi operators
Mi = [x for pair in converted for x in [(pair[1], 0), (pair[1], 1)]]
# can gaussianize as projector or as involution (last is default)
if form.lower() == "p+":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, 0.5)
op += openfermion.FermionOperator(Ni + Ma, 0.5)
elif form.lower() == "p-":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, -0.5)
op += openfermion.FermionOperator(Ni + Ma, -0.5)
elif form.lower() == "g+":
op += openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
elif form.lower() == "g-":
op += openfermion.FermionOperator([], -1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, 1.0)
op += openfermion.FermionOperator(Ni + Ma, 1.0)
elif form.lower() == "p0":
# P0: we only construct P0 and don't keep the original generator
op = openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
else:
raise TequilaException(
"Unknown generator form {}, supported are G, P+, P-, G+, G- and P0".format(form))
qop = self.transformation(op)
# remove constant terms
# they have no effect in the unitary (if not controlled)
if remove_constant_term:
qop.qubit_operator.terms[tuple()] = 0.0
# check if the operator is hermitian and cast coefficients to floats
# in order to avoid trouble with the simulation backends
assert qop.is_hermitian()
for k, v in qop.qubit_operator.terms.items():
qop.qubit_operator.terms[k] = to_float(v)
qop = qop.simplify()
if len(qop) == 0:
warnings.warn("Excitation generator is a unit operator.\n"
"Non-standard transformations might not work with general fermionic operators\n"
"indices = " + str(indices), category=TequilaWarning)
return qop
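# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance):
#   G = mol.make_excitation_generator(indices=[(2, 0)])
#   # G encodes i*(a^dagger_2 a_0 - h.c.), mapped through mol.transformation
#   P0 = mol.make_excitation_generator(indices=[(2, 0)], form="P0")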
def make_hardcore_boson_excitation_gate(self, indices, angle, control=None, assume_real=True, compile_options="optimize"):
target = []
for pair in indices:
assert len(pair) == 2
target += [pair[0], pair[1]]
consistency = [x < self.n_orbitals for x in target]
if not all(consistency):
raise TequilaException(
"make_hardcore_boson_excitation_gate: Inconsistencies in indices={}. Should be indexed from 0 ... n_orbitals={}".format(
indices, self.n_orbitals))
return gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, control=control, compile_options=compile_options)
def make_excitation_gate(self, indices, angle, control=None, assume_real=True, **kwargs):
"""
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\\prod_{k} a_{p_k}^\\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
"""
generator = self.make_excitation_generator(indices=indices, remove_constant_term=control is None)
p0 = self.make_excitation_generator(indices=indices, form="P0", remove_constant_term=control is None)
return QCircuit.wrap_gate(
FermionicGateImpl(angle=angle, generator=generator, p0=p0, transformation=type(self.transformation).__name__.lower(), assume_real=assume_real, control=control, **kwargs))
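# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance): a paired double excitation between spin-orbitals (0,1) and (2,3)
#   U = mol.make_excitation_gate(indices=[(2, 0), (3, 1)], angle="a")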
def make_molecule(self, *args, **kwargs) -> MolecularData:
"""Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
"""
molecule = MolecularData(**self.parameters.molecular_data_param)
# try to load
do_compute = True
try:
import os
if os.path.exists(self.parameters.filename):
molecule.load()
do_compute = False
except OSError:
do_compute = True
if do_compute:
molecule = self.do_make_molecule(*args, **kwargs)
molecule.save()
return molecule
def do_make_molecule(self, *args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
# integrals need to be passed in base class
assert ("one_body_integrals" in kwargs)
assert ("two_body_integrals" in kwargs)
one_body_integrals = kwargs["one_body_integrals"]
two_body_integrals = kwargs["two_body_integrals"]
# tequila assumes "openfermion" ordering, integrals can however be passed
# down in other orderings, but it needs to be indicated by keyword
if "ordering" in kwargs:
two_body_integrals = NBodyTensor(two_body_integrals, ordering=kwargs["ordering"])
two_body_integrals.reorder(to="openfermion")
two_body_integrals = two_body_integrals.elems
if "nuclear_repulsion" in kwargs:
nuclear_repulsion = kwargs["nuclear_repulsion"]
else:
nuclear_repulsion = 0.0
warnings.warn("No nuclear_repulsion given for custom molecule, setting to zero", category=TequilaWarning)
if ("n_orbitals" in kwargs):
n_orbitals = kwargs["n_orbitals"]
else:
n_orbitals = one_body_integrals.shape[0]
for i in [0, 1, 2, 3]:
assert n_orbitals == two_body_integrals.shape[i]
molecule = MolecularData(**self.parameters.molecular_data_param)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.nuclear_repulsion = nuclear_repulsion
molecule.n_orbitals = n_orbitals
if "n_electrons" in kwargs:
molecule.n_electrons = kwargs["n_electrons"]
molecule.save()
return molecule
@property
def n_orbitals(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_orbitals
else:
return len(self.active_space.active_orbitals)
@property
def n_electrons(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_electrons
else:
return 2 * len(self.active_space.active_reference_orbitals)
def make_hamiltonian(self, occupied_indices=None, active_indices=None, threshold=1.e-8) -> QubitHamiltonian:
""" """
if occupied_indices is None and self.active_space is not None:
occupied_indices = self.active_space.frozen_reference_orbitals
if active_indices is None and self.active_space is not None:
active_indices = self.active_space.active_orbitals
fop = openfermion.transforms.get_fermion_operator(
self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))
try:
qop = self.transformation(fop)
except TypeError:
qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))
qop.is_hermitian()
return qop
def make_hardcore_boson_hamiltonian(self):
if not self.transformation.up_then_down:
warnings.warn(
"Hardcore-Boson Hamiltonian without reordering will result in non-consecutive Hamiltonians that are eventually not be combinable with other features of tequila. Try transformation=\'ReorderedJordanWigner\' or similar for more consistency",
TequilaWarning)
# integrate with QubitEncoding at some point
n_orbitals = self.n_orbitals
c, obt, tbt = self.get_integrals()
h = numpy.zeros(shape=[n_orbitals] * 2)
g = numpy.zeros(shape=[n_orbitals] * 2)
for p in range(n_orbitals):
h[p, p] += 2 * obt[p, p]
for q in range(n_orbitals):
h[p, q] += + tbt[p, p, q, q]
if p != q:
g[p, q] += 2 * tbt[p, q, q, p] - tbt[p, q, p, q]
H = c
for p in range(n_orbitals):
for q in range(n_orbitals):
up = p
uq = q
H += h[p, q] * Sm(up) * Sp(uq) + g[p, q] * Sm(up) * Sp(up) * Sm(uq) * Sp(uq)
return H
def make_molecular_hamiltonian(self):
if self.active_space:
return self.molecule.get_molecular_hamiltonian(occupied_indices=self.active_space.frozen_reference_orbitals,
active_indices=self.active_space.active_orbitals)
else:
return self.molecule.get_molecular_hamiltonian()
def get_integrals(self, two_body_ordering="openfermion"):
"""
Returns
-------
Tuple with:
constant part (nuclear_repulsion + possible integrated parts from active-spaces)
one_body_integrals
two_body_integrals
"""
if self.active_space is not None and len(self.active_space.frozen_reference_orbitals) > 0:
c, h1, h2 = self.molecule.get_active_space_integrals(active_indices=self.active_space.active_orbitals,
occupied_indices=self.active_space.frozen_reference_orbitals)
else:
c = 0.0
h1 = self.molecule.one_body_integrals
h2 = self.molecule.two_body_integrals
c += self.molecule.nuclear_repulsion
h2 = NBodyTensor(h2, ordering="openfermion")
h2 = h2.reorder(to=two_body_ordering).elems
return c, h1, h2
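# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance):
#   c, h1, h2 = mol.get_integrals(two_body_ordering="phys")
#   # c: constant shift, h1: one-body integrals, h2: two-body integrals in <12|g|12> order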
def compute_one_body_integrals(self):
""" convenience function """
c, h1, h2 = self.get_integrals()
return h1
def compute_two_body_integrals(self, two_body_ordering="openfermion"):
""" """
c, h1, h2 = self.get_integrals(two_body_ordering=two_body_ordering)
return h2
def compute_constant_part(self):
c, h1, h2 = self.get_integrals()
return c
def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:
""" """
raise Exception("BaseClass Method")
def prepare_reference(self, state=None, *args, **kwargs):
"""
Returns
-------
A tequila circuit object which prepares the reference of this molecule in the chosen transformation
"""
if state is None:
assert self.n_electrons %2 == 0
state = [0]*(self.n_orbitals*2)
for i in range(self.n_electrons):
state[i]=1
reference_state = BitString.from_array(self.transformation.map_state(state=state))
U = prepare_product_state(reference_state)
# prevent trace out in direct wfn simulation
U.n_qubits = self.n_orbitals*2 # adapt when tapered transformations work
return U
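# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance):
#   U_hf = mol.prepare_reference()   # closed-shell HF reference in the chosen encoding
#   E_hf = ExpectationValue(H=mol.make_hamiltonian(), U=U_hf)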
def prepare_hardcore_boson_reference(self):
# HF state in the HCB representation (paired electrons)
U = gates.X(target=[i for i in range(self.n_electrons // 2)])
U.n_qubits = self.n_orbitals
return U
def hcb_to_me(self, U=None):
"""
Transform a circuit in the hardcore-boson encoding (HCB)
to the encoding of this molecule
HCB is supposed to be encoded on the first n_orbitals qubits
Parameters
----------
U: HCB circuit (using the alpha qubits)
Returns
-------
"""
if U is None:
U = QCircuit()
# consistency
consistency = [x < self.n_orbitals for x in U.qubits]
if not all(consistency):
warnings.warn(
"hcb_to_me: given circuit is not defined on the first {} qubits. Is this a HCB circuit?".format(
self.n_orbitals))
# map to alpha qubits
alpha_map = {k: self.transformation.up(k) for k in range(self.n_orbitals)}
alpha_U = U.map_qubits(qubit_map=alpha_map)
UX = self.transformation.hcb_to_me()
if UX is None:
raise TequilaException(
"transformation={} has no hcb_to_me function implemented".format(self.transformation))
return alpha_U + UX
def get_pair_specific_indices(self,
pair_info: str = None,
include_singles: bool = True,
general_excitations: bool = True) -> list:
"""
Assuming a pair-specific model, create a pair-specific index list
to be used in make_upccgsd_ansatz(indices = ... )
Excite from a set of references (i) to any pair coming from (i),
i.e. any (i,j)/(j,i). If general excitations are allowed, also
allow excitations from pairs to appendant pairs and reference.
Parameters
----------
pair_info
file or list containing information about the pair structure
references are single digits, pairs are two digits
example as file: "0,1,11,11,00,10" (hand over the file name;
the first row of the file is skipped, assuming it contains descriptive text)
as list: ['0','1','11','11','00','10']
~> two reference orbitals 0 and 1,
then two orbitals from pair 11, one from 00, one mixed 10
include_singles
include single excitations
general_excitations
allow general excitations
Returns
-------
list of indices with pair-specific ansatz
"""
if pair_info is None:
raise TequilaException("Need to provide some pair information.")
# If pair-information given on file, load (layout see above)
if isinstance(pair_info, str):
pairs = numpy.loadtxt(pair_info, dtype=str, delimiter=",", skiprows=1)
elif isinstance(pair_info, list):
pairs = pair_info
else:
raise TequilaException("Pair information needs to be contained in a list or filename.")
connect = [[]] * len(pairs)
# determine "connectivity"
generalized = 0
for idx, p in enumerate(pairs):
if len(p) == 1:
connect[idx] = [i for i in range(len(pairs))
if ((len(pairs[i]) == 2) and (str(idx) in pairs[i]))]
elif (len(p) == 2) and general_excitations:
connect[idx] = [i for i in range(len(pairs))
if (((p[0] in pairs[i]) or (p[1] in pairs[i]) or str(i) in p)
and not (i == idx))]
elif len(p) > 2:
raise TequilaException("Invalid reference of pair id.")
# create generating indices from connectivity
indices = []
for i, to in enumerate(connect):
for a in to:
indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))
if include_singles:
indices.append(((2 * i, 2 * a)))
indices.append(((2 * i + 1, 2 * a + 1)))
return indices
# MASKED: format_excitation_indices function (lines 1205-1216)
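# A minimal sketch of the masked function, inferred from the docstring above and from its
# call site in make_upccgsd_indices; this is an assumption, not the original implementation:
#
#   def format_excitation_indices(self, idx):
#       # sort each pair so that pi < qi, then sort the pairs by their first entry
#       idx = [tuple(sorted(pair)) for pair in idx]
#       return sorted(idx, key=lambda pair: pair[0])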
def make_upccgsd_indices(self, key, reference_orbitals=None, *args, **kwargs):
if reference_orbitals is None:
reference_orbitals = [i for i in range(self.n_electrons // 2)]
indices = []
# add doubles in hcb encoding
if hasattr(key, "lower") and key.lower() == "ladder":
# ladder structure of the pair excitations
# ensures local connectivity
indices = [[(n, n + 1)] for n in range(self.n_orbitals - 1)]
elif hasattr(key, "lower") and "g" not in key.lower():
indices = [[(n, m)] for n in reference_orbitals for m in range(self.n_orbitals) if
n < m and m not in reference_orbitals]
elif hasattr(key, "lower") and "g" in key.lower():
indices = [[(n, m)] for n in range(self.n_orbitals) for m in range(self.n_orbitals) if n < m]
else:
raise TequilaException("Unknown recipe: {}".format(key))
indices = [self.format_excitation_indices(idx) for idx in indices]
return indices
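# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance):
#   mol.make_upccgsd_indices(key="UpCCSD")    # occupied -> virtual pairs only
#   mol.make_upccgsd_indices(key="UpCCGSD")   # generalized: all pairs n < m
#   mol.make_upccgsd_indices(key="ladder")    # nearest-neighbour pairs (n, n+1)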
def make_hardcore_boson_upccgd_layer(self,
indices: list = "UpCCGD",
label: str = None,
assume_real: bool = True,
*args, **kwargs):
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices.lower())
UD = QCircuit()
for idx in indices:
UD += self.make_hardcore_boson_excitation_gate(indices=idx, angle=(idx, "D", label),
assume_real=assume_real)
return UD
def make_ansatz(self, name:str, *args, **kwargs):
name = name.lower()
if name.strip()=="":
return QCircuit()
if "+" in name:
U = QCircuit()
subparts = name.split("+")
U = self.make_ansatz(name=subparts[0], *args ,**kwargs)
if "include_reference" in kwargs:
kwargs.pop("include_reference")
if "hcb_optimization" in kwargs:
kwargs.pop("hcb_optimization")
for subpart in subparts[1:]:
U += self.make_ansatz(name=subpart, *args, include_reference=False, hcb_optimization=False, **kwargs)
return U
if name=="uccsd":
return self.make_uccsd_ansatz(*args, **kwargs)
elif "d" in name or "s" in name:
return self.make_upccgsd_ansatz(name=name, *args, **kwargs)
else:
raise TequilaException("unknown ansatz with name={}".format(name))
def make_upccgsd_ansatz(self,
include_reference: bool = True,
name: str = "UpCCGSD",
label: str = None,
order: int = None,
assume_real: bool = True,
hcb_optimization: bool = None,
spin_adapt_singles: bool = True,
neglect_z = None,
*args, **kwargs):
"""
UpCCGSD ansatz similar to the one described by Lee et al.
Parameters
----------
name
string determining the ansatz structure: "UpCCGSD" includes generalized singles and doubles,
"UpCCGD" drops the singles, "UpCCSD" restricts the doubles to occupied->virtual pairs,
an "HCB" part keeps the circuit in the hardcore-boson encoding,
and a prefix like "2-UpCCGSD" sets the order
include_reference
include the HF reference state as initial state
label
An additional label that is set with the variables
default is None and no label will be set: variables names will be
(x, (p,q)) for x in range(order)
with a label the variables will be named
(label, (x, (p,q)))
order
Order of the ansatz (default is 1)
determines how often the ordering gets repeated
parameters of repeating layers are independent
assume_real
assume a real wavefunction (that is always the case if the reference state is real)
reduces potential gradient costs from 4 to 2
Returns
-------
UpGCCSD ansatz
"""
name = name.upper()
if ("A" in name) and neglect_z is None:
neglect_z = True
else:
neglect_z = False
if order is None:
try:
if "-" in name:
order = int(name.split("-")[0])
else:
order = 1
except:
order = 1
indices = self.make_upccgsd_indices(key=name)
# check if the used qubit encoding has a hcb transformation
have_hcb_trafo = self.transformation.hcb_to_me() is not None
# consistency checks for optimization
if have_hcb_trafo and hcb_optimization is None:
hcb_optimization = True
if "HCB" in name:
hcb_optimization = True
if hcb_optimization and not have_hcb_trafo and "HCB" not in name:
raise TequilaException(
"use_hcb={} but transformation={} has no \'hcb_to_me\' function. Try transformation=\'ReorderedJordanWigner\'".format(
hcb_optimization, self.transformation))
if "S" in name and "HCB" in name:
if "HCB" in name and "S" in name:
raise Exception(
"name={}, Singles can't be realized without mapping back to the standard encoding leave S or HCB out of the name".format(
name))
# first layer
if not hcb_optimization:
U = QCircuit()
if include_reference:
U = self.prepare_reference()
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, assume_real=assume_real,
label=(label, 0), spin_adapt_singles=spin_adapt_singles, *args, **kwargs)
else:
U = QCircuit()
if include_reference:
U = self.prepare_hardcore_boson_reference()
U += self.make_hardcore_boson_upccgd_layer(indices=indices, assume_real=assume_real, label=(label, 0),
*args, **kwargs)
if "HCB" not in name:
U = self.hcb_to_me(U=U)
if "S" in name:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=(label, 0),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z, *args, **kwargs)
for k in range(1, order):
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, label=(label, k),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z)
return U
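# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance):
#   U = mol.make_upccgsd_ansatz(name="UpCCGSD")   # one layer, generalized singles + doubles
#   U = mol.make_upccgsd_ansatz(name="2-UpCCGD")  # two layers, doubles only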
def make_upccgsd_layer(self, indices, include_singles=True, include_doubles=True, assume_real=True, label=None,
spin_adapt_singles: bool = True, angle_transform=None, mix_sd=False, neglect_z=False, *args, **kwargs):
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
angle = (tuple([idx]), "D", label)
if include_doubles:
if "jordanwigner" in self.transformation.name.lower() and not self.transformation.up_then_down:
# we can optimize with qubit excitations for the JW representation
target=[self.transformation.up(idx[0]), self.transformation.up(idx[1]), self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle,
indices=((2 * idx[0], 2 * idx[1]), (2 * idx[0] + 1, 2 * idx[1] + 1)),
assume_real=assume_real, **kwargs)
if include_singles and mix_sd:
U += self.make_upccgsd_singles(indices=[idx], assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
if include_singles and not mix_sd:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
return U
def make_upccgsd_singles(self, indices="UpCCGSD", spin_adapt_singles=True, label=None, angle_transform=None,
assume_real=True, neglect_z=False, *args, **kwargs):
if neglect_z and "jordanwigner" not in self.transformation.name.lower():
raise TequilaException("neglegt-z approximation in UpCCGSD singles needs the (Reversed)JordanWigner representation")
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices)
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
if spin_adapt_singles:
angle = (idx, "S", label)
if angle_transform is not None:
angle = angle_transform(angle)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0], 2 * idx[1])], assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
else:
angle1 = (idx, "SU", label)
angle2 = (idx, "SD", label)
if angle_transform is not None:
angle1 = angle_transform(angle1)
angle2 = angle_transform(angle2)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle1, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle2, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle1, indices=[(2 * idx[0], 2 * idx[1])],
assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle2, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
return U
def make_uccsd_ansatz(self, trotter_steps: int=1,
initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = "mp2",
include_reference_ansatz=True,
parametrized=True,
threshold=1.e-8,
add_singles=None,
*args, **kwargs) -> QCircuit:
"""
Parameters
----------
initial_amplitudes :
initial amplitudes given as ManyBodyAmplitudes structure or as string
where 'mp2', 'cc2' or 'ccsd' are possible initializations
include_reference_ansatz :
Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
parametrized :
Initialize with variables, otherwise with static numbers (Default value = True)
trotter_steps: int :
number of Trotter steps (Default value = 1)
initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] :
(Default value = "mp2")
Returns
-------
type
Parametrized QCircuit
"""
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2" and add_singles is None:
add_singles=True
elif initial_amplitudes is not None and add_singles is not None:
warnings.warn("make_uccsd_anstatz: add_singles has no effect when explicit amplitudes are passed down", TequilaWarning)
elif add_singles is None:
add_singles=True
if self.n_electrons % 2 != 0:
raise TequilaException("make_uccsd_ansatz currently only for closed shell systems")
nocc = self.n_electrons // 2
nvirt = self.n_orbitals - nocc
Uref = QCircuit()
if include_reference_ansatz:
Uref = self.prepare_reference()
amplitudes = initial_amplitudes
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2":
amplitudes = self.compute_mp2_amplitudes()
elif initial_amplitudes.lower() == "ccsd":
amplitudes = self.compute_ccsd_amplitudes()
else:
try:
amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())
except Exception as exc:
raise TequilaException(
"{}\nDon't know how to initialize \'{}\' amplitudes".format(exc, initial_amplitudes))
if amplitudes is None:
tia=None
if add_singles: tia=numpy.zeros(shape=[nocc, nvirt])
amplitudes = ClosedShellAmplitudes(
tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),
tIA=tia)
closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)
indices = {}
if not isinstance(amplitudes, dict):
amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)
amplitudes = dict(sorted(amplitudes.items(), key=lambda x: numpy.fabs(x[1]), reverse=True))
for key, t in amplitudes.items():
assert (len(key) % 2 == 0)
if not numpy.isclose(t, 0.0, atol=threshold):
if closed_shell:
if len(key) == 2 and add_singles:
# singles
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_a = (2*key[0], 2*key[1])
idx_b = (2*key[0]+1, 2*key[1]+1)
indices[idx_a]=angle
indices[idx_b]=angle
else:
assert len(key)==4
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_abab=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3])
indices[idx_abab]=angle
if key[0]!=key[2] and key[1]!=key[3]:
idx_aaaa=(2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3])
idx_bbbb=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2]+1, 2 * key[3]+1)
partner = tuple([key[2], key[1], key[0], key[3]])
anglex=2.0*(t - amplitudes[partner])
if parametrized:
anglex=2.0*(Variable(name=key) - Variable(partner))
indices[idx_aaaa]=anglex
indices[idx_bbbb]=anglex
else:
raise Exception("only closed-shell supported, please assemble yourself .... sorry :-)")
UCCSD = QCircuit()
factor = 1.0 / trotter_steps
for step in range(trotter_steps):
for idx, angle in indices.items():
UCCSD += self.make_excitation_gate(indices=idx, angle=factor * angle)
if hasattr(initial_amplitudes,"lower") and initial_amplitudes.lower()=="mp2" and parametrized and add_singles:
# mp2 has no singles, need to initialize them here (if not parametrized, initializing as 0.0 makes no sense though)
UCCSD += self.make_upccgsd_layer(indices="upccsd", include_singles=True, include_doubles=False)
return Uref + UCCSD
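# Usage sketch (added for illustration, not part of the original source; `mol` is a hypothetical
# molecule instance whose backend can provide the MP2/CCSD starting amplitudes):
#   U = mol.make_uccsd_ansatz(trotter_steps=1, initial_amplitudes="mp2", threshold=1e-6)
#   E = ExpectationValue(H=mol.make_hamiltonian(), U=U)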
def compute_amplitudes(self, method: str, *args, **kwargs):
"""
Compute closed-shell CC amplitudes
Parameters
----------
method :
coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)
Success might depend on backend
got an extra function for MP2
*args :
**kwargs :
Returns
-------
"""
raise TequilaException("compute amplitudes: Needs to be overwritten by backend")
def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:
"""
Compute closed-shell mp2 amplitudes
.. math::
t(a,i,b,j) = g(a,i,b,j)/(e(i) + e(j) - e(a) - e(b))
:return:
Parameters
----------
Returns
-------
"""
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.molecule.n_electrons // 2 # this is never the active space
ei = fij[:nocc]
ai = fij[nocc:]
abgij = g[nocc:, nocc:, :nocc, :nocc]
amplitudes = abgij * 1.0 / (
ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))
E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,
optimize='greedy')
self.molecule.mp2_energy = E + self.molecule.hf_energy
return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))
def compute_cis_amplitudes(self):
"""
Compute the CIS amplitudes of the molecule
"""
@dataclass
class ResultCIS:
""" """
omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]
amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]
def __getitem__(self, item):
return (self.omegas[item], self.amplitudes[item])
def __len__(self):
return len(self.omegas)
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.n_alpha_electrons
nvirt = self.n_orbitals - nocc
pairs = []
for i in range(nocc):
for a in range(nocc, nocc + nvirt):
pairs.append((a, i))
M = numpy.ndarray(shape=[len(pairs), len(pairs)])
for xx, x in enumerate(pairs):
eia = fij[x[0]] - fij[x[1]]
a, i = x
for yy, y in enumerate(pairs):
b, j = y
delta = float(y == x)
gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]
M[xx, yy] = eia * delta + gpart
omega, xvecs = numpy.linalg.eigh(M)
# convert amplitudes to ndarray sorted by excitation energy
nex = len(omega)
amplitudes = []
for ex in range(nex):
t = numpy.ndarray(shape=[nvirt, nocc])
exvec = xvecs[ex]
for xx, x in enumerate(pairs):
a, i = x
t[a - nocc, i] = exvec[xx]
amplitudes.append(ClosedShellAmplitudes(tIA=t))
return ResultCIS(omegas=list(omega), amplitudes=amplitudes)
@property
def rdm1(self):
"""
Returns the 1-RDM if it was computed with the compute_rdms function before
"""
if self._rdm1 is not None:
return self._rdm1
else:
print("1-RDM has not been computed. Return None for 1-RDM.")
return None
@property
def rdm2(self):
"""
Returns the 2-RDM if it was computed with the compute_rdms function before
This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
"""
if self._rdm2 is not None:
return self._rdm2
else:
print("2-RDM has not been computed. Return None for 2-RDM.")
return None
def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,
get_rdm1: bool = True, get_rdm2: bool = True, ordering="dirac"):
"""
Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
a unitary U. This method uses the standard ordering in physics as denoted below.
Note, that the representation of the density matrices depends on the qubit transformation
used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
matrices in the occupation picture.
We only consider real orbitals and thus real-valued RDMs.
The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.
.. math::
\\text{rdm1: } \\gamma^p_q = \\langle \\psi | a^p a_q | \\psi \\rangle
= \\langle U 0 | a^p a_q | U 0 \\rangle
\\text{rdm2: } \\gamma^{pq}_{rs} = \\langle \\psi | a^p a^q a_s a_r | \\psi \\rangle
= \\langle U 0 | a^p a^q a_s a_r | U 0 \\rangle
Parameters
----------
U :
Quantum Circuit to achieve the desired state \\psi = U |0\\rangle, non-optional
variables :
If U is parametrized, then need to hand over a set of fixed variables
spin_free :
Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
get_rdm1, get_rdm2 :
Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
it is recommended to compute them at once.
Returns
-------
"""
# Check whether unitary circuit is not 0
if U is None:
raise TequilaException('Need to specify a Quantum Circuit.')
# Check whether transformation is BKSF.
# Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct
# transformation, because it computes the number of qubits incorrectly in this case.
# A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.")
# Set up number of spin-orbitals and molecular orbitals respectively
n_SOs = 2 * self.n_orbitals
n_MOs = self.n_orbitals
def _get_of_op(operator_tuple):
""" Returns operator given by a operator tuple as OpenFermion - Fermion operator """
op = openfermion.FermionOperator(operator_tuple)
return op
def _get_qop_hermitian(of_operator) -> QubitHamiltonian:
""" Returns Hermitian part of Fermion operator as QubitHamiltonian """
qop = self.transformation(of_operator)
#qop = QubitHamiltonian(self.transformation(of_operator))
real, imag = qop.split(hermitian=True)
if real:
return real
else:
raise TequilaException(
"Qubit Hamiltonian does not have a Hermitian part. Operator ={}".format(of_operator))
def _build_1bdy_operators_spinful() -> list:
""" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp
ops = []
for p in range(n_SOs):
for q in range(p + 1):
op_tuple = ((p, 1), (q, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinful() -> list:
""" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = -pqsr = -qprs = qpsr
# and = rspq
ops = []
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_1bdy_operators_spinfree() -> list:
""" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp (not changed by spin-summation)
ops = []
for p in range(n_MOs):
for q in range(p + 1):
# Spin aa
op_tuple = ((2 * p, 1), (2 * q, 0))
op = _get_of_op(op_tuple)
# Spin bb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinfree() -> list:
""" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)
# and = rspq
ops = []
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
# Spin aaaa
op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p != q and r != s) else '0.0 []'
op = _get_of_op(op_tuple)
# Spin abab
op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (
2 * p != 2 * q + 1 and 2 * r != 2 * s + 1) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin baba
op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (
2 * p + 1 != 2 * q and 2 * r + 1 != 2 * s) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin bbbb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (
p != q and r != s) else '0.0 []'
op += _get_of_op(op_tuple)
ops += [op]
return ops
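        # Illustration (not executed): each spin-free operator sums the four
        # spin blocks (aaaa, abab, baba, bbbb) of a^p a^q a_s a_r that survive
        # the spin summation; blocks forbidden by the Pauli principle (e.g. the
        # aaaa block with p == q) contribute a zero operator instead.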
def _assemble_rdm1(evals) -> numpy.ndarray:
"""
Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
Same symmetry with or without spin, so we can use the same function
"""
N = n_MOs if spin_free else n_SOs
rdm1 = numpy.zeros([N, N])
ctr: int = 0
for p in range(N):
for q in range(p + 1):
rdm1[p, q] = evals[ctr]
# Symmetry pq = qp
rdm1[q, p] = rdm1[p, q]
ctr += 1
return rdm1
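        # Example (not executed), spin-orbital case with N = 2: the expectation
        # values arrive in the order evals = [<a^0 a_0>, <a^1 a_0>, <a^1 a_1>]
        # and are placed at rdm1[0,0], rdm1[1,0] (mirrored to rdm1[0,1]) and
        # rdm1[1,1], respectively.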
def _assemble_rdm2_spinful(evals) -> numpy.ndarray:
""" Returns spin-ful two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetries due to anticommutation relations
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr
rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs
rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr
return rdm2
def _assemble_rdm2_spinfree(evals) -> numpy.ndarray:
""" Returns spin-free two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetry: pqrs = qpsr
for p, q, r, s in product(range(n_MOs), repeat=4):
if p >= q or r >= s:
rdm2[q, p, s, r] = rdm2[p, q, r, s]
return rdm2
# Build operator lists
qops = []
if spin_free:
qops += _build_1bdy_operators_spinfree() if get_rdm1 else []
qops += _build_2bdy_operators_spinfree() if get_rdm2 else []
else:
qops += _build_1bdy_operators_spinful() if get_rdm1 else []
qops += _build_2bdy_operators_spinful() if get_rdm2 else []
# Transform operator lists to QubitHamiltonians
qops = [_get_qop_hermitian(op) for op in qops]
# Compute expected values
evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)
# Assemble density matrices
# If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type
def _reset_rdm(rdm):
if rdm is not None:
if spin_free and rdm.shape[0] != n_MOs:
return None
if not spin_free and rdm.shape[0] != n_SOs:
return None
return rdm
self._rdm1 = _reset_rdm(self._rdm1)
self._rdm2 = _reset_rdm(self._rdm2)
        # Split the expectation values into 1- and 2-particle expectation values
if get_rdm1:
len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2
else:
len_1 = 0
evals_1, evals_2 = evals[:len_1], evals[len_1:]
# Build matrices using the expectation values
self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1
if spin_free:
self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2
else:
self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2
if get_rdm2:
rdm2 = NBodyTensor(elems=self.rdm2, ordering="dirac")
rdm2.reorder(to=ordering)
rdm2 = rdm2.elems
self._rdm2 = rdm2
if get_rdm1:
if get_rdm2:
return self.rdm1, self.rdm2
else:
return self.rdm1
elif get_rdm2:
return self.rdm2
else:
warnings.warn("compute_rdms called with instruction to not compute?", TequilaWarning)
def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:
"""
Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.
Parameters
----------
sum_rdm1, sum_rdm2 :
If set to true, perform spin summation on rdm1, rdm2
Returns
-------
rdm1_spinsum, rdm2_spinsum :
The desired spin-free matrices
"""
n_MOs = self.n_orbitals
rdm1_spinsum = None
rdm2_spinsum = None
# Spin summation on rdm1
if sum_rdm1:
            # Check whether the spin-orbital 1-RDM exists
            if self._rdm1 is None:
                raise TequilaException("The spin-orbital 1-RDM does not exist!")
            # Check whether the existing rdm1 is in the spin-orbital basis
            if self._rdm1.shape[0] != 2 * n_MOs:
                raise TequilaException("The existing 1-RDM needs to be in the spin-orbital basis, but it is already spin-free!")
# Do summation
rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])
for p in range(n_MOs):
for q in range(p + 1):
rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]
rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]
for p in range(n_MOs):
for q in range(p):
rdm1_spinsum[q, p] = rdm1_spinsum[p, q]
# Spin summation on rdm2
if sum_rdm2:
            # Check whether the spin-orbital 2-RDM exists
            if self._rdm2 is None:
                raise TequilaException("The spin-orbital 2-RDM does not exist!")
            # Check whether the existing rdm2 is in the spin-orbital basis
            if self._rdm2.shape[0] != 2 * n_MOs:
                raise TequilaException("The existing 2-RDM needs to be in the spin-orbital basis, but it is already spin-free!")
# Do summation
rdm2_spinsum = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]
return rdm1_spinsum, rdm2_spinsum
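    # Usage sketch (not executed): spin summation requires RDMs computed in the
    # spin-orbital basis, i.e. compute_rdms(..., spin_free=False); the returned
    # matrices then have spatial dimensions n_orbitals instead of 2*n_orbitals:
    #   mol.compute_rdms(U=U, variables=angles, spin_free=False)
    #   rdm1_sf, rdm2_sf = mol.rdm_spinsum(sum_rdm1=True, sum_rdm2=True)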
def perturbative_f12_correction(self, rdm1: numpy.ndarray = None, rdm2: numpy.ndarray = None,
gamma: float = 1.4, n_ri: int = None,
external_info: dict = None, **kwargs) -> float:
"""
        Computes the spin-free [2]_R12 correction, which requires only the 1- and 2-RDM of a reference method
        The RDMs can either be passed directly or computed from the information provided via kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
            dimensionality of the RI basis; specify only if the available RI basis should be truncated
            if None, the maximum available via tensors / basis set is used
            must not be larger than the size of the available RI basis, and not smaller than the size of the OBS
            for n_ri == dim(OBS), the correction returns zero
external_info :
            for usage in qc_base, one needs to provide information on where to find the f12-tensor <rs|f_12|pq>;
            pass a dictionary with {"f12_filename": where to find the f12-tensor, "scheme": ordering scheme of the tensor}
        kwargs :
            e.g. RDM information via {"U": QCircuit, "variables": optimal angles}; needs to be passed if rdm1 and rdm2
            have not been computed yet
Returns
-------
the f12 correction for the energy
"""
from .f12_corrections._f12_correction_base import ExplicitCorrelationCorrection
correction = ExplicitCorrelationCorrection(mol=self, rdm1=rdm1, rdm2=rdm2, gamma=gamma,
n_ri=n_ri, external_info=external_info, **kwargs)
return correction.compute()
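    # Usage sketch (not executed; the file name and ordering scheme below are
    # placeholders, not fixed API values):
    #   delta = mol.perturbative_f12_correction(
    #       rdm1=rdm1, rdm2=rdm2, gamma=1.4,
    #       external_info={"f12_filename": "<path to f12 tensor>", "scheme": "<tensor ordering>"})
    #   corrected_energy = vqe_energy + delta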
def __str__(self) -> str:
result = str(type(self)) + "\n"
result += "Qubit Encoding\n"
result += str(self.transformation) + "\n\n"
result += "Parameters\n"
for k, v in self.parameters.__dict__.items():
result += "{key:15} : {value:15} \n".format(key=str(k), value=str(v))
result += "\n"
return result
def format_excitation_indices(self, idx):
"""
Consistent formatting of excitation indices
idx = [(p0,q0),(p1,q1),...,(pn,qn)]
        sorted as: p0<p1<...<pn and pi<qi
:param idx: list of index tuples describing a single(!) fermionic excitation
:return: tuple-list of index tuples
"""
idx = [tuple(sorted(x)) for x in idx]
idx = sorted(idx, key=lambda x: x[0])
return tuple(idx)
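    # Example (not executed): a double excitation given as [(3, 0), (5, 2)] is
    # brought into the canonical form ((0, 3), (2, 5)); each pair is sorted
    # internally and the pairs are then ordered by their first index.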
import os
from dataclasses import dataclass
from tequila import TequilaException, BitString, TequilaWarning
from tequila.hamiltonian import QubitHamiltonian
from tequila.wavefunction import QubitWaveFunction
from tequila.hamiltonian.paulis import Sp, Sm, Qp, Qm
from tequila.circuit import QCircuit, gates, _gates_impl
from tequila.objective.objective import Variable, Variables, ExpectationValue
from tequila.simulators.simulator_api import simulate
from tequila.utils import to_float
from tequila.objective import assign_variable
from .encodings import known_encodings
import typing, numpy, numbers, copy
from itertools import product
# if you are experiencing import errors you need to update openfermion
# required is version >= 1.0
# otherwise replace with from openfermion.hamiltonians import MolecularData
import openfermion
from openfermion.chem import MolecularData
import warnings
@dataclass
class ActiveSpaceData:
active_orbitals: list # active orbitals (spatial, c1)
reference_orbitals: list # reference orbitals (spatial, c1)
def __str__(self):
result = "Active Space Data:\n"
result += "{key:15} : {value:15} \n".format(key="active_orbitals", value=str(self.active_orbitals))
result += "{key:15} : {value:15} \n".format(key="reference_orbitals",
value=str(self.reference_orbitals))
result += "{key:15} : {value:15} \n".format(key="frozen_docc", value=str(self.frozen_docc))
result += "{key:15} : {value:15} \n".format(key="frozen_uocc", value=str(self.frozen_uocc))
return result
@property
def frozen_reference_orbitals(self):
return [i for i in self.reference_orbitals if i not in self.active_orbitals]
@property
def active_reference_orbitals(self):
return [i for i in self.reference_orbitals if i in self.active_orbitals]
class FermionicGateImpl(gates.QubitExcitationImpl):
# keep the overview in circuits
def __init__(self, generator, p0, transformation, *args, **kwargs):
super().__init__(generator=generator, target=generator.qubits, p0=p0, *args, **kwargs)
self._name = "FermionicExcitation"
self.transformation=transformation
def compile(self):
return gates.Trotterized(generator=self.generator, control=self.control, angle=self.parameter, steps=1)
def prepare_product_state(state: BitString) -> QCircuit:
"""Small convenience function
Parameters
----------
state :
product state encoded into a bitstring
state: BitString :
Returns
-------
type
unitary circuit which prepares the product state
"""
result = QCircuit()
for i, v in enumerate(state.array):
if v == 1:
result += gates.X(target=i)
return result
@dataclass
class ParametersQC:
"""Specialization of ParametersHamiltonian"""
basis_set: str = None # Quantum chemistry basis set
geometry: str = None # geometry of the underlying molecule (units: Angstrom!),
# this can be a filename leading to an .xyz file or the geometry given as a string
description: str = ""
multiplicity: int = 1
charge: int = 0
name: str = None
@property
def n_electrons(self, *args, **kwargs):
return self.get_nuc_charge() - self.charge
def get_nuc_charge(self):
return sum(self.get_atom_number(name=atom) for atom in self.get_atoms())
def get_atom_number(self, name):
atom_numbers={"h":1, "he":2, "li":3, "be":4, "b":5, "c":6, "n":7, "o":8, "f":9, "ne":10, "na":11, "mg":12, "al":13, "si":14, "ph":15, "s":16, "cl":17, "ar":18}
if name.lower() in atom_numbers:
return atom_numbers[name.lower()]
try:
import periodictable as pt
atom=name.lower()
atom[0]=atom[0].upper()
element = pt.elements.symbol(atom)
return element.number()
except:
raise TequilaException("can not assign atomic number to element {}\npip install periodictable will fix it".format(atom))
def get_atoms(self):
return [x[0] for x in self.get_geometry()]
def __post_init__(self,*args, **kwargs):
if self.name is None and self.geometry is None:
raise TequilaException("no geometry or name given to molecule\nprovide geometry=filename.xyz or geometry=`h 0.0 0.0 0.0\\n...`\nor name=whatever with file whatever.xyz being present")
# auto naming
if self.name is None:
if ".xyz" in self.geometry:
self.name=self.geometry.split(".xyz")[0]
if self.description is None:
coord, description = self.read_xyz_from_file()
self.description=description
else:
atoms=self.get_atoms()
atom_names=sorted(list(set(atoms)), key=lambda x: self.get_atom_number(x), reverse=True)
if self.name is None:
drop_ones=lambda x: "" if x==1 else x
self.name="".join(["{}{}".format(x,drop_ones(atoms.count(x))) for x in atom_names])
self.name = self.name.lower()
if self.geometry is None:
self.geometry=self.name+".xyz"
if ".xyz" in self.geometry and not os.path.isfile(self.geometry):
raise TequilaException("could not find file for molecular coordinates {}".format(self.geometry))
@property
def filename(self):
""" """
return "{}_{}".format(self.name, self.basis_set)
@property
def molecular_data_param(self) -> dict:
""":return: Give back all parameters for the MolecularData format from openfermion as dictionary"""
return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,
'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename
}
@staticmethod
def format_element_name(string):
"""OpenFermion uses case sensitive hash tables for chemical elements
I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work
this convenience function does the naming
:return: first letter converted to upper rest to lower
Parameters
----------
string :
Returns
-------
"""
assert (len(string) > 0)
assert (isinstance(string, str))
fstring = string[0].upper() + string[1:].lower()
return fstring
@staticmethod
def convert_to_list(geometry):
"""Convert a molecular structure given as a string into a list suitable for openfermion
Parameters
----------
geometry :
a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0\n h 0.0 0.0 1.0"
Returns
-------
type
A list with the correct format for openfermion E.g return [ ['h',[0.0,0.0,0.0], [..]]
"""
result = []
# Remove blank lines
lines = [l for l in geometry.split("\n") if l]
for line in lines:
words = line.split()
# Pad coordinates
if len(words) < 4:
words += [0.0] * (4 - len(words))
try:
tmp = (ParametersQC.format_element_name(words[0]),
(float(words[1]), float(words[2]), float(words[3])))
result.append(tmp)
except ValueError:
print("get_geometry list unknown line:\n ", line, "\n proceed with caution!")
return result
def get_geometry_string(self) -> str:
"""returns the geometry as a string
:return: geometry string
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if comment is not None:
self.description = comment
return geomstring
else:
return self.geometry
def get_geometry(self):
"""Returns the geometry
If a xyz filename was given the file is read out
otherwise it is assumed that the geometry was given as string
which is then reformatted as a list usable as input for openfermion
:return: geometry as list
e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
Units: Angstrom!
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if self.description == '':
self.description = comment
return self.convert_to_list(geomstring)
elif self.geometry is not None:
return self.convert_to_list(self.geometry)
else:
raise Exception("Parameters.qc.geometry is None")
@staticmethod
def read_xyz_from_file(filename):
"""Read XYZ filetype for molecular structures
https://en.wikipedia.org/wiki/XYZ_file_format
Units: Angstrom!
Parameters
----------
filename :
return:
Returns
-------
"""
with open(filename, 'r') as file:
content = file.readlines()
natoms = int(content[0])
comment = str(content[1]).strip('\n')
coord = ''
for i in range(natoms):
coord += content[2 + i]
return coord, comment
@dataclass
class ClosedShellAmplitudes:
""" """
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Returns
-------
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(nocc + A, I, nocc + B, J)] = value
if self.tIA is not None:
nocc = self.tIA.shape[0]
for (I, A), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(A + nocc, I)] = value
return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))
@dataclass
class Amplitudes:
"""Coupled-Cluster Amplitudes
We adopt the Psi4 notation for consistency
I,A for alpha
i,a for beta
Parameters
----------
Returns
-------
"""
@classmethod
def from_closed_shell(cls, cs: ClosedShellAmplitudes):
"""
Initialize from closed-shell Amplitude structure
Parameters
----------
cs: ClosedShellAmplitudes :
Returns
-------
"""
tijab = cs.tIjAb - numpy.einsum("ijab -> ijba", cs.tIjAb, optimize='greedy')
return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
tiJaB: numpy.ndarray = None
tijab: numpy.ndarray = None
tIJAB: numpy.ndarray = None
tia: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Neglect amplitudes below the threshold
Returns
-------
Dictionary of tequila variables (hash is in the style of (a,i,b,j))
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, j + 1)] = value
for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), J)] = value
for (i, j, a, b), value in numpy.ndenumerate(self.tijab):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, j + 1)] = value
for (I, J, A, B), value in numpy.ndenumerate(self.tijab):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), J)] = value
if self.tIA is not None:
nocc = self.tIjAb.shape[0]
assert (self.tia.shape[0] == nocc)
for (I, A), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (A + nocc), 2 * I)] = value
for (i, a), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value
return variables
class NBodyTensor:
""" Convenience class for handling N-body tensors """
class Ordering:
def __init__(self, scheme):
if hasattr(scheme, "_scheme"):
scheme = scheme._scheme
elif hasattr(scheme, "scheme"):
scheme = scheme.scheme
self._scheme = self.assign_scheme(scheme)
def assign_scheme(self, scheme):
if scheme is None:
return "chem"
else:
scheme = str(scheme)
if scheme.lower() in ["mulliken", "chem", "c", "1122"]:
return "chem"
elif scheme.lower() in ["dirac", "phys", "p", "1212"]:
return "phys"
elif scheme.lower() in ["openfermion", "of", "o", "1221"]:
return "of"
else:
raise TequilaException(
"Unknown two-body tensor scheme {}. Supported are dirac, mulliken, and openfermion".format(scheme))
def is_phys(self):
return self._scheme == "phys"
def is_chem(self):
return self._scheme == "chem"
def is_of(self):
return self._scheme == "of"
def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, ordering: str = None,
size_full: int = None):
"""
Parameters
----------
elems: Tensor data as numpy array
active_indices: List of active indices in total ordering
ordering: Ordering scheme for two body tensors
"dirac" or "phys": <12|g|12>
.. math::
g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
"mulliken" or "chem": (11|g|22)
.. math::
g_{pqrs} = \\int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
"openfermion":
.. math:: [12|g|21]
g_{gqprs} = \\int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
size_full
"""
# Set elements
self.elems = elems
# Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible
# representations
if active_indices is not None:
self.active_indices = active_indices
self._passive_indices = None
self._full_indices = None
self._indices_set: bool = False
# Determine order of tensor
# Assume, that tensor is entered in desired shape, not as flat array.
self.order = len(self.elems.shape)
# Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well
if size_full is None:
self._size_full = self.elems.shape[0]
else:
self._size_full = size_full
# 2-body tensors (<=> order 4) currently allow reordering
if self.order == 4:
self.ordering = self.Ordering(ordering)
else:
if ordering is not None:
raise Exception("Ordering only implemented for tensors of order 4 / 2-body tensors.")
self.ordering = None
def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:
"""
Get subspace of tensor by a set of index lists
according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]
This essentially is an implementation of a non-contiguous slicing using numpy.take
Parameters
----------
idx_lists :
List of lists, each defining the desired subspace per axis
Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
# Check if index list has correct size
if len(idx_lists) != self.order:
raise Exception("Need to pass an index list for each dimension!" +
" Length of idx_lists needs to match order of tensor.")
# Perform slicing via numpy.take
out = self.elems
for ax in range(self.order):
if idx_lists[ax] is not None: # None means, we want the full space in this direction
out = numpy.take(out, idx_lists[ax], axis=ax)
return out
def set_index_lists(self):
""" Set passive and full index lists based on class inputs """
tmp_size = self._size_full
if self._size_full is None:
tmp_size = self.elems.shape[0]
self._passive_indices = [i for i in range(tmp_size)
if i not in self.active_indices]
self._full_indices = [i for i in range(tmp_size)]
def sub_str(self, name: str) -> numpy.ndarray:
"""
Get subspace of tensor by a string
Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.
Full space in this context may also be smaller than actual tensor dimension.
The specification of active space in this context only allows to pick a set from a list of orbitals, and
is not able to resolve an active space from irreducible representations.
Example for one-body tensor:
hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]
Parameters
----------
name :
String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
if not self._indices_set:
self.set_index_lists()
self._indices_set = True
if name is None:
raise Exception("No name specified.")
if len(name) != self.order:
raise Exception("Name does not match order of the tensor.")
if self.active_indices is None:
raise Exception("Need to set an active space in order to call this function.")
idx_lists = []
# Parse name as string of space indices
for char in name:
if char.lower() == 'a':
idx_lists.append(self.active_indices)
elif char.lower() == 'p':
idx_lists.append(self._passive_indices)
elif char.lower() == 'f':
if self._size_full is None:
idx_lists.append(None)
else:
idx_lists.append(self._full_indices)
else:
raise Exception("Need to specify a valid letter (a,p,f).")
out = self.sub_lists(idx_lists)
return out
def reorder(self, to: str = 'of'):
"""
Function to reorder tensors according to some convention.
Parameters
----------
to :
Ordering scheme of choice.
'openfermion', 'of' (default) :
openfermion - ordering, corresponds to integrals of the type
h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1) (O(1,2)
with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
currently needed for dependencies on openfermion-library
'chem', 'c' :
quantum chemistry ordering, collect particle terms,
more convenient for real-space methods
h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
This is output by psi4
'phys', 'p' :
typical physics ordering, integrals of type
h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
with operators a^pq_rs = a^p a^q a_s a_r
Returns
-------
"""
if self.order != 4:
raise Exception('Reordering currently only implemented for two-body tensors.')
to = self.Ordering(to)
if self.ordering == to:
return self
elif self.ordering.is_chem():
if to.is_of():
self.elems = numpy.einsum("psqr -> pqrs", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("prqs -> pqrs", self.elems, optimize='greedy')
elif self.ordering.is_of():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> psqr", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("pqrs -> pqsr", self.elems, optimize='greedy')
elif self.ordering.is_phys():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> prqs", self.elems, optimize='greedy')
elif to.is_of():
self.elems = numpy.einsum("pqsr -> pqrs", self.elems, optimize='greedy')
return self
class QuantumChemistryBase:
def __init__(self, parameters: ParametersQC,
transformation: typing.Union[str, typing.Callable] = None,
active_orbitals: list = None,
*args,
**kwargs):
self.parameters = parameters
if "molecule" in kwargs:
self.molecule = kwargs["molecule"]
else:
self.molecule = self.make_molecule(*args, **kwargs)
assert (parameters.basis_set.lower() == self.molecule.basis.lower())
assert (parameters.multiplicity == self.molecule.multiplicity)
assert (parameters.charge == self.molecule.charge)
self.active_space = None
if active_orbitals is not None:
self.active_space = self._make_active_space_data(active_orbitals=active_orbitals)
self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)
self._rdm1 = None
self._rdm2 = None
def _initialize_transformation(self, transformation=None, *args, **kwargs):
if transformation is None:
transformation = "JordanWigner"
# filter out arguments to the transformation
trafo_args = {k.split("__")[1]: v for k, v in kwargs.items() if
(hasattr(k, "lower") and "transformation__" in k.lower())}
trafo_args["n_electrons"] = self.n_electrons
trafo_args["n_orbitals"] = self.n_orbitals
if hasattr(transformation, "upper"):
# format to conventions
transformation = transformation.replace("_", "").replace("-", "").upper()
encodings = known_encodings()
if transformation in encodings:
transformation = encodings[transformation](**trafo_args)
else:
raise TequilaException(
"Unkown Fermion-to-Qubit encoding {}. Try something like: {}".format(transformation,
list(encodings.keys())))
return transformation
def _make_active_space_data(self, active_orbitals, reference=None):
"""
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
"""
if active_orbitals is None:
return None
if reference is None:
# auto assignment only for closed-shell
assert (self.n_electrons % 2 == 0)
reference = sorted([i for i in range(self.n_electrons // 2)])
return ActiveSpaceData(active_orbitals=sorted(active_orbitals),
reference_orbitals=sorted(reference))
@classmethod
def from_openfermion(cls, molecule: openfermion.MolecularData,
transformation: typing.Union[str, typing.Callable] = None,
*args,
**kwargs):
"""
Initialize direclty from openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
"""
parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,
description=molecule.description, multiplicity=molecule.multiplicity,
charge=molecule.charge)
return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)
def make_excitation_generator(self,
indices: typing.Iterable[typing.Tuple[int, int]],
form: str = None,
remove_constant_term: bool = True) -> QubitHamiltonian:
"""
Notes
----------
Creates the transformed hermitian generator of UCC type unitaries:
M(a^\dagger_{a_0} a_{i_0} a^\dagger{a_1}a_{i_1} ... - h.c.)
where the qubit map M depends is self.transformation
Parameters
----------
indices : typing.Iterable[typing.Tuple[int, int]] :
List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha odd numbers, beta even numbers)
can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
form : str : (Default value None):
Manipulate the generator to involution or projector
set form='involution' or 'projector'
the default is no manipulation which gives the standard fermionic excitation operator back
remove_constant_term: bool: (Default value True):
by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
if the unitary is controlled this might not be true!
Returns
-------
type
1j*Transformed qubit excitation operator, depends on self.transformation
"""
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet")
# check indices and convert to list of tuples if necessary
if len(indices) == 0:
raise TequilaException("make_excitation_operator: no indices given")
elif not isinstance(indices[0], typing.Iterable):
if len(indices) % 2 != 0:
raise TequilaException("make_excitation_generator: unexpected input format of indices\n"
"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\n"
"or list as [a_0, i_0, a_1, i_1, ... ]\n"
"you gave: {}".format(indices))
converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]
else:
converted = indices
# convert everything to native python int
# otherwise openfermion will complain
converted = [(int(pair[0]), int(pair[1])) for pair in converted]
# convert to openfermion input format
ofi = []
dag = []
for pair in converted:
assert (len(pair) == 2)
ofi += [(int(pair[0]), 1),
(int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64
dag += [(int(pair[0]), 0), (int(pair[1]), 1)]
op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian
op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)
if isinstance(form, str) and form.lower() != 'fermionic':
# indices for all the Na operators
Na = [x for pair in converted for x in [(pair[0], 1), (pair[0], 0)]]
# indices for all the Ma operators (Ma = 1 - Na)
Ma = [x for pair in converted for x in [(pair[0], 0), (pair[0], 1)]]
# indices for all the Ni operators
Ni = [x for pair in converted for x in [(pair[1], 1), (pair[1], 0)]]
# indices for all the Mi operators
Mi = [x for pair in converted for x in [(pair[1], 0), (pair[1], 1)]]
# can gaussianize as projector or as involution (last is default)
if form.lower() == "p+":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, 0.5)
op += openfermion.FermionOperator(Ni + Ma, 0.5)
elif form.lower() == "p-":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, -0.5)
op += openfermion.FermionOperator(Ni + Ma, -0.5)
elif form.lower() == "g+":
op += openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
elif form.lower() == "g-":
op += openfermion.FermionOperator([], -1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, 1.0)
op += openfermion.FermionOperator(Ni + Ma, 1.0)
elif form.lower() == "p0":
# P0: we only construct P0 and don't keep the original generator
op = openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
else:
raise TequilaException(
"Unknown generator form {}, supported are G, P+, P-, G+, G- and P0".format(form))
qop = self.transformation(op)
# remove constant terms
# they have no effect in the unitary (if not controlled)
if remove_constant_term:
qop.qubit_operator.terms[tuple()] = 0.0
# check if the operator is hermitian and cast coefficients to floats
# in order to avoid trouble with the simulation backends
assert qop.is_hermitian()
for k, v in qop.qubit_operator.terms.items():
qop.qubit_operator.terms[k] = to_float(v)
qop = qop.simplify()
if len(qop) == 0:
warnings.warn("Excitation generator is a unit operator.\n"
"Non-standard transformations might not work with general fermionic operators\n"
"indices = " + str(indices), category=TequilaWarning)
return qop
def make_hardcore_boson_excitation_gate(self, indices, angle, control=None, assume_real=True, compile_options="optimize"):
target = []
for pair in indices:
assert len(pair) == 2
target += [pair[0], pair[1]]
consistency = [x < self.n_orbitals for x in target]
if not all(consistency):
raise TequilaException(
"make_hardcore_boson_excitation_gate: Inconsistencies in indices={}. Should be indexed from 0 ... n_orbitals={}".format(
indices, self.n_orbitals))
return gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, control=control, compile_options=compile_options)
def make_excitation_gate(self, indices, angle, control=None, assume_real=True, **kwargs):
"""
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\\prod_{k} a_{p_k}^\\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
"""
generator = self.make_excitation_generator(indices=indices, remove_constant_term=control is None)
p0 = self.make_excitation_generator(indices=indices, form="P0", remove_constant_term=control is None)
return QCircuit.wrap_gate(
FermionicGateImpl(angle=angle, generator=generator, p0=p0, transformation=type(self.transformation).__name__.lower(), assume_real=assume_real, control=control, **kwargs))
def make_molecule(self, *args, **kwargs) -> MolecularData:
"""Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
"""
molecule = MolecularData(**self.parameters.molecular_data_param)
# try to load
do_compute = True
try:
import os
if os.path.exists(self.parameters.filename):
molecule.load()
do_compute = False
except OSError:
do_compute = True
if do_compute:
molecule = self.do_make_molecule(*args, **kwargs)
molecule.save()
return molecule
def do_make_molecule(self, *args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
# integrals need to be passed in base class
assert ("one_body_integrals" in kwargs)
assert ("two_body_integrals" in kwargs)
one_body_integrals = kwargs["one_body_integrals"]
two_body_integrals = kwargs["two_body_integrals"]
# tequila assumes "openfermion" ordering, integrals can however be passed
# down in other orderings, but it needs to be indicated by keyword
if "ordering" in kwargs:
two_body_integrals = NBodyTensor(two_body_integrals, ordering=kwargs["ordering"])
two_body_integrals.reorder(to="openfermion")
two_body_integrals = two_body_integrals.elems
if "nuclear_repulsion" in kwargs:
nuclear_repulsion = kwargs["nuclear_repulsion"]
else:
nuclear_repulsion = 0.0
warnings.warn("No nuclear_repulsion given for custom molecule, setting to zero", category=TequilaWarning)
if ("n_orbitals" in kwargs):
n_orbitals = kwargs["n_orbitals"]
else:
n_orbitals = one_body_integrals.shape[0]
for i in [0, 1, 2, 3]:
assert n_orbitals == two_body_integrals.shape[i]
molecule = MolecularData(**self.parameters.molecular_data_param)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.nuclear_repulsion = nuclear_repulsion
molecule.n_orbitals = n_orbitals
if "n_electrons" in kwargs:
molecule.n_electrons = kwargs["n_electrons"]
molecule.save()
return molecule
@property
def n_orbitals(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_orbitals
else:
return len(self.active_space.active_orbitals)
@property
def n_electrons(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_electrons
else:
return 2 * len(self.active_space.active_reference_orbitals)
def make_hamiltonian(self, occupied_indices=None, active_indices=None, threshold=1.e-8) -> QubitHamiltonian:
""" """
if occupied_indices is None and self.active_space is not None:
occupied_indices = self.active_space.frozen_reference_orbitals
if active_indices is None and self.active_space is not None:
active_indices = self.active_space.active_orbitals
fop = openfermion.transforms.get_fermion_operator(
self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))
try:
qop = self.transformation(fop)
except TypeError:
qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))
qop.is_hermitian()
return qop
def make_hardcore_boson_hamiltonian(self):
if not self.transformation.up_then_down:
warnings.warn(
"Hardcore-Boson Hamiltonian without reordering will result in non-consecutive Hamiltonians that are eventually not be combinable with other features of tequila. Try transformation=\'ReorderedJordanWigner\' or similar for more consistency",
TequilaWarning)
# integrate with QubitEncoding at some point
n_orbitals = self.n_orbitals
c, obt, tbt = self.get_integrals()
h = numpy.zeros(shape=[n_orbitals] * 2)
g = numpy.zeros(shape=[n_orbitals] * 2)
for p in range(n_orbitals):
h[p, p] += 2 * obt[p, p]
for q in range(n_orbitals):
h[p, q] += + tbt[p, p, q, q]
if p != q:
g[p, q] += 2 * tbt[p, q, q, p] - tbt[p, q, p, q]
H = c
for p in range(n_orbitals):
for q in range(n_orbitals):
up = p
uq = q
H += h[p, q] * Sm(up) * Sp(uq) + g[p, q] * Sm(up) * Sp(up) * Sm(uq) * Sp(uq)
return H
def make_molecular_hamiltonian(self):
if self.active_space:
return self.molecule.get_molecular_hamiltonian(occupied_indices=self.active_space.frozen_reference_orbitals,
active_indices=self.active_space.active_orbitals)
else:
return self.molecule.get_molecular_hamiltonian()
def get_integrals(self, two_body_ordering="openfermion"):
"""
Returns
-------
Tuple with:
constant part (nuclear_repulsion + possible integrated parts from active-spaces)
one_body_integrals
two_body_integrals
"""
if self.active_space is not None and len(self.active_space.frozen_reference_orbitals) > 0:
c, h1, h2 = self.molecule.get_active_space_integrals(active_indices=self.active_space.active_orbitals,
occupied_indices=self.active_space.frozen_reference_orbitals)
else:
c = 0.0
h1 = self.molecule.one_body_integrals
h2 = self.molecule.two_body_integrals
c += self.molecule.nuclear_repulsion
h2 = NBodyTensor(h2, ordering="openfermion")
h2 = h2.reorder(to=two_body_ordering).elems
return c, h1, h2
def compute_one_body_integrals(self):
""" convenience function """
c, h1, h2 = self.get_integrals()
return h1
def compute_two_body_integrals(self, two_body_ordering="openfermion"):
""" """
c, h1, h2 = self.get_integrals(two_body_ordering=two_body_ordering)
return h2
def compute_constant_part(self):
c, h1, h2 = self.get_integrals()
return c
def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:
""" """
raise Exception("BaseClass Method")
def prepare_reference(self, state=None, *args, **kwargs):
"""
Returns
-------
A tequila circuit object which prepares the reference of this molecule in the chosen transformation
"""
if state is None:
assert self.n_electrons %2 == 0
state = [0]*(self.n_orbitals*2)
for i in range(self.n_electrons):
state[i]=1
reference_state = BitString.from_array(self.transformation.map_state(state=state))
U = prepare_product_state(reference_state)
# prevent trace out in direct wfn simulation
U.n_qubits = self.n_orbitals*2 # adapt when tapered transformations work
return U
def prepare_hardcore_boson_reference(self):
# HF state in the HCB representation (paired electrons)
U = gates.X(target=[i for i in range(self.n_electrons // 2)])
U.n_qubits = self.n_orbitals
return U
def hcb_to_me(self, U=None):
"""
Transform a circuit in the hardcore-boson encoding (HCB)
to the encoding of this molecule
HCB is supposed to be encoded on the first n_orbitals qubits
Parameters
----------
U: HCB circuit (using the alpha qubits)
Returns
-------
"""
if U is None:
U = QCircuit()
# consistency
consistency = [x < self.n_orbitals for x in U.qubits]
if not all(consistency):
warnings.warn(
"hcb_to_me: given circuit is not defined on the first {} qubits. Is this a HCB circuit?".format(
self.n_orbitals))
# map to alpha qubits
alpha_map = {k: self.transformation.up(k) for k in range(self.n_orbitals)}
alpha_U = U.map_qubits(qubit_map=alpha_map)
UX = self.transformation.hcb_to_me()
if UX is None:
raise TequilaException(
"transformation={} has no hcb_to_me function implemented".format(self.transformation))
return alpha_U + UX
def get_pair_specific_indices(self,
pair_info: str = None,
include_singles: bool = True,
general_excitations: bool = True) -> list:
"""
Assuming a pair-specific model, create a pair-specific index list
to be used in make_upccgsd_ansatz(indices = ... )
Excite from a set of references (i) to any pair coming from (i),
i.e. any (i,j)/(j,i). If general excitations are allowed, also
allow excitations from pairs to appendant pairs and reference.
Parameters
----------
pair_info
file or list including information about pair structure
references single number, pair double
example: as file: "0,1,11,11,00,10" (hand over file name)
in file, skip first row assuming some text with information
as list:['0','1`','11','11','00','10']
~> two reference orbitals 0 and 1,
then two orbitals from pair 11, one from 00, one mixed 10
include_singles
include single excitations
general_excitations
allow general excitations
Returns
-------
list of indices with pair-specific ansatz
"""
if pair_info is None:
raise TequilaException("Need to provide some pair information.")
# If pair-information given on file, load (layout see above)
if isinstance(pair_info, str):
pairs = numpy.loadtxt(pair_info, dtype=str, delimiter=",", skiprows=1)
elif isinstance(pair_info, list):
pairs = pair_info
elif not isinstance(pair_info, list):
raise TequilaException("Pair information needs to be contained in a list or filename.")
connect = [[]] * len(pairs)
# determine "connectivity"
generalized = 0
for idx, p in enumerate(pairs):
if len(p) == 1:
connect[idx] = [i for i in range(len(pairs))
if ((len(pairs[i]) == 2) and (str(idx) in pairs[i]))]
elif (len(p) == 2) and general_excitations:
connect[idx] = [i for i in range(len(pairs))
if (((p[0] in pairs[i]) or (p[1] in pairs[i]) or str(i) in p)
and not (i == idx))]
elif len(p) > 2:
raise TequilaException("Invalid reference of pair id.")
# create generating indices from connectivity
indices = []
for i, to in enumerate(connect):
for a in to:
indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))
if include_singles:
indices.append(((2 * i, 2 * a)))
indices.append(((2 * i + 1, 2 * a + 1)))
return indices
def format_excitation_indices(self, idx):
"""
Consistent formatting of excitation indices
idx = [(p0,q0),(p1,q1),...,(pn,qn)]
sorted as: p0<p1<pn and pi<qi
:param idx: list of index tuples describing a single(!) fermionic excitation
:return: tuple-list of index tuples
"""
idx = [tuple(sorted(x)) for x in idx]
idx = sorted(idx, key=lambda x: x[0])
return tuple(idx)
def make_upccgsd_indices(self, key, reference_orbitals=None, *args, **kwargs):
if reference_orbitals is None:
reference_orbitals = [i for i in range(self.n_electrons // 2)]
indices = []
# add doubles in hcb encoding
if hasattr(key, "lower") and key.lower() == "ladder":
# ladder structure of the pair excitations
# ensures local connectivity
indices = [[(n, n + 1)] for n in range(self.n_orbitals - 1)]
elif hasattr(key, "lower") and "g" not in key.lower():
indices = [[(n, m)] for n in reference_orbitals for m in range(self.n_orbitals) if
n < m and m not in reference_orbitals]
elif hasattr(key, "lower") and "g" in key.lower():
indices = [[(n, m)] for n in range(self.n_orbitals) for m in range(self.n_orbitals) if n < m]
else:
raise TequilaException("Unknown recipe: {}".format(key))
indices = [self.format_excitation_indices(idx) for idx in indices]
return indices
def make_hardcore_boson_upccgd_layer(self,
indices: list = "UpCCGD",
label: str = None,
assume_real: bool = True,
*args, **kwargs):
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices.lower())
UD = QCircuit()
for idx in indices:
UD += self.make_hardcore_boson_excitation_gate(indices=idx, angle=(idx, "D", label),
assume_real=assume_real)
return UD
def make_ansatz(self, name:str, *args, **kwargs):
name = name.lower()
if name.strip()=="":
return QCircuit()
if "+" in name:
U = QCircuit()
subparts = name.split("+")
U = self.make_ansatz(name=subparts[0], *args ,**kwargs)
if "include_reference" in kwargs:
kwargs.pop("include_reference")
if "hcb_optimization" in kwargs:
kwargs.pop("hcb_optimization")
for subpart in subparts[1:]:
U += self.make_ansatz(name=subpart, *args, include_reference=False, hcb_optimization=False, **kwargs)
return U
if name=="uccsd":
return self.make_uccsd_ansatz(*args, **kwargs)
elif "d" in name or "s" in name:
return self.make_upccgsd_ansatz(name=name, *args, **kwargs)
else:
raise TequilaException("unknown ansatz with name={}".format(name))
def make_upccgsd_ansatz(self,
include_reference: bool = True,
name: str = "UpCCGSD",
label: str = None,
order: int = None,
assume_real: bool = True,
hcb_optimization: bool = None,
spin_adapt_singles: bool = True,
neglect_z = False,
*args, **kwargs):
"""
UpGCCSD Ansatz similar as described by Lee et. al.
Parameters
----------
include_singles
include singles excitations. Is overwritten if indices are a string (i.e. indices=UpCCGSD will always include singles, UpCCGD will not)
include_reference
include the HF reference state as initial state
indices
pass custom defined set of indices from which the ansatz will be created
List of tuples of tuples spin-indices e.g. [((2*p,2*q),(2*p+1,2*q+1)), ...]
label
An additional label that is set with the variables
default is None and no label will be set: variables names will be
(x, (p,q)) for x in range(order)
with a label the variables will be named
(label, (x, (p,q)))
order
Order of the ansatz (default is 1)
determines how often the ordering gets repeated
parameters of repeating layers are independent
assume_real
assume a real wavefunction (that is always the case if the reference state is real)
reduces potential gradient costs from 4 to 2
Returns
-------
UpGCCSD ansatz
"""
name = name.upper()
if ("A" in name) and neglect_z is None:
neglect_z = True
else:
neglect_z = False
if order is None:
try:
if "-" in name:
order = int(name.split("-")[0])
else:
order = 1
except:
order = 1
indices = self.make_upccgsd_indices(key=name)
# check if the used qubit encoding has a hcb transformation
have_hcb_trafo = self.transformation.hcb_to_me() is not None
# consistency checks for optimization
if have_hcb_trafo and hcb_optimization is None:
hcb_optimization = True
if "HCB" in name:
hcb_optimization = True
if hcb_optimization and not have_hcb_trafo and "HCB" not in name:
raise TequilaException(
"use_hcb={} but transformation={} has no \'hcb_to_me\' function. Try transformation=\'ReorderedJordanWigner\'".format(
hcb_optimization, self.transformation))
if "S" in name and "HCB" in name:
if "HCB" in name and "S" in name:
raise Exception(
"name={}, Singles can't be realized without mapping back to the standard encoding leave S or HCB out of the name".format(
name))
# first layer
if not hcb_optimization:
U = QCircuit()
if include_reference:
U = self.prepare_reference()
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, assume_real=assume_real,
label=(label, 0), spin_adapt_singles=spin_adapt_singles, *args, **kwargs)
else:
U = QCircuit()
if include_reference:
U = self.prepare_hardcore_boson_reference()
U += self.make_hardcore_boson_upccgd_layer(indices=indices, assume_real=assume_real, label=(label, 0),
*args, **kwargs)
if "HCB" not in name:
U = self.hcb_to_me(U=U)
if "S" in name:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=(label, 0),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z, *args, **kwargs)
for k in range(1, order):
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, label=(label, k),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z)
return U
def make_upccgsd_layer(self, indices, include_singles=True, include_doubles=True, assume_real=True, label=None,
spin_adapt_singles: bool = True, angle_transform=None, mix_sd=False, neglect_z=False, *args, **kwargs):
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
angle = (tuple([idx]), "D", label)
if include_doubles:
if "jordanwigner" in self.transformation.name.lower() and not self.transformation.up_then_down:
# we can optimize with qubit excitations for the JW representation
target=[self.transformation.up(idx[0]), self.transformation.up(idx[1]), self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle,
indices=((2 * idx[0], 2 * idx[1]), (2 * idx[0] + 1, 2 * idx[1] + 1)),
assume_real=assume_real, **kwargs)
if include_singles and mix_sd:
U += self.make_upccgsd_singles(indices=[idx], assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
if include_singles and not mix_sd:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
return U
def make_upccgsd_singles(self, indices="UpCCGSD", spin_adapt_singles=True, label=None, angle_transform=None,
assume_real=True, neglect_z=False, *args, **kwargs):
if neglect_z and "jordanwigner" not in self.transformation.name.lower():
raise TequilaException("neglegt-z approximation in UpCCGSD singles needs the (Reversed)JordanWigner representation")
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices)
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
if spin_adapt_singles:
angle = (idx, "S", label)
if angle_transform is not None:
angle = angle_transform(angle)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0], 2 * idx[1])], assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
else:
angle1 = (idx, "SU", label)
angle2 = (idx, "SD", label)
if angle_transform is not None:
angle1 = angle_transform(angle1)
angle2 = angle_transform(angle2)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle1, target=targeta, assume_real=assume_real, *kwargs)
U += gates.QubitExcitation(angle=angle2, target=targetb, assume_real=assume_real, *kwargs)
else:
U += self.make_excitation_gate(angle=angle1, indices=[(2 * idx[0], 2 * idx[1])],
assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle2, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
return U
def make_uccsd_ansatz(self, trotter_steps: int=1,
initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = "mp2",
include_reference_ansatz=True,
parametrized=True,
threshold=1.e-8,
add_singles=None,
*args, **kwargs) -> QCircuit:
"""
Parameters
----------
initial_amplitudes :
initial amplitudes given as ManyBodyAmplitudes structure or as string
where 'mp2', 'cc2' or 'ccsd' are possible initializations
include_reference_ansatz :
Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
parametrized :
Initialize with variables, otherwise with static numbers (Default value = True)
trotter_steps: int :
initial_amplitudes: typing.Union[str :
Amplitudes :
ClosedShellAmplitudes] :
(Default value = "cc2")
Returns
-------
type
Parametrized QCircuit
"""
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2" and add_singles is None:
add_singles=True
elif initial_amplitudes is not None and add_singles is not None:
warnings.warn("make_uccsd_anstatz: add_singles has no effect when explicit amplitudes are passed down", TequilaWarning)
elif add_singles is None:
add_singles=True
if self.n_electrons % 2 != 0:
raise TequilaException("make_uccsd_ansatz currently only for closed shell systems")
nocc = self.n_electrons // 2
nvirt = self.n_orbitals - nocc
Uref = QCircuit()
if include_reference_ansatz:
Uref = self.prepare_reference()
amplitudes = initial_amplitudes
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2":
amplitudes = self.compute_mp2_amplitudes()
elif initial_amplitudes.lower() == "ccsd":
amplitudes = self.compute_ccsd_amplitudes()
else:
try:
amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())
except Exception as exc:
raise TequilaException(
"{}\nDon't know how to initialize \'{}\' amplitudes".format(exc, initial_amplitudes))
if amplitudes is None:
tia=None
if add_singles: tia=numpy.zeros(shape=[nocc, nvirt])
amplitudes = ClosedShellAmplitudes(
tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),
tIA=tia)
closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)
indices = {}
if not isinstance(amplitudes, dict):
amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)
amplitudes = dict(sorted(amplitudes.items(), key=lambda x: numpy.fabs(x[1]), reverse=True))
for key, t in amplitudes.items():
assert (len(key) % 2 == 0)
if not numpy.isclose(t, 0.0, atol=threshold):
if closed_shell:
if len(key) == 2 and add_singles:
# singles
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_a = (2*key[0], 2*key[1])
idx_b = (2*key[0]+1, 2*key[1]+1)
indices[idx_a]=angle
indices[idx_b]=angle
else:
assert len(key)==4
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_abab=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3])
indices[idx_abab]=angle
if key[0]!=key[2] and key[1]!=key[3]:
idx_aaaa=(2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3])
idx_bbbb=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2]+1, 2 * key[3]+1)
partner = tuple([key[2], key[1], key[0], key[3]])
anglex=2.0*(t - amplitudes[partner])
if parametrized:
anglex=2.0*(Variable(name=key) - Variable(partner))
indices[idx_aaaa]=anglex
indices[idx_bbbb]=anglex
else:
raise Exception("only closed-shell supported, please assemble yourself .... sorry :-)")
UCCSD = QCircuit()
factor = 1.0 / trotter_steps
for step in range(trotter_steps):
for idx, angle in indices.items():
UCCSD += self.make_excitation_gate(indices=idx, angle=factor * angle)
if hasattr(initial_amplitudes,"lower") and initial_amplitudes.lower()=="mp2" and parametrized and add_singles:
# mp2 has no singles, need to initialize them here (if not parametrized initializling as 0.0 makes no sense though)
UCCSD += self.make_upccgsd_layer(indices="upccsd", include_singles=True, include_doubles=False)
return Uref + UCCSD
def compute_amplitudes(self, method: str, *args, **kwargs):
"""
Compute closed-shell CC amplitudes
Parameters
----------
method :
coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)
Success might depend on backend
got an extra function for MP2
*args :
**kwargs :
Returns
-------
"""
raise TequilaException("compute amplitudes: Needs to be overwritten by backend")
def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:
"""
Compute closed-shell mp2 amplitudes
.. math::
t(a,i,b,j) = 0.25 * g(a,i,b,j)/(e(i) + e(j) -a(i) - b(j) )
:return:
Parameters
----------
Returns
-------
"""
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.molecule.n_electrons // 2 # this is never the active space
ei = fij[:nocc]
ai = fij[nocc:]
abgij = g[nocc:, nocc:, :nocc, :nocc]
amplitudes = abgij * 1.0 / (
ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))
E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,
optimize='greedy')
self.molecule.mp2_energy = E + self.molecule.hf_energy
return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))
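    # Usage sketch (commented so the module stays importable); the geometry and basis
    # set are placeholders and a backend such as psi4 is assumed to be installed.
    #
    #   import tequila as tq
    #   mol = tq.Molecule(geometry="H 0.0 0.0 0.0\nH 0.0 0.0 0.7", basis_set="sto-3g")
    #   amps = mol.compute_mp2_amplitudes()      # ClosedShellAmplitudes(tIjAb=...)
    #   print(amps.tIjAb.shape)                  # (nocc, nocc, nvirt, nvirt)
    #   variables = amps.make_parameter_dictionary(threshold=1.e-6)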
def compute_cis_amplitudes(self):
"""
Compute the CIS amplitudes of the molecule
"""
@dataclass
class ResultCIS:
""" """
omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]
amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]
def __getitem__(self, item):
return (self.omegas[item], self.amplitudes[item])
def __len__(self):
return len(self.omegas)
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.n_alpha_electrons
nvirt = self.n_orbitals - nocc
pairs = []
for i in range(nocc):
for a in range(nocc, nocc + nvirt):
pairs.append((a, i))
M = numpy.ndarray(shape=[len(pairs), len(pairs)])
for xx, x in enumerate(pairs):
eia = fij[x[0]] - fij[x[1]]
a, i = x
for yy, y in enumerate(pairs):
b, j = y
delta = float(y == x)
gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]
M[xx, yy] = eia * delta + gpart
omega, xvecs = numpy.linalg.eigh(M)
# convert amplitudes to ndarray sorted by excitation energy
nex = len(omega)
amplitudes = []
for ex in range(nex):
t = numpy.ndarray(shape=[nvirt, nocc])
            exvec = xvecs[:, ex]  # eigenvectors are the columns returned by numpy.linalg.eigh
for xx, x in enumerate(pairs):
a, i = x
t[a - nocc, i] = exvec[xx]
amplitudes.append(ClosedShellAmplitudes(tIA=t))
return ResultCIS(omegas=list(omega), amplitudes=amplitudes)
@property
def rdm1(self):
"""
        Returns the 1-RDM if it has been computed with the compute_rdms function before
"""
if self._rdm1 is not None:
return self._rdm1
else:
print("1-RDM has not been computed. Return None for 1-RDM.")
return None
@property
def rdm2(self):
"""
        Returns the 2-RDM if it has been computed with the compute_rdms function before
This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
"""
if self._rdm2 is not None:
return self._rdm2
else:
print("2-RDM has not been computed. Return None for 2-RDM.")
return None
def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,
get_rdm1: bool = True, get_rdm2: bool = True, ordering="dirac"):
"""
Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
a unitary U. This method uses the standard ordering in physics as denoted below.
Note, that the representation of the density matrices depends on the qubit transformation
used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
matrices in the occupation picture.
We only consider real orbitals and thus real-valued RDMs.
The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.
.. math :
\\text{rdm1: } \\gamma^p_q = \\langle \\psi | a^p a_q | \\psi \\rangle
= \\langle U 0 | a^p a_q | U 0 \\rangle
\\text{rdm2: } \\gamma^{pq}_{rs} = \\langle \\psi | a^p a^q a_s a_r | \\psi \\rangle
= \\langle U 0 | a^p a^q a_s a_r | U 0 \\rangle
Parameters
----------
U :
Quantum Circuit to achieve the desired state \\psi = U |0\\rangle, non-optional
variables :
If U is parametrized, then need to hand over a set of fixed variables
spin_free :
Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
get_rdm1, get_rdm2 :
Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
it is recommended to compute them at once.
Returns
-------
"""
# Check whether unitary circuit is not 0
if U is None:
raise TequilaException('Need to specify a Quantum Circuit.')
# Check whether transformation is BKSF.
# Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct
# transformation, because it computes the number of qubits incorrectly in this case.
# A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.")
# Set up number of spin-orbitals and molecular orbitals respectively
n_SOs = 2 * self.n_orbitals
n_MOs = self.n_orbitals
def _get_of_op(operator_tuple):
""" Returns operator given by a operator tuple as OpenFermion - Fermion operator """
op = openfermion.FermionOperator(operator_tuple)
return op
def _get_qop_hermitian(of_operator) -> QubitHamiltonian:
""" Returns Hermitian part of Fermion operator as QubitHamiltonian """
qop = self.transformation(of_operator)
#qop = QubitHamiltonian(self.transformation(of_operator))
real, imag = qop.split(hermitian=True)
if real:
return real
elif not real:
raise TequilaException(
"Qubit Hamiltonian does not have a Hermitian part. Operator ={}".format(of_operator))
def _build_1bdy_operators_spinful() -> list:
""" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp
ops = []
for p in range(n_SOs):
for q in range(p + 1):
op_tuple = ((p, 1), (q, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinful() -> list:
""" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = -pqsr = -qprs = qpsr
# and = rspq
ops = []
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_1bdy_operators_spinfree() -> list:
""" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp (not changed by spin-summation)
ops = []
for p in range(n_MOs):
for q in range(p + 1):
# Spin aa
op_tuple = ((2 * p, 1), (2 * q, 0))
op = _get_of_op(op_tuple)
# Spin bb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinfree() -> list:
""" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)
# and = rspq
ops = []
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
# Spin aaaa
op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p != q and r != s) else '0.0 []'
op = _get_of_op(op_tuple)
# Spin abab
op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (
2 * p != 2 * q + 1 and 2 * r != 2 * s + 1) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin baba
op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (
2 * p + 1 != 2 * q and 2 * r + 1 != 2 * s) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin bbbb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (
p != q and r != s) else '0.0 []'
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _assemble_rdm1(evals) -> numpy.ndarray:
"""
Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
Same symmetry with or without spin, so we can use the same function
"""
N = n_MOs if spin_free else n_SOs
rdm1 = numpy.zeros([N, N])
ctr: int = 0
for p in range(N):
for q in range(p + 1):
rdm1[p, q] = evals[ctr]
# Symmetry pq = qp
rdm1[q, p] = rdm1[p, q]
ctr += 1
return rdm1
def _assemble_rdm2_spinful(evals) -> numpy.ndarray:
""" Returns spin-ful two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetries due to anticommutation relations
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr
rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs
rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr
return rdm2
def _assemble_rdm2_spinfree(evals) -> numpy.ndarray:
""" Returns spin-free two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetry: pqrs = qpsr
for p, q, r, s in product(range(n_MOs), repeat=4):
if p >= q or r >= s:
rdm2[q, p, s, r] = rdm2[p, q, r, s]
return rdm2
# Build operator lists
qops = []
if spin_free:
qops += _build_1bdy_operators_spinfree() if get_rdm1 else []
qops += _build_2bdy_operators_spinfree() if get_rdm2 else []
else:
qops += _build_1bdy_operators_spinful() if get_rdm1 else []
qops += _build_2bdy_operators_spinful() if get_rdm2 else []
# Transform operator lists to QubitHamiltonians
qops = [_get_qop_hermitian(op) for op in qops]
# Compute expected values
evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)
# Assemble density matrices
# If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type
def _reset_rdm(rdm):
if rdm is not None:
if spin_free and rdm.shape[0] != n_MOs:
return None
if not spin_free and rdm.shape[0] != n_SOs:
return None
return rdm
self._rdm1 = _reset_rdm(self._rdm1)
self._rdm2 = _reset_rdm(self._rdm2)
# Split expectation values in 1- and 2-particle expectation values
if get_rdm1:
len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2
else:
len_1 = 0
evals_1, evals_2 = evals[:len_1], evals[len_1:]
# Build matrices using the expectation values
self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1
if spin_free:
self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2
else:
self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2
if get_rdm2:
rdm2 = NBodyTensor(elems=self.rdm2, ordering="dirac")
rdm2.reorder(to=ordering)
rdm2 = rdm2.elems
self._rdm2 = rdm2
if get_rdm1:
if get_rdm2:
return self.rdm1, self.rdm2
else:
return self.rdm1
elif get_rdm2:
return self.rdm2
else:
warnings.warn("compute_rdms called with instruction to not compute?", TequilaWarning)
def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:
"""
Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.
Parameters
----------
sum_rdm1, sum_rdm2 :
If set to true, perform spin summation on rdm1, rdm2
Returns
-------
rdm1_spinsum, rdm2_spinsum :
The desired spin-free matrices
"""
n_MOs = self.n_orbitals
rdm1_spinsum = None
rdm2_spinsum = None
# Spin summation on rdm1
if sum_rdm1:
# Check whether spin-rdm2 exists
if self._rdm1 is None:
raise TequilaException("The spin-RDM for the 1-RDM does not exist!")
# Check whether existing rdm1 is in spin-orbital basis
if self._rdm1.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])
for p in range(n_MOs):
for q in range(p + 1):
rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]
rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]
for p in range(n_MOs):
for q in range(p):
rdm1_spinsum[q, p] = rdm1_spinsum[p, q]
# Spin summation on rdm2
if sum_rdm2:
# Check whether spin-rdm2 exists
if self._rdm2 is None:
raise TequilaException("The spin-RDM for the 2-RDM does not exist!")
# Check whether existing rdm2 is in spin-orbital basis
if self._rdm2.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm2_spinsum = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]
return rdm1_spinsum, rdm2_spinsum
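    # Usage sketch (commented so the module stays importable): spin-orbital RDMs
    # followed by spin summation. The circuit U and angles are placeholders.
    #
    #   mol.compute_rdms(U=U, variables=angles, spin_free=False)
    #   rdm1_sf, rdm2_sf = mol.rdm_spinsum(sum_rdm1=True, sum_rdm2=True)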
def perturbative_f12_correction(self, rdm1: numpy.ndarray = None, rdm2: numpy.ndarray = None,
gamma: float = 1.4, n_ri: int = None,
external_info: dict = None, **kwargs) -> float:
"""
Computes the spin-free [2]_R12 correction, needing only the 1- and 2-RDM of a reference method
Requires either 1-RDM, 2-RDM or information to compute them in kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
            dimensionality of the RI-basis; specify only if you want to truncate the available RI-basis
if None, then the maximum available via tensors / basis-set is used
must not be larger than size of available RI-basis, and not smaller than size of OBS
for n_ri==dim(OBS), the correction returns zero
external_info :
for usage in qc_base, need to provide information where to find one-body tensor f12-tensor <rs|f_12|pq>;
pass dictionary with {"f12_filename": where to find f12-tensor, "scheme": ordering scheme of tensor}
kwargs :
e.g. RDM-information via {"U": QCircuit, "variables": optimal angles}, needs to be passed if rdm1,rdm2 not
yet computed
Returns
-------
the f12 correction for the energy
"""
from .f12_corrections._f12_correction_base import ExplicitCorrelationCorrection
correction = ExplicitCorrelationCorrection(mol=self, rdm1=rdm1, rdm2=rdm2, gamma=gamma,
n_ri=n_ri, external_info=external_info, **kwargs)
return correction.compute()
def __str__(self) -> str:
result = str(type(self)) + "\n"
result += "Qubit Encoding\n"
result += str(self.transformation) + "\n\n"
result += "Parameters\n"
for k, v in self.parameters.__dict__.items():
result += "{key:15} : {value:15} \n".format(key=str(k), value=str(v))
result += "\n"
return result
import os
from dataclasses import dataclass
from tequila import TequilaException, BitString, TequilaWarning
from tequila.hamiltonian import QubitHamiltonian
from tequila.wavefunction import QubitWaveFunction
from tequila.hamiltonian.paulis import Sp, Sm, Qp, Qm
from tequila.circuit import QCircuit, gates, _gates_impl
from tequila.objective.objective import Variable, Variables, ExpectationValue
from tequila.simulators.simulator_api import simulate
from tequila.utils import to_float
from tequila.objective import assign_variable
from .encodings import known_encodings
import typing, numpy, numbers, copy
from itertools import product
# if you are experiencing import errors you need to update openfermion
# required is version >= 1.0
# otherwise replace with from openfermion.hamiltonians import MolecularData
import openfermion
from openfermion.chem import MolecularData
import warnings
@dataclass
class ActiveSpaceData:
active_orbitals: list # active orbitals (spatial, c1)
reference_orbitals: list # reference orbitals (spatial, c1)
def __str__(self):
result = "Active Space Data:\n"
result += "{key:15} : {value:15} \n".format(key="active_orbitals", value=str(self.active_orbitals))
result += "{key:15} : {value:15} \n".format(key="reference_orbitals",
value=str(self.reference_orbitals))
result += "{key:15} : {value:15} \n".format(key="frozen_docc", value=str(self.frozen_docc))
result += "{key:15} : {value:15} \n".format(key="frozen_uocc", value=str(self.frozen_uocc))
return result
@property
def frozen_reference_orbitals(self):
return [i for i in self.reference_orbitals if i not in self.active_orbitals]
@property
def active_reference_orbitals(self):
return [i for i in self.reference_orbitals if i in self.active_orbitals]
class FermionicGateImpl(gates.QubitExcitationImpl):
# keep the overview in circuits
def __init__(self, generator, p0, transformation, *args, **kwargs):
super().__init__(generator=generator, target=generator.qubits, p0=p0, *args, **kwargs)
self._name = "FermionicExcitation"
self.transformation=transformation
def compile(self):
return gates.Trotterized(generator=self.generator, control=self.control, angle=self.parameter, steps=1)
def prepare_product_state(state: BitString) -> QCircuit:
"""Small convenience function
Parameters
----------
state :
product state encoded into a bitstring
state: BitString :
Returns
-------
type
unitary circuit which prepares the product state
"""
result = QCircuit()
for i, v in enumerate(state.array):
if v == 1:
result += gates.X(target=i)
return result
@dataclass
class ParametersQC:
"""Specialization of ParametersHamiltonian"""
basis_set: str = None # Quantum chemistry basis set
geometry: str = None # geometry of the underlying molecule (units: Angstrom!),
# this can be a filename leading to an .xyz file or the geometry given as a string
description: str = ""
multiplicity: int = 1
charge: int = 0
name: str = None
@property
def n_electrons(self, *args, **kwargs):
return self.get_nuc_charge() - self.charge
def get_nuc_charge(self):
return sum(self.get_atom_number(name=atom) for atom in self.get_atoms())
def get_atom_number(self, name):
atom_numbers={"h":1, "he":2, "li":3, "be":4, "b":5, "c":6, "n":7, "o":8, "f":9, "ne":10, "na":11, "mg":12, "al":13, "si":14, "ph":15, "s":16, "cl":17, "ar":18}
if name.lower() in atom_numbers:
return atom_numbers[name.lower()]
        try:
            import periodictable as pt
            atom = name.lower().capitalize()  # periodictable expects e.g. 'Li', not 'li'
            element = pt.elements.symbol(atom)
            return element.number
        except Exception:
            raise TequilaException("can not assign atomic number to element {}\npip install periodictable will fix it".format(name))
def get_atoms(self):
return [x[0] for x in self.get_geometry()]
def __post_init__(self,*args, **kwargs):
if self.name is None and self.geometry is None:
raise TequilaException("no geometry or name given to molecule\nprovide geometry=filename.xyz or geometry=`h 0.0 0.0 0.0\\n...`\nor name=whatever with file whatever.xyz being present")
# auto naming
if self.name is None:
if ".xyz" in self.geometry:
self.name=self.geometry.split(".xyz")[0]
if self.description is None:
coord, description = self.read_xyz_from_file()
self.description=description
else:
atoms=self.get_atoms()
atom_names=sorted(list(set(atoms)), key=lambda x: self.get_atom_number(x), reverse=True)
if self.name is None:
drop_ones=lambda x: "" if x==1 else x
self.name="".join(["{}{}".format(x,drop_ones(atoms.count(x))) for x in atom_names])
self.name = self.name.lower()
if self.geometry is None:
self.geometry=self.name+".xyz"
if ".xyz" in self.geometry and not os.path.isfile(self.geometry):
raise TequilaException("could not find file for molecular coordinates {}".format(self.geometry))
@property
def filename(self):
""" """
return "{}_{}".format(self.name, self.basis_set)
@property
def molecular_data_param(self) -> dict:
""":return: Give back all parameters for the MolecularData format from openfermion as dictionary"""
return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,
'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename
}
@staticmethod
def format_element_name(string):
"""OpenFermion uses case sensitive hash tables for chemical elements
I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work
this convenience function does the naming
:return: first letter converted to upper rest to lower
Parameters
----------
string :
Returns
-------
"""
assert (len(string) > 0)
assert (isinstance(string, str))
fstring = string[0].upper() + string[1:].lower()
return fstring
@staticmethod
def convert_to_list(geometry):
"""Convert a molecular structure given as a string into a list suitable for openfermion
Parameters
----------
geometry :
a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0\n h 0.0 0.0 1.0"
Returns
-------
type
            A list with the correct format for openfermion, e.g. [('H', (0.0, 0.0, 0.0)), ...]
"""
result = []
# Remove blank lines
lines = [l for l in geometry.split("\n") if l]
for line in lines:
words = line.split()
# Pad coordinates
if len(words) < 4:
words += [0.0] * (4 - len(words))
try:
tmp = (ParametersQC.format_element_name(words[0]),
(float(words[1]), float(words[2]), float(words[3])))
result.append(tmp)
except ValueError:
print("get_geometry list unknown line:\n ", line, "\n proceed with caution!")
return result
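    # Illustrative sketch (commented): the conversion performed above, with a
    # hypothetical two-atom geometry string.
    #
    #   ParametersQC.convert_to_list("h 0.0 0.0 0.0\nh 0.0 0.0 0.7")
    #   # -> [('H', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 0.7))]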
def get_geometry_string(self) -> str:
"""returns the geometry as a string
:return: geometry string
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if comment is not None:
self.description = comment
return geomstring
else:
return self.geometry
def get_geometry(self):
"""Returns the geometry
If a xyz filename was given the file is read out
otherwise it is assumed that the geometry was given as string
which is then reformatted as a list usable as input for openfermion
:return: geometry as list
e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
Units: Angstrom!
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if self.description == '':
self.description = comment
return self.convert_to_list(geomstring)
elif self.geometry is not None:
return self.convert_to_list(self.geometry)
else:
raise Exception("Parameters.qc.geometry is None")
@staticmethod
def read_xyz_from_file(filename):
"""Read XYZ filetype for molecular structures
https://en.wikipedia.org/wiki/XYZ_file_format
Units: Angstrom!
Parameters
----------
filename :
return:
Returns
-------
"""
with open(filename, 'r') as file:
content = file.readlines()
natoms = int(content[0])
comment = str(content[1]).strip('\n')
coord = ''
for i in range(natoms):
coord += content[2 + i]
return coord, comment
@dataclass
class ClosedShellAmplitudes:
""" """
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Returns
-------
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(nocc + A, I, nocc + B, J)] = value
if self.tIA is not None:
nocc = self.tIA.shape[0]
for (I, A), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(A + nocc, I)] = value
return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))
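# Illustrative sketch (commented to avoid executing on import): how the amplitude
# container above maps array entries to excitation-index keys. The numbers are
# made up for illustration.
#
#   amps = ClosedShellAmplitudes(tIjAb=numpy.zeros([1, 1, 1, 1]), tIA=numpy.array([[0.1]]))
#   amps.make_parameter_dictionary()
#   # -> {(1, 0): 0.1}   i.e. (a, i) with the virtual index a counted from nocc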
@dataclass
class Amplitudes:
"""Coupled-Cluster Amplitudes
We adopt the Psi4 notation for consistency
I,A for alpha
i,a for beta
Parameters
----------
Returns
-------
"""
@classmethod
def from_closed_shell(cls, cs: ClosedShellAmplitudes):
"""
Initialize from closed-shell Amplitude structure
Parameters
----------
cs: ClosedShellAmplitudes :
Returns
-------
"""
tijab = cs.tIjAb - numpy.einsum("ijab -> ijba", cs.tIjAb, optimize='greedy')
return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
tiJaB: numpy.ndarray = None
tijab: numpy.ndarray = None
tIJAB: numpy.ndarray = None
tia: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Neglect amplitudes below the threshold
Returns
-------
Dictionary of tequila variables (hash is in the style of (a,i,b,j))
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
            for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, 2 * j + 1)] = value
            for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), 2 * J)] = value
            for (i, j, a, b), value in numpy.ndenumerate(self.tijab):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, 2 * j + 1)] = value
            for (I, J, A, B), value in numpy.ndenumerate(self.tIJAB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), 2 * J)] = value
        if self.tIA is not None:
            nocc = self.tIA.shape[0]
            assert (self.tia.shape[0] == nocc)
            for (I, A), value in numpy.ndenumerate(self.tIA):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (A + nocc), 2 * I)] = value
            for (i, a), value in numpy.ndenumerate(self.tia):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value
return variables
class NBodyTensor:
""" Convenience class for handling N-body tensors """
class Ordering:
def __init__(self, scheme):
if hasattr(scheme, "_scheme"):
scheme = scheme._scheme
elif hasattr(scheme, "scheme"):
scheme = scheme.scheme
self._scheme = self.assign_scheme(scheme)
def assign_scheme(self, scheme):
if scheme is None:
return "chem"
else:
scheme = str(scheme)
if scheme.lower() in ["mulliken", "chem", "c", "1122"]:
return "chem"
elif scheme.lower() in ["dirac", "phys", "p", "1212"]:
return "phys"
elif scheme.lower() in ["openfermion", "of", "o", "1221"]:
return "of"
else:
raise TequilaException(
"Unknown two-body tensor scheme {}. Supported are dirac, mulliken, and openfermion".format(scheme))
def is_phys(self):
return self._scheme == "phys"
def is_chem(self):
return self._scheme == "chem"
def is_of(self):
return self._scheme == "of"
def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, ordering: str = None,
size_full: int = None):
"""
Parameters
----------
elems: Tensor data as numpy array
active_indices: List of active indices in total ordering
ordering: Ordering scheme for two body tensors
"dirac" or "phys": <12|g|12>
.. math::
g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
"mulliken" or "chem": (11|g|22)
.. math::
g_{pqrs} = \\int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
"openfermion":
.. math:: [12|g|21]
                    g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
size_full
"""
# Set elements
self.elems = elems
# Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible
# representations
if active_indices is not None:
self.active_indices = active_indices
self._passive_indices = None
self._full_indices = None
self._indices_set: bool = False
# Determine order of tensor
# Assume, that tensor is entered in desired shape, not as flat array.
self.order = len(self.elems.shape)
# Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well
if size_full is None:
self._size_full = self.elems.shape[0]
else:
self._size_full = size_full
# 2-body tensors (<=> order 4) currently allow reordering
if self.order == 4:
self.ordering = self.Ordering(ordering)
else:
if ordering is not None:
raise Exception("Ordering only implemented for tensors of order 4 / 2-body tensors.")
self.ordering = None
def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:
"""
Get subspace of tensor by a set of index lists
according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]
This essentially is an implementation of a non-contiguous slicing using numpy.take
Parameters
----------
idx_lists :
List of lists, each defining the desired subspace per axis
Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
# Check if index list has correct size
if len(idx_lists) != self.order:
raise Exception("Need to pass an index list for each dimension!" +
" Length of idx_lists needs to match order of tensor.")
# Perform slicing via numpy.take
out = self.elems
for ax in range(self.order):
if idx_lists[ax] is not None: # None means, we want the full space in this direction
out = numpy.take(out, idx_lists[ax], axis=ax)
return out
def set_index_lists(self):
""" Set passive and full index lists based on class inputs """
tmp_size = self._size_full
if self._size_full is None:
tmp_size = self.elems.shape[0]
self._passive_indices = [i for i in range(tmp_size)
if i not in self.active_indices]
self._full_indices = [i for i in range(tmp_size)]
def sub_str(self, name: str) -> numpy.ndarray:
"""
Get subspace of tensor by a string
Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.
Full space in this context may also be smaller than actual tensor dimension.
The specification of active space in this context only allows to pick a set from a list of orbitals, and
is not able to resolve an active space from irreducible representations.
Example for one-body tensor:
            hPQ.sub_str(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]
Parameters
----------
name :
String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
if not self._indices_set:
self.set_index_lists()
self._indices_set = True
if name is None:
raise Exception("No name specified.")
if len(name) != self.order:
raise Exception("Name does not match order of the tensor.")
if self.active_indices is None:
raise Exception("Need to set an active space in order to call this function.")
idx_lists = []
# Parse name as string of space indices
for char in name:
if char.lower() == 'a':
idx_lists.append(self.active_indices)
elif char.lower() == 'p':
idx_lists.append(self._passive_indices)
elif char.lower() == 'f':
if self._size_full is None:
idx_lists.append(None)
else:
idx_lists.append(self._full_indices)
else:
raise Exception("Need to specify a valid letter (a,p,f).")
out = self.sub_lists(idx_lists)
return out
def reorder(self, to: str = 'of'):
"""
Function to reorder tensors according to some convention.
Parameters
----------
to :
Ordering scheme of choice.
'openfermion', 'of' (default) :
openfermion - ordering, corresponds to integrals of the type
                h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1)
with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
currently needed for dependencies on openfermion-library
'chem', 'c' :
quantum chemistry ordering, collect particle terms,
more convenient for real-space methods
h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
This is output by psi4
'phys', 'p' :
typical physics ordering, integrals of type
h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
with operators a^pq_rs = a^p a^q a_s a_r
Returns
-------
"""
if self.order != 4:
raise Exception('Reordering currently only implemented for two-body tensors.')
to = self.Ordering(to)
if self.ordering == to:
return self
elif self.ordering.is_chem():
if to.is_of():
self.elems = numpy.einsum("psqr -> pqrs", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("prqs -> pqrs", self.elems, optimize='greedy')
elif self.ordering.is_of():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> psqr", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("pqrs -> pqsr", self.elems, optimize='greedy')
elif self.ordering.is_phys():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> prqs", self.elems, optimize='greedy')
elif to.is_of():
self.elems = numpy.einsum("pqsr -> pqrs", self.elems, optimize='greedy')
return self
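# Illustrative sketch (commented to avoid executing on import): reordering a
# two-body tensor between the Mulliken and Dirac conventions with the class above.
# The random tensor is a placeholder.
#
#   g = numpy.random.rand(4, 4, 4, 4)
#   tensor = NBodyTensor(elems=g, ordering="mulliken")
#   tensor.reorder(to="phys")        # now tensor.elems[p, q, r, s] == g[p, r, q, s]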
class QuantumChemistryBase:
def __init__(self, parameters: ParametersQC,
transformation: typing.Union[str, typing.Callable] = None,
active_orbitals: list = None,
*args,
**kwargs):
self.parameters = parameters
if "molecule" in kwargs:
self.molecule = kwargs["molecule"]
else:
self.molecule = self.make_molecule(*args, **kwargs)
assert (parameters.basis_set.lower() == self.molecule.basis.lower())
assert (parameters.multiplicity == self.molecule.multiplicity)
assert (parameters.charge == self.molecule.charge)
self.active_space = None
if active_orbitals is not None:
self.active_space = self._make_active_space_data(active_orbitals=active_orbitals)
self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)
self._rdm1 = None
self._rdm2 = None
def _initialize_transformation(self, transformation=None, *args, **kwargs):
if transformation is None:
transformation = "JordanWigner"
# filter out arguments to the transformation
trafo_args = {k.split("__")[1]: v for k, v in kwargs.items() if
(hasattr(k, "lower") and "transformation__" in k.lower())}
trafo_args["n_electrons"] = self.n_electrons
trafo_args["n_orbitals"] = self.n_orbitals
if hasattr(transformation, "upper"):
# format to conventions
transformation = transformation.replace("_", "").replace("-", "").upper()
encodings = known_encodings()
if transformation in encodings:
transformation = encodings[transformation](**trafo_args)
else:
raise TequilaException(
"Unkown Fermion-to-Qubit encoding {}. Try something like: {}".format(transformation,
list(encodings.keys())))
return transformation
def _make_active_space_data(self, active_orbitals, reference=None):
"""
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
"""
if active_orbitals is None:
return None
if reference is None:
# auto assignment only for closed-shell
assert (self.n_electrons % 2 == 0)
reference = sorted([i for i in range(self.n_electrons // 2)])
return ActiveSpaceData(active_orbitals=sorted(active_orbitals),
reference_orbitals=sorted(reference))
@classmethod
def from_openfermion(cls, molecule: openfermion.MolecularData,
transformation: typing.Union[str, typing.Callable] = None,
*args,
**kwargs):
"""
        Initialize directly from an openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
"""
parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,
description=molecule.description, multiplicity=molecule.multiplicity,
charge=molecule.charge)
return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)
def make_excitation_generator(self,
indices: typing.Iterable[typing.Tuple[int, int]],
form: str = None,
remove_constant_term: bool = True) -> QubitHamiltonian:
"""
Notes
----------
Creates the transformed hermitian generator of UCC type unitaries:
              M(a^\dagger_{a_0} a_{i_0} a^\dagger_{a_1} a_{i_1} ... - h.c.)
              where the qubit map M depends on self.transformation
Parameters
----------
indices : typing.Iterable[typing.Tuple[int, int]] :
            List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha: even numbers, beta: odd numbers)
can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
form : str : (Default value None):
Manipulate the generator to involution or projector
set form='involution' or 'projector'
the default is no manipulation which gives the standard fermionic excitation operator back
remove_constant_term: bool: (Default value True):
by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
if the unitary is controlled this might not be true!
Returns
-------
type
1j*Transformed qubit excitation operator, depends on self.transformation
"""
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet")
# check indices and convert to list of tuples if necessary
if len(indices) == 0:
raise TequilaException("make_excitation_operator: no indices given")
elif not isinstance(indices[0], typing.Iterable):
if len(indices) % 2 != 0:
raise TequilaException("make_excitation_generator: unexpected input format of indices\n"
"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\n"
"or list as [a_0, i_0, a_1, i_1, ... ]\n"
"you gave: {}".format(indices))
converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]
else:
converted = indices
# convert everything to native python int
# otherwise openfermion will complain
converted = [(int(pair[0]), int(pair[1])) for pair in converted]
# convert to openfermion input format
ofi = []
dag = []
for pair in converted:
assert (len(pair) == 2)
ofi += [(int(pair[0]), 1),
(int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64
dag += [(int(pair[0]), 0), (int(pair[1]), 1)]
op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian
op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)
if isinstance(form, str) and form.lower() != 'fermionic':
# indices for all the Na operators
Na = [x for pair in converted for x in [(pair[0], 1), (pair[0], 0)]]
# indices for all the Ma operators (Ma = 1 - Na)
Ma = [x for pair in converted for x in [(pair[0], 0), (pair[0], 1)]]
# indices for all the Ni operators
Ni = [x for pair in converted for x in [(pair[1], 1), (pair[1], 0)]]
# indices for all the Mi operators
Mi = [x for pair in converted for x in [(pair[1], 0), (pair[1], 1)]]
# can gaussianize as projector or as involution (last is default)
if form.lower() == "p+":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, 0.5)
op += openfermion.FermionOperator(Ni + Ma, 0.5)
elif form.lower() == "p-":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, -0.5)
op += openfermion.FermionOperator(Ni + Ma, -0.5)
elif form.lower() == "g+":
op += openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
elif form.lower() == "g-":
op += openfermion.FermionOperator([], -1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, 1.0)
op += openfermion.FermionOperator(Ni + Ma, 1.0)
elif form.lower() == "p0":
# P0: we only construct P0 and don't keep the original generator
op = openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
else:
raise TequilaException(
"Unknown generator form {}, supported are G, P+, P-, G+, G- and P0".format(form))
qop = self.transformation(op)
# remove constant terms
# they have no effect in the unitary (if not controlled)
if remove_constant_term:
qop.qubit_operator.terms[tuple()] = 0.0
# check if the operator is hermitian and cast coefficients to floats
# in order to avoid trouble with the simulation backends
assert qop.is_hermitian()
for k, v in qop.qubit_operator.terms.items():
qop.qubit_operator.terms[k] = to_float(v)
qop = qop.simplify()
if len(qop) == 0:
warnings.warn("Excitation generator is a unit operator.\n"
"Non-standard transformations might not work with general fermionic operators\n"
"indices = " + str(indices), category=TequilaWarning)
return qop
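    # Usage sketch (commented so the module stays importable): the hermitian
    # generator of a single excitation between spin-orbitals 0 and 2; mol is a
    # placeholder molecule instance.
    #
    #   G = mol.make_excitation_generator(indices=[(2, 0)])
    #   U = gates.Trotterized(generator=G, angle="a", steps=1)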
def make_hardcore_boson_excitation_gate(self, indices, angle, control=None, assume_real=True, compile_options="optimize"):
target = []
for pair in indices:
assert len(pair) == 2
target += [pair[0], pair[1]]
consistency = [x < self.n_orbitals for x in target]
if not all(consistency):
raise TequilaException(
"make_hardcore_boson_excitation_gate: Inconsistencies in indices={}. Should be indexed from 0 ... n_orbitals={}".format(
indices, self.n_orbitals))
return gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, control=control, compile_options=compile_options)
def make_excitation_gate(self, indices, angle, control=None, assume_real=True, **kwargs):
"""
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\\prod_{k} a_{p_k}^\\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
"""
generator = self.make_excitation_generator(indices=indices, remove_constant_term=control is None)
p0 = self.make_excitation_generator(indices=indices, form="P0", remove_constant_term=control is None)
return QCircuit.wrap_gate(
FermionicGateImpl(angle=angle, generator=generator, p0=p0, transformation=type(self.transformation).__name__.lower(), assume_real=assume_real, control=control, **kwargs))
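    # Usage sketch (commented so the module stays importable): a paired double
    # excitation gate between spatial orbitals 0 and 1 (spin-orbitals 0->2 and 1->3)
    # with a variational angle; mol is a placeholder molecule instance.
    #
    #   U = mol.prepare_reference()
    #   U += mol.make_excitation_gate(indices=[(0, 2), (1, 3)], angle="a")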
def make_molecule(self, *args, **kwargs) -> MolecularData:
"""Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
"""
molecule = MolecularData(**self.parameters.molecular_data_param)
# try to load
do_compute = True
try:
import os
if os.path.exists(self.parameters.filename):
molecule.load()
do_compute = False
except OSError:
do_compute = True
if do_compute:
molecule = self.do_make_molecule(*args, **kwargs)
molecule.save()
return molecule
def do_make_molecule(self, *args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
# integrals need to be passed in base class
assert ("one_body_integrals" in kwargs)
assert ("two_body_integrals" in kwargs)
one_body_integrals = kwargs["one_body_integrals"]
two_body_integrals = kwargs["two_body_integrals"]
# tequila assumes "openfermion" ordering, integrals can however be passed
# down in other orderings, but it needs to be indicated by keyword
if "ordering" in kwargs:
two_body_integrals = NBodyTensor(two_body_integrals, ordering=kwargs["ordering"])
two_body_integrals.reorder(to="openfermion")
two_body_integrals = two_body_integrals.elems
if "nuclear_repulsion" in kwargs:
nuclear_repulsion = kwargs["nuclear_repulsion"]
else:
nuclear_repulsion = 0.0
warnings.warn("No nuclear_repulsion given for custom molecule, setting to zero", category=TequilaWarning)
if ("n_orbitals" in kwargs):
n_orbitals = kwargs["n_orbitals"]
else:
n_orbitals = one_body_integrals.shape[0]
for i in [0, 1, 2, 3]:
assert n_orbitals == two_body_integrals.shape[i]
molecule = MolecularData(**self.parameters.molecular_data_param)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.nuclear_repulsion = nuclear_repulsion
molecule.n_orbitals = n_orbitals
if "n_electrons" in kwargs:
molecule.n_electrons = kwargs["n_electrons"]
molecule.save()
return molecule
@property
def n_orbitals(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_orbitals
else:
return len(self.active_space.active_orbitals)
@property
def n_electrons(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_electrons
else:
return 2 * len(self.active_space.active_reference_orbitals)
def make_hamiltonian(self, occupied_indices=None, active_indices=None, threshold=1.e-8) -> QubitHamiltonian:
""" """
if occupied_indices is None and self.active_space is not None:
occupied_indices = self.active_space.frozen_reference_orbitals
if active_indices is None and self.active_space is not None:
active_indices = self.active_space.active_orbitals
fop = openfermion.transforms.get_fermion_operator(
self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))
try:
qop = self.transformation(fop)
except TypeError:
qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))
qop.is_hermitian()
return qop
def make_hardcore_boson_hamiltonian(self):
if not self.transformation.up_then_down:
warnings.warn(
"Hardcore-Boson Hamiltonian without reordering will result in non-consecutive Hamiltonians that are eventually not be combinable with other features of tequila. Try transformation=\'ReorderedJordanWigner\' or similar for more consistency",
TequilaWarning)
# integrate with QubitEncoding at some point
n_orbitals = self.n_orbitals
c, obt, tbt = self.get_integrals()
h = numpy.zeros(shape=[n_orbitals] * 2)
g = numpy.zeros(shape=[n_orbitals] * 2)
for p in range(n_orbitals):
h[p, p] += 2 * obt[p, p]
for q in range(n_orbitals):
h[p, q] += + tbt[p, p, q, q]
if p != q:
g[p, q] += 2 * tbt[p, q, q, p] - tbt[p, q, p, q]
H = c
for p in range(n_orbitals):
for q in range(n_orbitals):
up = p
uq = q
H += h[p, q] * Sm(up) * Sp(uq) + g[p, q] * Sm(up) * Sp(up) * Sm(uq) * Sp(uq)
return H
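    # Usage sketch (commented so the module stays importable): a pair-correlated
    # (hardcore-boson) model built from this Hamiltonian; mol is a placeholder
    # instance and a reordered encoding is assumed (see the warning above).
    #
    #   H_hcb = mol.make_hardcore_boson_hamiltonian()
    #   U_hcb = mol.prepare_hardcore_boson_reference()
    #   U_hcb += mol.make_hardcore_boson_upccgd_layer(indices="upccgd")
    #   E = ExpectationValue(H=H_hcb, U=U_hcb)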
def make_molecular_hamiltonian(self):
if self.active_space:
return self.molecule.get_molecular_hamiltonian(occupied_indices=self.active_space.frozen_reference_orbitals,
active_indices=self.active_space.active_orbitals)
else:
return self.molecule.get_molecular_hamiltonian()
def get_integrals(self, two_body_ordering="openfermion"):
"""
Returns
-------
Tuple with:
constant part (nuclear_repulsion + possible integrated parts from active-spaces)
one_body_integrals
two_body_integrals
"""
if self.active_space is not None and len(self.active_space.frozen_reference_orbitals) > 0:
c, h1, h2 = self.molecule.get_active_space_integrals(active_indices=self.active_space.active_orbitals,
occupied_indices=self.active_space.frozen_reference_orbitals)
else:
c = 0.0
h1 = self.molecule.one_body_integrals
h2 = self.molecule.two_body_integrals
c += self.molecule.nuclear_repulsion
h2 = NBodyTensor(h2, ordering="openfermion")
h2 = h2.reorder(to=two_body_ordering).elems
return c, h1, h2
def compute_one_body_integrals(self):
""" convenience function """
c, h1, h2 = self.get_integrals()
return h1
def compute_two_body_integrals(self, two_body_ordering="openfermion"):
""" """
c, h1, h2 = self.get_integrals(two_body_ordering=two_body_ordering)
return h2
def compute_constant_part(self):
c, h1, h2 = self.get_integrals()
return c
def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:
""" """
raise Exception("BaseClass Method")
def prepare_reference(self, state=None, *args, **kwargs):
"""
Returns
-------
A tequila circuit object which prepares the reference of this molecule in the chosen transformation
"""
if state is None:
assert self.n_electrons %2 == 0
state = [0]*(self.n_orbitals*2)
for i in range(self.n_electrons):
state[i]=1
reference_state = BitString.from_array(self.transformation.map_state(state=state))
U = prepare_product_state(reference_state)
# prevent trace out in direct wfn simulation
U.n_qubits = self.n_orbitals*2 # adapt when tapered transformations work
return U
def prepare_hardcore_boson_reference(self):
# HF state in the HCB representation (paired electrons)
U = gates.X(target=[i for i in range(self.n_electrons // 2)])
U.n_qubits = self.n_orbitals
return U
def hcb_to_me(self, U=None):
"""
Transform a circuit in the hardcore-boson encoding (HCB)
to the encoding of this molecule
HCB is supposed to be encoded on the first n_orbitals qubits
Parameters
----------
U: HCB circuit (using the alpha qubits)
Returns
-------
"""
if U is None:
U = QCircuit()
# consistency
consistency = [x < self.n_orbitals for x in U.qubits]
if not all(consistency):
warnings.warn(
"hcb_to_me: given circuit is not defined on the first {} qubits. Is this a HCB circuit?".format(
self.n_orbitals))
# map to alpha qubits
alpha_map = {k: self.transformation.up(k) for k in range(self.n_orbitals)}
alpha_U = U.map_qubits(qubit_map=alpha_map)
UX = self.transformation.hcb_to_me()
if UX is None:
raise TequilaException(
"transformation={} has no hcb_to_me function implemented".format(self.transformation))
return alpha_U + UX
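    # Usage sketch (commented so the module stays importable): build a circuit in
    # the hardcore-boson encoding and map it to the molecule's qubit encoding;
    # mol is a placeholder instance with a reordered Jordan-Wigner transformation.
    #
    #   U_hcb = mol.prepare_hardcore_boson_reference()
    #   U_hcb += mol.make_hardcore_boson_excitation_gate(indices=[(0, 1)], angle="a")
    #   U = mol.hcb_to_me(U=U_hcb)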
def get_pair_specific_indices(self,
pair_info: str = None,
include_singles: bool = True,
general_excitations: bool = True) -> list:
"""
Assuming a pair-specific model, create a pair-specific index list
to be used in make_upccgsd_ansatz(indices = ... )
Excite from a set of references (i) to any pair coming from (i),
i.e. any (i,j)/(j,i). If general excitations are allowed, also
        allow excitations from pairs to connected pairs and to the reference.
Parameters
----------
pair_info
file or list including information about pair structure
references single number, pair double
example: as file: "0,1,11,11,00,10" (hand over file name)
in file, skip first row assuming some text with information
            as list: ['0','1','11','11','00','10']
~> two reference orbitals 0 and 1,
then two orbitals from pair 11, one from 00, one mixed 10
include_singles
include single excitations
general_excitations
allow general excitations
Returns
-------
list of indices with pair-specific ansatz
"""
if pair_info is None:
raise TequilaException("Need to provide some pair information.")
# If pair-information given on file, load (layout see above)
if isinstance(pair_info, str):
pairs = numpy.loadtxt(pair_info, dtype=str, delimiter=",", skiprows=1)
elif isinstance(pair_info, list):
pairs = pair_info
        else:
            raise TequilaException("Pair information needs to be contained in a list or filename.")
connect = [[]] * len(pairs)
# determine "connectivity"
generalized = 0
for idx, p in enumerate(pairs):
if len(p) == 1:
connect[idx] = [i for i in range(len(pairs))
if ((len(pairs[i]) == 2) and (str(idx) in pairs[i]))]
elif (len(p) == 2) and general_excitations:
connect[idx] = [i for i in range(len(pairs))
if (((p[0] in pairs[i]) or (p[1] in pairs[i]) or str(i) in p)
and not (i == idx))]
elif len(p) > 2:
raise TequilaException("Invalid reference of pair id.")
# create generating indices from connectivity
indices = []
for i, to in enumerate(connect):
for a in to:
indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))
if include_singles:
                    indices.append((2 * i, 2 * a))
                    indices.append((2 * i + 1, 2 * a + 1))
return indices
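    # Illustrative sketch (commented): a hypothetical pair structure with two
    # reference orbitals ('0', '1') and two paired orbitals, handed over as a list;
    # the resulting index list can be used for ansatz construction as described above.
    #
    #   pair_info = ['0', '1', '11', '00']
    #   idx = mol.get_pair_specific_indices(pair_info=pair_info, include_singles=False)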
def format_excitation_indices(self, idx):
"""
Consistent formatting of excitation indices
idx = [(p0,q0),(p1,q1),...,(pn,qn)]
sorted as: p0<p1<pn and pi<qi
:param idx: list of index tuples describing a single(!) fermionic excitation
:return: tuple-list of index tuples
"""
idx = [tuple(sorted(x)) for x in idx]
idx = sorted(idx, key=lambda x: x[0])
return tuple(idx)
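    # Illustrative sketch (commented): the canonical ordering produced above.
    #
    #   mol.format_excitation_indices([(2, 0), (3, 1)])
    #   # -> ((0, 2), (1, 3))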
def make_upccgsd_indices(self, key, reference_orbitals=None, *args, **kwargs):
if reference_orbitals is None:
reference_orbitals = [i for i in range(self.n_electrons // 2)]
indices = []
# add doubles in hcb encoding
if hasattr(key, "lower") and key.lower() == "ladder":
# ladder structure of the pair excitations
# ensures local connectivity
indices = [[(n, n + 1)] for n in range(self.n_orbitals - 1)]
elif hasattr(key, "lower") and "g" not in key.lower():
indices = [[(n, m)] for n in reference_orbitals for m in range(self.n_orbitals) if
n < m and m not in reference_orbitals]
elif hasattr(key, "lower") and "g" in key.lower():
indices = [[(n, m)] for n in range(self.n_orbitals) for m in range(self.n_orbitals) if n < m]
else:
raise TequilaException("Unknown recipe: {}".format(key))
indices = [self.format_excitation_indices(idx) for idx in indices]
return indices
def make_hardcore_boson_upccgd_layer(self,
indices: list = "UpCCGD",
label: str = None,
assume_real: bool = True,
*args, **kwargs):
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices.lower())
UD = QCircuit()
for idx in indices:
UD += self.make_hardcore_boson_excitation_gate(indices=idx, angle=(idx, "D", label),
assume_real=assume_real)
return UD
def make_ansatz(self, name:str, *args, **kwargs):
name = name.lower()
if name.strip()=="":
return QCircuit()
if "+" in name:
U = QCircuit()
subparts = name.split("+")
U = self.make_ansatz(name=subparts[0], *args ,**kwargs)
if "include_reference" in kwargs:
kwargs.pop("include_reference")
if "hcb_optimization" in kwargs:
kwargs.pop("hcb_optimization")
for subpart in subparts[1:]:
U += self.make_ansatz(name=subpart, *args, include_reference=False, hcb_optimization=False, **kwargs)
return U
if name=="uccsd":
return self.make_uccsd_ansatz(*args, **kwargs)
elif "d" in name or "s" in name:
return self.make_upccgsd_ansatz(name=name, *args, **kwargs)
else:
raise TequilaException("unknown ansatz with name={}".format(name))
def make_upccgsd_ansatz(self,
include_reference: bool = True,
name: str = "UpCCGSD",
label: str = None,
order: int = None,
assume_real: bool = True,
hcb_optimization: bool = None,
spin_adapt_singles: bool = True,
                             neglect_z: bool = None,
*args, **kwargs):
"""
        UpCCGSD ansatz similar to the one described by Lee et al.
Parameters
----------
include_singles
include singles excitations. Is overwritten if indices are a string (i.e. indices=UpCCGSD will always include singles, UpCCGD will not)
include_reference
include the HF reference state as initial state
indices
pass custom defined set of indices from which the ansatz will be created
List of tuples of tuples spin-indices e.g. [((2*p,2*q),(2*p+1,2*q+1)), ...]
label
An additional label that is set with the variables
default is None and no label will be set: variables names will be
(x, (p,q)) for x in range(order)
with a label the variables will be named
(label, (x, (p,q)))
order
Order of the ansatz (default is 1)
            determines how often the excitation layer is repeated
parameters of repeating layers are independent
assume_real
assume a real wavefunction (that is always the case if the reference state is real)
reduces potential gradient costs from 4 to 2
Returns
-------
UpGCCSD ansatz
"""
name = name.upper()
if ("A" in name) and neglect_z is None:
neglect_z = True
else:
neglect_z = False
if order is None:
try:
if "-" in name:
order = int(name.split("-")[0])
else:
order = 1
except:
order = 1
indices = self.make_upccgsd_indices(key=name)
# check if the used qubit encoding has a hcb transformation
have_hcb_trafo = self.transformation.hcb_to_me() is not None
# consistency checks for optimization
if have_hcb_trafo and hcb_optimization is None:
hcb_optimization = True
if "HCB" in name:
hcb_optimization = True
if hcb_optimization and not have_hcb_trafo and "HCB" not in name:
raise TequilaException(
"use_hcb={} but transformation={} has no \'hcb_to_me\' function. Try transformation=\'ReorderedJordanWigner\'".format(
hcb_optimization, self.transformation))
if "S" in name and "HCB" in name:
if "HCB" in name and "S" in name:
raise Exception(
"name={}, Singles can't be realized without mapping back to the standard encoding leave S or HCB out of the name".format(
name))
# first layer
if not hcb_optimization:
U = QCircuit()
if include_reference:
U = self.prepare_reference()
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, assume_real=assume_real,
label=(label, 0), spin_adapt_singles=spin_adapt_singles, *args, **kwargs)
else:
U = QCircuit()
if include_reference:
U = self.prepare_hardcore_boson_reference()
U += self.make_hardcore_boson_upccgd_layer(indices=indices, assume_real=assume_real, label=(label, 0),
*args, **kwargs)
if "HCB" not in name:
U = self.hcb_to_me(U=U)
if "S" in name:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=(label, 0),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z, *args, **kwargs)
for k in range(1, order):
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, label=(label, k),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z)
return U
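    # Usage sketch (commented so the module stays importable): a 2-layer UpCCGSD
    # ansatz; the molecule settings are placeholders and a backend is assumed.
    #
    #   import tequila as tq
    #   mol = tq.Molecule(geometry="H 0.0 0.0 0.0\nH 0.0 0.0 0.7", basis_set="sto-3g")
    #   U = mol.make_upccgsd_ansatz(name="2-UpCCGSD")
    #   E = tq.ExpectationValue(H=mol.make_hamiltonian(), U=U)
    #   result = tq.minimize(E)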
def make_upccgsd_layer(self, indices, include_singles=True, include_doubles=True, assume_real=True, label=None,
spin_adapt_singles: bool = True, angle_transform=None, mix_sd=False, neglect_z=False, *args, **kwargs):
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
angle = (tuple([idx]), "D", label)
if include_doubles:
if "jordanwigner" in self.transformation.name.lower() and not self.transformation.up_then_down:
# we can optimize with qubit excitations for the JW representation
target=[self.transformation.up(idx[0]), self.transformation.up(idx[1]), self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle,
indices=((2 * idx[0], 2 * idx[1]), (2 * idx[0] + 1, 2 * idx[1] + 1)),
assume_real=assume_real, **kwargs)
if include_singles and mix_sd:
                U += self.make_upccgsd_singles(indices=[(idx,)], assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
if include_singles and not mix_sd:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
return U
def make_upccgsd_singles(self, indices="UpCCGSD", spin_adapt_singles=True, label=None, angle_transform=None,
assume_real=True, neglect_z=False, *args, **kwargs):
if neglect_z and "jordanwigner" not in self.transformation.name.lower():
raise TequilaException("neglegt-z approximation in UpCCGSD singles needs the (Reversed)JordanWigner representation")
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices)
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
if spin_adapt_singles:
angle = (idx, "S", label)
if angle_transform is not None:
angle = angle_transform(angle)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0], 2 * idx[1])], assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
else:
angle1 = (idx, "SU", label)
angle2 = (idx, "SD", label)
if angle_transform is not None:
angle1 = angle_transform(angle1)
angle2 = angle_transform(angle2)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
                    U += gates.QubitExcitation(angle=angle1, target=targeta, assume_real=assume_real, **kwargs)
                    U += gates.QubitExcitation(angle=angle2, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle1, indices=[(2 * idx[0], 2 * idx[1])],
assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle2, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
return U
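    # Descriptive note (not part of the library code): with spin_adapt_singles=True a single
    # variable ((p, q), "S", label) drives both the alpha and the beta single excitation of the
    # pair (p, q); with spin_adapt_singles=False two independent variables ((p, q), "SU", label)
    # and ((p, q), "SD", label) are created for the two spin channels.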
def make_uccsd_ansatz(self, trotter_steps: int=1,
initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = "mp2",
include_reference_ansatz=True,
parametrized=True,
threshold=1.e-8,
add_singles=None,
*args, **kwargs) -> QCircuit:
"""
Parameters
----------
initial_amplitudes :
initial amplitudes given as ManyBodyAmplitudes structure or as string
where 'mp2', 'cc2' or 'ccsd' are possible initializations
include_reference_ansatz :
Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
parametrized :
Initialize with variables, otherwise with static numbers (Default value = True)
        trotter_steps: int :
            Number of Trotter steps used to factorize the UCCSD unitary (Default value = 1)
        threshold :
            Neglect amplitudes below this threshold (Default value = 1.e-8)
        add_singles :
            Add single excitations; if None, singles are added automatically when initial_amplitudes='mp2'
Returns
-------
type
Parametrized QCircuit
"""
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2" and add_singles is None:
add_singles=True
elif initial_amplitudes is not None and add_singles is not None:
warnings.warn("make_uccsd_anstatz: add_singles has no effect when explicit amplitudes are passed down", TequilaWarning)
elif add_singles is None:
add_singles=True
if self.n_electrons % 2 != 0:
raise TequilaException("make_uccsd_ansatz currently only for closed shell systems")
nocc = self.n_electrons // 2
nvirt = self.n_orbitals - nocc
Uref = QCircuit()
if include_reference_ansatz:
Uref = self.prepare_reference()
amplitudes = initial_amplitudes
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2":
amplitudes = self.compute_mp2_amplitudes()
elif initial_amplitudes.lower() == "ccsd":
amplitudes = self.compute_ccsd_amplitudes()
else:
try:
amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())
except Exception as exc:
raise TequilaException(
"{}\nDon't know how to initialize \'{}\' amplitudes".format(exc, initial_amplitudes))
if amplitudes is None:
tia=None
if add_singles: tia=numpy.zeros(shape=[nocc, nvirt])
amplitudes = ClosedShellAmplitudes(
tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),
tIA=tia)
closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)
indices = {}
if not isinstance(amplitudes, dict):
amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)
amplitudes = dict(sorted(amplitudes.items(), key=lambda x: numpy.fabs(x[1]), reverse=True))
for key, t in amplitudes.items():
assert (len(key) % 2 == 0)
if not numpy.isclose(t, 0.0, atol=threshold):
if closed_shell:
if len(key) == 2 and add_singles:
# singles
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_a = (2*key[0], 2*key[1])
idx_b = (2*key[0]+1, 2*key[1]+1)
indices[idx_a]=angle
indices[idx_b]=angle
else:
assert len(key)==4
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_abab=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3])
indices[idx_abab]=angle
if key[0]!=key[2] and key[1]!=key[3]:
idx_aaaa=(2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3])
idx_bbbb=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2]+1, 2 * key[3]+1)
partner = tuple([key[2], key[1], key[0], key[3]])
anglex=2.0*(t - amplitudes[partner])
if parametrized:
anglex=2.0*(Variable(name=key) - Variable(partner))
indices[idx_aaaa]=anglex
indices[idx_bbbb]=anglex
else:
raise Exception("only closed-shell supported, please assemble yourself .... sorry :-)")
UCCSD = QCircuit()
factor = 1.0 / trotter_steps
for step in range(trotter_steps):
for idx, angle in indices.items():
UCCSD += self.make_excitation_gate(indices=idx, angle=factor * angle)
if hasattr(initial_amplitudes,"lower") and initial_amplitudes.lower()=="mp2" and parametrized and add_singles:
            # mp2 has no singles, need to initialize them here (if not parametrized, initializing as 0.0 makes no sense though)
UCCSD += self.make_upccgsd_layer(indices="upccsd", include_singles=True, include_doubles=False)
return Uref + UCCSD
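    # Illustrative usage sketch (commented out; not part of the library code). It assumes a
    # quantum chemistry backend (e.g. psi4) is installed so that MP2/CCSD amplitudes can be computed:
    #
    #   import tequila as tq
    #   mol = tq.Molecule(geometry="H 0.0 0.0 0.0\nH 0.0 0.0 0.7", basis_set="sto-3g")
    #   U = mol.make_uccsd_ansatz(initial_amplitudes="mp2", trotter_steps=1)
    #   E = tq.ExpectationValue(H=mol.make_hamiltonian(), U=U)
    #   result = tq.minimize(E)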
def compute_amplitudes(self, method: str, *args, **kwargs):
"""
Compute closed-shell CC amplitudes
Parameters
----------
method :
coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)
Success might depend on backend
            MP2 has its own dedicated function (compute_mp2_amplitudes)
*args :
**kwargs :
Returns
-------
"""
raise TequilaException("compute amplitudes: Needs to be overwritten by backend")
# MASKED: compute_mp2_amplitudes function (lines 1592-1621)
def compute_cis_amplitudes(self):
"""
Compute the CIS amplitudes of the molecule
"""
@dataclass
class ResultCIS:
""" """
omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]
amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]
def __getitem__(self, item):
return (self.omegas[item], self.amplitudes[item])
def __len__(self):
return len(self.omegas)
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.n_alpha_electrons
nvirt = self.n_orbitals - nocc
pairs = []
for i in range(nocc):
for a in range(nocc, nocc + nvirt):
pairs.append((a, i))
M = numpy.ndarray(shape=[len(pairs), len(pairs)])
for xx, x in enumerate(pairs):
eia = fij[x[0]] - fij[x[1]]
a, i = x
for yy, y in enumerate(pairs):
b, j = y
delta = float(y == x)
gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]
M[xx, yy] = eia * delta + gpart
omega, xvecs = numpy.linalg.eigh(M)
# convert amplitudes to ndarray sorted by excitation energy
nex = len(omega)
amplitudes = []
for ex in range(nex):
t = numpy.ndarray(shape=[nvirt, nocc])
            exvec = xvecs[:, ex]
for xx, x in enumerate(pairs):
a, i = x
t[a - nocc, i] = exvec[xx]
amplitudes.append(ClosedShellAmplitudes(tIA=t))
return ResultCIS(omegas=list(omega), amplitudes=amplitudes)
@property
def rdm1(self):
"""
        Returns the 1-RDM if it was computed with the compute_rdms function before
"""
if self._rdm1 is not None:
return self._rdm1
else:
print("1-RDM has not been computed. Return None for 1-RDM.")
return None
@property
def rdm2(self):
"""
        Returns the 2-RDM if it was computed with the compute_rdms function before
This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
"""
if self._rdm2 is not None:
return self._rdm2
else:
print("2-RDM has not been computed. Return None for 2-RDM.")
return None
def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,
get_rdm1: bool = True, get_rdm2: bool = True, ordering="dirac"):
"""
Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
a unitary U. This method uses the standard ordering in physics as denoted below.
Note, that the representation of the density matrices depends on the qubit transformation
used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
matrices in the occupation picture.
We only consider real orbitals and thus real-valued RDMs.
The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.
        .. math::
\\text{rdm1: } \\gamma^p_q = \\langle \\psi | a^p a_q | \\psi \\rangle
= \\langle U 0 | a^p a_q | U 0 \\rangle
\\text{rdm2: } \\gamma^{pq}_{rs} = \\langle \\psi | a^p a^q a_s a_r | \\psi \\rangle
= \\langle U 0 | a^p a^q a_s a_r | U 0 \\rangle
Parameters
----------
U :
Quantum Circuit to achieve the desired state \\psi = U |0\\rangle, non-optional
variables :
If U is parametrized, then need to hand over a set of fixed variables
spin_free :
Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
get_rdm1, get_rdm2 :
Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
it is recommended to compute them at once.
Returns
-------
"""
# Check whether unitary circuit is not 0
if U is None:
raise TequilaException('Need to specify a Quantum Circuit.')
# Check whether transformation is BKSF.
# Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct
# transformation, because it computes the number of qubits incorrectly in this case.
# A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.")
# Set up number of spin-orbitals and molecular orbitals respectively
n_SOs = 2 * self.n_orbitals
n_MOs = self.n_orbitals
def _get_of_op(operator_tuple):
""" Returns operator given by a operator tuple as OpenFermion - Fermion operator """
op = openfermion.FermionOperator(operator_tuple)
return op
def _get_qop_hermitian(of_operator) -> QubitHamiltonian:
""" Returns Hermitian part of Fermion operator as QubitHamiltonian """
qop = self.transformation(of_operator)
#qop = QubitHamiltonian(self.transformation(of_operator))
real, imag = qop.split(hermitian=True)
if real:
return real
            else:
raise TequilaException(
"Qubit Hamiltonian does not have a Hermitian part. Operator ={}".format(of_operator))
def _build_1bdy_operators_spinful() -> list:
""" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp
ops = []
for p in range(n_SOs):
for q in range(p + 1):
op_tuple = ((p, 1), (q, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinful() -> list:
""" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = -pqsr = -qprs = qpsr
# and = rspq
ops = []
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_1bdy_operators_spinfree() -> list:
""" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp (not changed by spin-summation)
ops = []
for p in range(n_MOs):
for q in range(p + 1):
# Spin aa
op_tuple = ((2 * p, 1), (2 * q, 0))
op = _get_of_op(op_tuple)
# Spin bb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinfree() -> list:
""" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)
# and = rspq
ops = []
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
# Spin aaaa
op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p != q and r != s) else '0.0 []'
op = _get_of_op(op_tuple)
# Spin abab
op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (
2 * p != 2 * q + 1 and 2 * r != 2 * s + 1) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin baba
op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (
2 * p + 1 != 2 * q and 2 * r + 1 != 2 * s) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin bbbb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (
p != q and r != s) else '0.0 []'
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _assemble_rdm1(evals) -> numpy.ndarray:
"""
Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
Same symmetry with or without spin, so we can use the same function
"""
N = n_MOs if spin_free else n_SOs
rdm1 = numpy.zeros([N, N])
ctr: int = 0
for p in range(N):
for q in range(p + 1):
rdm1[p, q] = evals[ctr]
# Symmetry pq = qp
rdm1[q, p] = rdm1[p, q]
ctr += 1
return rdm1
def _assemble_rdm2_spinful(evals) -> numpy.ndarray:
""" Returns spin-ful two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetries due to anticommutation relations
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr
rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs
rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr
return rdm2
def _assemble_rdm2_spinfree(evals) -> numpy.ndarray:
""" Returns spin-free two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetry: pqrs = qpsr
for p, q, r, s in product(range(n_MOs), repeat=4):
if p >= q or r >= s:
rdm2[q, p, s, r] = rdm2[p, q, r, s]
return rdm2
# Build operator lists
qops = []
if spin_free:
qops += _build_1bdy_operators_spinfree() if get_rdm1 else []
qops += _build_2bdy_operators_spinfree() if get_rdm2 else []
else:
qops += _build_1bdy_operators_spinful() if get_rdm1 else []
qops += _build_2bdy_operators_spinful() if get_rdm2 else []
# Transform operator lists to QubitHamiltonians
qops = [_get_qop_hermitian(op) for op in qops]
# Compute expected values
evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)
# Assemble density matrices
# If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type
def _reset_rdm(rdm):
if rdm is not None:
if spin_free and rdm.shape[0] != n_MOs:
return None
if not spin_free and rdm.shape[0] != n_SOs:
return None
return rdm
self._rdm1 = _reset_rdm(self._rdm1)
self._rdm2 = _reset_rdm(self._rdm2)
# Split expectation values in 1- and 2-particle expectation values
if get_rdm1:
len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2
else:
len_1 = 0
evals_1, evals_2 = evals[:len_1], evals[len_1:]
# Build matrices using the expectation values
self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1
if spin_free:
self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2
else:
self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2
if get_rdm2:
rdm2 = NBodyTensor(elems=self.rdm2, ordering="dirac")
rdm2.reorder(to=ordering)
rdm2 = rdm2.elems
self._rdm2 = rdm2
if get_rdm1:
if get_rdm2:
return self.rdm1, self.rdm2
else:
return self.rdm1
elif get_rdm2:
return self.rdm2
else:
warnings.warn("compute_rdms called with instruction to not compute?", TequilaWarning)
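    # Illustrative usage sketch (commented out; not part of the library code), assuming `mol` is a
    # molecule object, `U` a state-preparation circuit and `variables` its optimized angles:
    #
    #   rdm1, rdm2 = mol.compute_rdms(U=U, variables=variables, spin_free=True)
    #   # afterwards also accessible as mol.rdm1 / mol.rdm2 (rdm2 in Dirac ordering unless changed)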
def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:
"""
Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.
Parameters
----------
sum_rdm1, sum_rdm2 :
If set to true, perform spin summation on rdm1, rdm2
Returns
-------
rdm1_spinsum, rdm2_spinsum :
The desired spin-free matrices
"""
n_MOs = self.n_orbitals
rdm1_spinsum = None
rdm2_spinsum = None
# Spin summation on rdm1
if sum_rdm1:
            # Check whether spin-rdm1 exists
if self._rdm1 is None:
raise TequilaException("The spin-RDM for the 1-RDM does not exist!")
# Check whether existing rdm1 is in spin-orbital basis
if self._rdm1.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])
for p in range(n_MOs):
for q in range(p + 1):
rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]
rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]
for p in range(n_MOs):
for q in range(p):
rdm1_spinsum[q, p] = rdm1_spinsum[p, q]
# Spin summation on rdm2
if sum_rdm2:
# Check whether spin-rdm2 exists
if self._rdm2 is None:
raise TequilaException("The spin-RDM for the 2-RDM does not exist!")
# Check whether existing rdm2 is in spin-orbital basis
if self._rdm2.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm2_spinsum = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]
return rdm1_spinsum, rdm2_spinsum
def perturbative_f12_correction(self, rdm1: numpy.ndarray = None, rdm2: numpy.ndarray = None,
gamma: float = 1.4, n_ri: int = None,
external_info: dict = None, **kwargs) -> float:
"""
Computes the spin-free [2]_R12 correction, needing only the 1- and 2-RDM of a reference method
Requires either 1-RDM, 2-RDM or information to compute them in kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
dimensionality of RI-basis; specify only, if want to truncate available RI-basis
if None, then the maximum available via tensors / basis-set is used
must not be larger than size of available RI-basis, and not smaller than size of OBS
for n_ri==dim(OBS), the correction returns zero
external_info :
            for usage in qc_base, need to provide information on where to find the f12-tensor <rs|f_12|pq>;
pass dictionary with {"f12_filename": where to find f12-tensor, "scheme": ordering scheme of tensor}
kwargs :
e.g. RDM-information via {"U": QCircuit, "variables": optimal angles}, needs to be passed if rdm1,rdm2 not
yet computed
Returns
-------
the f12 correction for the energy
"""
from .f12_corrections._f12_correction_base import ExplicitCorrelationCorrection
correction = ExplicitCorrelationCorrection(mol=self, rdm1=rdm1, rdm2=rdm2, gamma=gamma,
n_ri=n_ri, external_info=external_info, **kwargs)
return correction.compute()
def __str__(self) -> str:
result = str(type(self)) + "\n"
result += "Qubit Encoding\n"
result += str(self.transformation) + "\n\n"
result += "Parameters\n"
for k, v in self.parameters.__dict__.items():
result += "{key:15} : {value:15} \n".format(key=str(k), value=str(v))
result += "\n"
return result
def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:
"""
Compute closed-shell mp2 amplitudes
.. math::
            t(a,i,b,j) = g(a,i,b,j) / (e(i) + e(j) - e(a) - e(b))
        Returns
        -------
        ClosedShellAmplitudes with the tIjAb amplitudes; the MP2 energy is stored as self.molecule.mp2_energy
"""
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.molecule.n_electrons // 2 # this is never the active space
ei = fij[:nocc]
ai = fij[nocc:]
abgij = g[nocc:, nocc:, :nocc, :nocc]
amplitudes = abgij * 1.0 / (
ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))
E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,
optimize='greedy')
self.molecule.mp2_energy = E + self.molecule.hf_energy
return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))
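    # Illustrative usage sketch (commented out; not part of the library code), assuming a backend
    # molecule that provides integrals and orbital energies:
    #
    #   amps = mol.compute_mp2_amplitudes()     # ClosedShellAmplitudes with tIjAb
    #   print(mol.molecule.mp2_energy)          # MP2 total energy, stored as a side effect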
import os
from dataclasses import dataclass
from tequila import TequilaException, BitString, TequilaWarning
from tequila.hamiltonian import QubitHamiltonian
from tequila.wavefunction import QubitWaveFunction
from tequila.hamiltonian.paulis import Sp, Sm, Qp, Qm
from tequila.circuit import QCircuit, gates, _gates_impl
from tequila.objective.objective import Variable, Variables, ExpectationValue
from tequila.simulators.simulator_api import simulate
from tequila.utils import to_float
from tequila.objective import assign_variable
from .encodings import known_encodings
import typing, numpy, numbers, copy
from itertools import product
# if you are experiencing import errors you need to update openfermion
# required is version >= 1.0
# otherwise replace with from openfermion.hamiltonians import MolecularData
import openfermion
from openfermion.chem import MolecularData
import warnings
@dataclass
class ActiveSpaceData:
active_orbitals: list # active orbitals (spatial, c1)
reference_orbitals: list # reference orbitals (spatial, c1)
def __str__(self):
result = "Active Space Data:\n"
result += "{key:15} : {value:15} \n".format(key="active_orbitals", value=str(self.active_orbitals))
result += "{key:15} : {value:15} \n".format(key="reference_orbitals",
value=str(self.reference_orbitals))
result += "{key:15} : {value:15} \n".format(key="frozen_docc", value=str(self.frozen_docc))
result += "{key:15} : {value:15} \n".format(key="frozen_uocc", value=str(self.frozen_uocc))
return result
@property
def frozen_reference_orbitals(self):
return [i for i in self.reference_orbitals if i not in self.active_orbitals]
@property
def active_reference_orbitals(self):
return [i for i in self.reference_orbitals if i in self.active_orbitals]
class FermionicGateImpl(gates.QubitExcitationImpl):
# keep the overview in circuits
def __init__(self, generator, p0, transformation, *args, **kwargs):
super().__init__(generator=generator, target=generator.qubits, p0=p0, *args, **kwargs)
self._name = "FermionicExcitation"
self.transformation=transformation
def compile(self):
return gates.Trotterized(generator=self.generator, control=self.control, angle=self.parameter, steps=1)
def prepare_product_state(state: BitString) -> QCircuit:
"""Small convenience function
Parameters
----------
state :
product state encoded into a bitstring
state: BitString :
Returns
-------
type
unitary circuit which prepares the product state
"""
result = QCircuit()
for i, v in enumerate(state.array):
if v == 1:
result += gates.X(target=i)
return result
@dataclass
class ParametersQC:
"""Specialization of ParametersHamiltonian"""
basis_set: str = None # Quantum chemistry basis set
geometry: str = None # geometry of the underlying molecule (units: Angstrom!),
# this can be a filename leading to an .xyz file or the geometry given as a string
description: str = ""
multiplicity: int = 1
charge: int = 0
name: str = None
@property
def n_electrons(self, *args, **kwargs):
return self.get_nuc_charge() - self.charge
def get_nuc_charge(self):
return sum(self.get_atom_number(name=atom) for atom in self.get_atoms())
def get_atom_number(self, name):
atom_numbers={"h":1, "he":2, "li":3, "be":4, "b":5, "c":6, "n":7, "o":8, "f":9, "ne":10, "na":11, "mg":12, "al":13, "si":14, "ph":15, "s":16, "cl":17, "ar":18}
if name.lower() in atom_numbers:
return atom_numbers[name.lower()]
        try:
            import periodictable as pt
            atom = name.lower().capitalize()  # strings are immutable, so build the capitalized symbol directly
            element = pt.elements.symbol(atom)
            return element.number  # atomic number is an attribute, not a method
        except Exception:
            raise TequilaException("can not assign atomic number to element {}\npip install periodictable will fix it".format(name))
def get_atoms(self):
return [x[0] for x in self.get_geometry()]
def __post_init__(self,*args, **kwargs):
if self.name is None and self.geometry is None:
raise TequilaException("no geometry or name given to molecule\nprovide geometry=filename.xyz or geometry=`h 0.0 0.0 0.0\\n...`\nor name=whatever with file whatever.xyz being present")
# auto naming
if self.name is None:
if ".xyz" in self.geometry:
self.name=self.geometry.split(".xyz")[0]
if self.description is None:
coord, description = self.read_xyz_from_file()
self.description=description
else:
atoms=self.get_atoms()
atom_names=sorted(list(set(atoms)), key=lambda x: self.get_atom_number(x), reverse=True)
if self.name is None:
drop_ones=lambda x: "" if x==1 else x
self.name="".join(["{}{}".format(x,drop_ones(atoms.count(x))) for x in atom_names])
self.name = self.name.lower()
if self.geometry is None:
self.geometry=self.name+".xyz"
if ".xyz" in self.geometry and not os.path.isfile(self.geometry):
raise TequilaException("could not find file for molecular coordinates {}".format(self.geometry))
@property
def filename(self):
""" """
return "{}_{}".format(self.name, self.basis_set)
@property
def molecular_data_param(self) -> dict:
""":return: Give back all parameters for the MolecularData format from openfermion as dictionary"""
return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,
'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename
}
@staticmethod
def format_element_name(string):
"""OpenFermion uses case sensitive hash tables for chemical elements
I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work
this convenience function does the naming
:return: first letter converted to upper rest to lower
Parameters
----------
string :
Returns
-------
"""
assert (len(string) > 0)
assert (isinstance(string, str))
fstring = string[0].upper() + string[1:].lower()
return fstring
@staticmethod
def convert_to_list(geometry):
"""Convert a molecular structure given as a string into a list suitable for openfermion
Parameters
----------
geometry :
a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0\n h 0.0 0.0 1.0"
Returns
-------
type
            A list in the format openfermion expects, e.g. [('H', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 1.0))]
"""
result = []
# Remove blank lines
lines = [l for l in geometry.split("\n") if l]
for line in lines:
words = line.split()
# Pad coordinates
if len(words) < 4:
words += [0.0] * (4 - len(words))
try:
tmp = (ParametersQC.format_element_name(words[0]),
(float(words[1]), float(words[2]), float(words[3])))
result.append(tmp)
except ValueError:
print("get_geometry list unknown line:\n ", line, "\n proceed with caution!")
return result
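    # Illustrative example (commented out; not part of the library code):
    #
    #   ParametersQC.convert_to_list("h 0.0 0.0 0.0\nh 0.0 0.0 0.7")
    #   # -> [('H', (0.0, 0.0, 0.0)), ('H', (0.0, 0.0, 0.7))]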
def get_geometry_string(self) -> str:
"""returns the geometry as a string
:return: geometry string
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if comment is not None:
self.description = comment
return geomstring
else:
return self.geometry
def get_geometry(self):
"""Returns the geometry
If a xyz filename was given the file is read out
otherwise it is assumed that the geometry was given as string
which is then reformatted as a list usable as input for openfermion
:return: geometry as list
e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
Units: Angstrom!
Parameters
----------
Returns
-------
"""
if self.geometry.split('.')[-1] == 'xyz':
geomstring, comment = self.read_xyz_from_file(self.geometry)
if self.description == '':
self.description = comment
return self.convert_to_list(geomstring)
elif self.geometry is not None:
return self.convert_to_list(self.geometry)
else:
raise Exception("Parameters.qc.geometry is None")
@staticmethod
def read_xyz_from_file(filename):
"""Read XYZ filetype for molecular structures
https://en.wikipedia.org/wiki/XYZ_file_format
Units: Angstrom!
Parameters
----------
filename :
return:
Returns
-------
"""
with open(filename, 'r') as file:
content = file.readlines()
natoms = int(content[0])
comment = str(content[1]).strip('\n')
coord = ''
for i in range(natoms):
coord += content[2 + i]
return coord, comment
@dataclass
class ClosedShellAmplitudes:
""" """
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Returns
-------
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
for (I, J, A, B), value in numpy.ndenumerate(self.tIjAb):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(nocc + A, I, nocc + B, J)] = value
if self.tIA is not None:
nocc = self.tIA.shape[0]
for (I, A), value, in numpy.ndenumerate(self.tIA):
if not numpy.isclose(value, 0.0, atol=threshold):
variables[(A + nocc, I)] = value
return dict(sorted(variables.items(), key=lambda x: numpy.abs(x[1]), reverse=True))
@dataclass
class Amplitudes:
"""Coupled-Cluster Amplitudes
We adopt the Psi4 notation for consistency
I,A for alpha
i,a for beta
Parameters
----------
Returns
-------
"""
@classmethod
def from_closed_shell(cls, cs: ClosedShellAmplitudes):
"""
Initialize from closed-shell Amplitude structure
Parameters
----------
cs: ClosedShellAmplitudes :
Returns
-------
"""
tijab = cs.tIjAb - numpy.einsum("ijab -> ijba", cs.tIjAb, optimize='greedy')
return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)
tIjAb: numpy.ndarray = None
tIA: numpy.ndarray = None
tiJaB: numpy.ndarray = None
tijab: numpy.ndarray = None
tIJAB: numpy.ndarray = None
tia: numpy.ndarray = None
def make_parameter_dictionary(self, threshold=1.e-8):
"""
Parameters
----------
threshold :
(Default value = 1.e-8)
Neglect amplitudes below the threshold
Returns
-------
Dictionary of tequila variables (hash is in the style of (a,i,b,j))
"""
variables = {}
if self.tIjAb is not None:
nvirt = self.tIjAb.shape[2]
nocc = self.tIjAb.shape[0]
assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
            for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, 2 * j + 1)] = value
            for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), 2 * J)] = value
            for (i, j, a, b), value in numpy.ndenumerate(self.tijab):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, 2 * j + 1)] = value
            for (I, J, A, B), value in numpy.ndenumerate(self.tIJAB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), 2 * J)] = value
        if self.tIA is not None:
            nocc = self.tIA.shape[0]
            assert (self.tia.shape[0] == nocc)
            for (I, A), value in numpy.ndenumerate(self.tIA):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (A + nocc), 2 * I)] = value
            for (i, a), value in numpy.ndenumerate(self.tia):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value
return variables
class NBodyTensor:
""" Convenience class for handling N-body tensors """
class Ordering:
def __init__(self, scheme):
if hasattr(scheme, "_scheme"):
scheme = scheme._scheme
elif hasattr(scheme, "scheme"):
scheme = scheme.scheme
self._scheme = self.assign_scheme(scheme)
def assign_scheme(self, scheme):
if scheme is None:
return "chem"
else:
scheme = str(scheme)
if scheme.lower() in ["mulliken", "chem", "c", "1122"]:
return "chem"
elif scheme.lower() in ["dirac", "phys", "p", "1212"]:
return "phys"
elif scheme.lower() in ["openfermion", "of", "o", "1221"]:
return "of"
else:
raise TequilaException(
"Unknown two-body tensor scheme {}. Supported are dirac, mulliken, and openfermion".format(scheme))
def is_phys(self):
return self._scheme == "phys"
def is_chem(self):
return self._scheme == "chem"
def is_of(self):
return self._scheme == "of"
def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, ordering: str = None,
size_full: int = None):
"""
Parameters
----------
elems: Tensor data as numpy array
active_indices: List of active indices in total ordering
ordering: Ordering scheme for two body tensors
"dirac" or "phys": <12|g|12>
.. math::
g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
"mulliken" or "chem": (11|g|22)
.. math::
g_{pqrs} = \\int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
"openfermion":
.. math:: [12|g|21]
g_{gqprs} = \\int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
size_full
"""
# Set elements
self.elems = elems
# Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible
# representations
if active_indices is not None:
self.active_indices = active_indices
self._passive_indices = None
self._full_indices = None
self._indices_set: bool = False
# Determine order of tensor
# Assume, that tensor is entered in desired shape, not as flat array.
self.order = len(self.elems.shape)
# Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well
if size_full is None:
self._size_full = self.elems.shape[0]
else:
self._size_full = size_full
# 2-body tensors (<=> order 4) currently allow reordering
if self.order == 4:
self.ordering = self.Ordering(ordering)
else:
if ordering is not None:
raise Exception("Ordering only implemented for tensors of order 4 / 2-body tensors.")
self.ordering = None
def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:
"""
Get subspace of tensor by a set of index lists
according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]
This essentially is an implementation of a non-contiguous slicing using numpy.take
Parameters
----------
idx_lists :
List of lists, each defining the desired subspace per axis
Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
# Check if index list has correct size
if len(idx_lists) != self.order:
raise Exception("Need to pass an index list for each dimension!" +
" Length of idx_lists needs to match order of tensor.")
# Perform slicing via numpy.take
out = self.elems
for ax in range(self.order):
if idx_lists[ax] is not None: # None means, we want the full space in this direction
out = numpy.take(out, idx_lists[ax], axis=ax)
return out
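    # Illustrative example (commented out; not part of the library code): slice a two-body tensor
    # down to an active space given by a list of orbital indices.
    #
    #   import numpy
    #   g = NBodyTensor(elems=numpy.random.rand(4, 4, 4, 4), ordering="mulliken")
    #   active = [1, 2]
    #   g_active = g.sub_lists(idx_lists=[active] * 4)   # resulting shape: (2, 2, 2, 2)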
def set_index_lists(self):
""" Set passive and full index lists based on class inputs """
tmp_size = self._size_full
if self._size_full is None:
tmp_size = self.elems.shape[0]
self._passive_indices = [i for i in range(tmp_size)
if i not in self.active_indices]
self._full_indices = [i for i in range(tmp_size)]
def sub_str(self, name: str) -> numpy.ndarray:
"""
Get subspace of tensor by a string
Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.
Full space in this context may also be smaller than actual tensor dimension.
The specification of active space in this context only allows to pick a set from a list of orbitals, and
is not able to resolve an active space from irreducible representations.
Example for one-body tensor:
hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]
Parameters
----------
name :
String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)
Returns
-------
out :
Sliced tensor as numpy.ndarray
"""
if not self._indices_set:
self.set_index_lists()
self._indices_set = True
if name is None:
raise Exception("No name specified.")
if len(name) != self.order:
raise Exception("Name does not match order of the tensor.")
if self.active_indices is None:
raise Exception("Need to set an active space in order to call this function.")
idx_lists = []
# Parse name as string of space indices
for char in name:
if char.lower() == 'a':
idx_lists.append(self.active_indices)
elif char.lower() == 'p':
idx_lists.append(self._passive_indices)
elif char.lower() == 'f':
if self._size_full is None:
idx_lists.append(None)
else:
idx_lists.append(self._full_indices)
else:
raise Exception("Need to specify a valid letter (a,p,f).")
out = self.sub_lists(idx_lists)
return out
def reorder(self, to: str = 'of'):
"""
Function to reorder tensors according to some convention.
Parameters
----------
to :
Ordering scheme of choice.
'openfermion', 'of' (default) :
openfermion - ordering, corresponds to integrals of the type
                h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1)
with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
currently needed for dependencies on openfermion-library
'chem', 'c' :
quantum chemistry ordering, collect particle terms,
more convenient for real-space methods
h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
This is output by psi4
'phys', 'p' :
typical physics ordering, integrals of type
h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
with operators a^pq_rs = a^p a^q a_s a_r
Returns
-------
"""
if self.order != 4:
raise Exception('Reordering currently only implemented for two-body tensors.')
to = self.Ordering(to)
if self.ordering == to:
return self
elif self.ordering.is_chem():
if to.is_of():
self.elems = numpy.einsum("psqr -> pqrs", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("prqs -> pqrs", self.elems, optimize='greedy')
elif self.ordering.is_of():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> psqr", self.elems, optimize='greedy')
elif to.is_phys():
self.elems = numpy.einsum("pqrs -> pqsr", self.elems, optimize='greedy')
elif self.ordering.is_phys():
if to.is_chem():
self.elems = numpy.einsum("pqrs -> prqs", self.elems, optimize='greedy')
elif to.is_of():
self.elems = numpy.einsum("pqsr -> pqrs", self.elems, optimize='greedy')
return self
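    # Illustrative example (commented out; not part of the library code): convert chemist-ordered
    # integrals (here `tbt`, a placeholder numpy array) into the openfermion convention.
    #
    #   g = NBodyTensor(elems=tbt, ordering="chem")
    #   g.reorder(to="openfermion")
    #   g_of = g.elems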
class QuantumChemistryBase:
def __init__(self, parameters: ParametersQC,
transformation: typing.Union[str, typing.Callable] = None,
active_orbitals: list = None,
*args,
**kwargs):
self.parameters = parameters
if "molecule" in kwargs:
self.molecule = kwargs["molecule"]
else:
self.molecule = self.make_molecule(*args, **kwargs)
assert (parameters.basis_set.lower() == self.molecule.basis.lower())
assert (parameters.multiplicity == self.molecule.multiplicity)
assert (parameters.charge == self.molecule.charge)
self.active_space = None
if active_orbitals is not None:
self.active_space = self._make_active_space_data(active_orbitals=active_orbitals)
self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)
self._rdm1 = None
self._rdm2 = None
def _initialize_transformation(self, transformation=None, *args, **kwargs):
if transformation is None:
transformation = "JordanWigner"
# filter out arguments to the transformation
trafo_args = {k.split("__")[1]: v for k, v in kwargs.items() if
(hasattr(k, "lower") and "transformation__" in k.lower())}
trafo_args["n_electrons"] = self.n_electrons
trafo_args["n_orbitals"] = self.n_orbitals
if hasattr(transformation, "upper"):
# format to conventions
transformation = transformation.replace("_", "").replace("-", "").upper()
encodings = known_encodings()
if transformation in encodings:
transformation = encodings[transformation](**trafo_args)
else:
raise TequilaException(
"Unkown Fermion-to-Qubit encoding {}. Try something like: {}".format(transformation,
list(encodings.keys())))
return transformation
def _make_active_space_data(self, active_orbitals, reference=None):
"""
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
"""
if active_orbitals is None:
return None
if reference is None:
# auto assignment only for closed-shell
assert (self.n_electrons % 2 == 0)
reference = sorted([i for i in range(self.n_electrons // 2)])
return ActiveSpaceData(active_orbitals=sorted(active_orbitals),
reference_orbitals=sorted(reference))
@classmethod
def from_openfermion(cls, molecule: openfermion.MolecularData,
transformation: typing.Union[str, typing.Callable] = None,
*args,
**kwargs):
"""
        Initialize directly from an openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
"""
parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,
description=molecule.description, multiplicity=molecule.multiplicity,
charge=molecule.charge)
return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)
def make_excitation_generator(self,
indices: typing.Iterable[typing.Tuple[int, int]],
form: str = None,
remove_constant_term: bool = True) -> QubitHamiltonian:
"""
Notes
----------
Creates the transformed hermitian generator of UCC type unitaries:
        M(a^\dagger_{a_0} a_{i_0} a^\dagger_{a_1} a_{i_1} ... - h.c.)
        where the qubit map M depends on self.transformation
Parameters
----------
indices : typing.Iterable[typing.Tuple[int, int]] :
            List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha: even numbers, beta: odd numbers)
can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
form : str : (Default value None):
Manipulate the generator to involution or projector
set form='involution' or 'projector'
the default is no manipulation which gives the standard fermionic excitation operator back
remove_constant_term: bool: (Default value True):
by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
if the unitary is controlled this might not be true!
Returns
-------
type
1j*Transformed qubit excitation operator, depends on self.transformation
"""
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet")
# check indices and convert to list of tuples if necessary
if len(indices) == 0:
raise TequilaException("make_excitation_operator: no indices given")
elif not isinstance(indices[0], typing.Iterable):
if len(indices) % 2 != 0:
raise TequilaException("make_excitation_generator: unexpected input format of indices\n"
"use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\n"
"or list as [a_0, i_0, a_1, i_1, ... ]\n"
"you gave: {}".format(indices))
converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]
else:
converted = indices
# convert everything to native python int
# otherwise openfermion will complain
converted = [(int(pair[0]), int(pair[1])) for pair in converted]
# convert to openfermion input format
ofi = []
dag = []
for pair in converted:
assert (len(pair) == 2)
ofi += [(int(pair[0]), 1),
(int(pair[1]), 0)] # openfermion does not take other types of integers like numpy.int64
dag += [(int(pair[0]), 0), (int(pair[1]), 1)]
op = openfermion.FermionOperator(tuple(ofi), 1.j) # 1j makes it hermitian
op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)
if isinstance(form, str) and form.lower() != 'fermionic':
# indices for all the Na operators
Na = [x for pair in converted for x in [(pair[0], 1), (pair[0], 0)]]
# indices for all the Ma operators (Ma = 1 - Na)
Ma = [x for pair in converted for x in [(pair[0], 0), (pair[0], 1)]]
# indices for all the Ni operators
Ni = [x for pair in converted for x in [(pair[1], 1), (pair[1], 0)]]
# indices for all the Mi operators
Mi = [x for pair in converted for x in [(pair[1], 0), (pair[1], 1)]]
# can gaussianize as projector or as involution (last is default)
if form.lower() == "p+":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, 0.5)
op += openfermion.FermionOperator(Ni + Ma, 0.5)
elif form.lower() == "p-":
op *= 0.5
op += openfermion.FermionOperator(Na + Mi, -0.5)
op += openfermion.FermionOperator(Ni + Ma, -0.5)
elif form.lower() == "g+":
op += openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
elif form.lower() == "g-":
op += openfermion.FermionOperator([], -1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, 1.0)
op += openfermion.FermionOperator(Ni + Ma, 1.0)
elif form.lower() == "p0":
# P0: we only construct P0 and don't keep the original generator
op = openfermion.FermionOperator([], 1.0) # Just for clarity will be subtracted anyway
op += openfermion.FermionOperator(Na + Mi, -1.0)
op += openfermion.FermionOperator(Ni + Ma, -1.0)
else:
raise TequilaException(
"Unknown generator form {}, supported are G, P+, P-, G+, G- and P0".format(form))
qop = self.transformation(op)
# remove constant terms
# they have no effect in the unitary (if not controlled)
if remove_constant_term:
qop.qubit_operator.terms[tuple()] = 0.0
# check if the operator is hermitian and cast coefficients to floats
# in order to avoid trouble with the simulation backends
assert qop.is_hermitian()
for k, v in qop.qubit_operator.terms.items():
qop.qubit_operator.terms[k] = to_float(v)
qop = qop.simplify()
if len(qop) == 0:
warnings.warn("Excitation generator is a unit operator.\n"
"Non-standard transformations might not work with general fermionic operators\n"
"indices = " + str(indices), category=TequilaWarning)
return qop
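    # Illustrative example (commented out; not part of the library code): generator of a paired
    # double excitation from spatial orbital 0 into spatial orbital 1, i.e. spin-orbitals
    # (0, 1) -> (2, 3) in the alpha-even / beta-odd convention used in this file:
    #
    #   G = mol.make_excitation_generator(indices=[(2, 0), (3, 1)])
    #   U = gates.Trotterized(generator=G, angle="a", steps=1)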
def make_hardcore_boson_excitation_gate(self, indices, angle, control=None, assume_real=True, compile_options="optimize"):
target = []
for pair in indices:
assert len(pair) == 2
target += [pair[0], pair[1]]
consistency = [x < self.n_orbitals for x in target]
if not all(consistency):
raise TequilaException(
"make_hardcore_boson_excitation_gate: Inconsistencies in indices={}. Should be indexed from 0 ... n_orbitals={}".format(
indices, self.n_orbitals))
return gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, control=control, compile_options=compile_options)
def make_excitation_gate(self, indices, angle, control=None, assume_real=True, **kwargs):
"""
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\\prod_{k} a_{p_k}^\\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
"""
generator = self.make_excitation_generator(indices=indices, remove_constant_term=control is None)
p0 = self.make_excitation_generator(indices=indices, form="P0", remove_constant_term=control is None)
return QCircuit.wrap_gate(
FermionicGateImpl(angle=angle, generator=generator, p0=p0, transformation=type(self.transformation).__name__.lower(), assume_real=assume_real, control=control, **kwargs))
def make_molecule(self, *args, **kwargs) -> MolecularData:
"""Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
"""
molecule = MolecularData(**self.parameters.molecular_data_param)
# try to load
do_compute = True
try:
import os
if os.path.exists(self.parameters.filename):
molecule.load()
do_compute = False
except OSError:
do_compute = True
if do_compute:
molecule = self.do_make_molecule(*args, **kwargs)
molecule.save()
return molecule
def do_make_molecule(self, *args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
# integrals need to be passed in base class
assert ("one_body_integrals" in kwargs)
assert ("two_body_integrals" in kwargs)
one_body_integrals = kwargs["one_body_integrals"]
two_body_integrals = kwargs["two_body_integrals"]
# tequila assumes "openfermion" ordering, integrals can however be passed
# down in other orderings, but it needs to be indicated by keyword
if "ordering" in kwargs:
two_body_integrals = NBodyTensor(two_body_integrals, ordering=kwargs["ordering"])
two_body_integrals.reorder(to="openfermion")
two_body_integrals = two_body_integrals.elems
if "nuclear_repulsion" in kwargs:
nuclear_repulsion = kwargs["nuclear_repulsion"]
else:
nuclear_repulsion = 0.0
warnings.warn("No nuclear_repulsion given for custom molecule, setting to zero", category=TequilaWarning)
if ("n_orbitals" in kwargs):
n_orbitals = kwargs["n_orbitals"]
else:
n_orbitals = one_body_integrals.shape[0]
for i in [0, 1, 2, 3]:
assert n_orbitals == two_body_integrals.shape[i]
molecule = MolecularData(**self.parameters.molecular_data_param)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.nuclear_repulsion = nuclear_repulsion
molecule.n_orbitals = n_orbitals
if "n_electrons" in kwargs:
molecule.n_electrons = kwargs["n_electrons"]
molecule.save()
return molecule
@property
def n_orbitals(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_orbitals
else:
return len(self.active_space.active_orbitals)
@property
def n_electrons(self) -> int:
""" """
if self.active_space is None:
return self.molecule.n_electrons
else:
return 2 * len(self.active_space.active_reference_orbitals)
def make_hamiltonian(self, occupied_indices=None, active_indices=None, threshold=1.e-8) -> QubitHamiltonian:
""" """
if occupied_indices is None and self.active_space is not None:
occupied_indices = self.active_space.frozen_reference_orbitals
if active_indices is None and self.active_space is not None:
active_indices = self.active_space.active_orbitals
fop = openfermion.transforms.get_fermion_operator(
self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))
try:
qop = self.transformation(fop)
except TypeError:
qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))
qop.is_hermitian()
return qop
def make_hardcore_boson_hamiltonian(self):
if not self.transformation.up_then_down:
warnings.warn(
"Hardcore-Boson Hamiltonian without reordering will result in non-consecutive Hamiltonians that are eventually not be combinable with other features of tequila. Try transformation=\'ReorderedJordanWigner\' or similar for more consistency",
TequilaWarning)
# integrate with QubitEncoding at some point
n_orbitals = self.n_orbitals
c, obt, tbt = self.get_integrals()
h = numpy.zeros(shape=[n_orbitals] * 2)
g = numpy.zeros(shape=[n_orbitals] * 2)
for p in range(n_orbitals):
h[p, p] += 2 * obt[p, p]
for q in range(n_orbitals):
h[p, q] += + tbt[p, p, q, q]
if p != q:
g[p, q] += 2 * tbt[p, q, q, p] - tbt[p, q, p, q]
H = c
for p in range(n_orbitals):
for q in range(n_orbitals):
up = p
uq = q
H += h[p, q] * Sm(up) * Sp(uq) + g[p, q] * Sm(up) * Sp(up) * Sm(uq) * Sp(uq)
return H
def make_molecular_hamiltonian(self):
if self.active_space:
return self.molecule.get_molecular_hamiltonian(occupied_indices=self.active_space.frozen_reference_orbitals,
active_indices=self.active_space.active_orbitals)
else:
return self.molecule.get_molecular_hamiltonian()
def get_integrals(self, two_body_ordering="openfermion"):
"""
Returns
-------
Tuple with:
constant part (nuclear_repulsion + possible integrated parts from active-spaces)
one_body_integrals
two_body_integrals
"""
if self.active_space is not None and len(self.active_space.frozen_reference_orbitals) > 0:
c, h1, h2 = self.molecule.get_active_space_integrals(active_indices=self.active_space.active_orbitals,
occupied_indices=self.active_space.frozen_reference_orbitals)
else:
c = 0.0
h1 = self.molecule.one_body_integrals
h2 = self.molecule.two_body_integrals
c += self.molecule.nuclear_repulsion
h2 = NBodyTensor(h2, ordering="openfermion")
h2 = h2.reorder(to=two_body_ordering).elems
return c, h1, h2
def compute_one_body_integrals(self):
""" convenience function """
c, h1, h2 = self.get_integrals()
return h1
def compute_two_body_integrals(self, two_body_ordering="openfermion"):
""" """
c, h1, h2 = self.get_integrals(two_body_ordering=two_body_ordering)
return h2
def compute_constant_part(self):
c, h1, h2 = self.get_integrals()
return c
def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:
""" """
raise Exception("BaseClass Method")
def prepare_reference(self, state=None, *args, **kwargs):
"""
Returns
-------
A tequila circuit object which prepares the reference of this molecule in the chosen transformation
"""
if state is None:
assert self.n_electrons %2 == 0
state = [0]*(self.n_orbitals*2)
for i in range(self.n_electrons):
state[i]=1
reference_state = BitString.from_array(self.transformation.map_state(state=state))
U = prepare_product_state(reference_state)
# prevent trace out in direct wfn simulation
U.n_qubits = self.n_orbitals*2 # adapt when tapered transformations work
return U
def prepare_hardcore_boson_reference(self):
# HF state in the HCB representation (paired electrons)
U = gates.X(target=[i for i in range(self.n_electrons // 2)])
U.n_qubits = self.n_orbitals
return U
def hcb_to_me(self, U=None):
"""
Transform a circuit in the hardcore-boson encoding (HCB)
to the encoding of this molecule
HCB is supposed to be encoded on the first n_orbitals qubits
Parameters
----------
U: HCB circuit (using the alpha qubits)
Returns
-------
"""
if U is None:
U = QCircuit()
# consistency
consistency = [x < self.n_orbitals for x in U.qubits]
if not all(consistency):
warnings.warn(
"hcb_to_me: given circuit is not defined on the first {} qubits. Is this a HCB circuit?".format(
self.n_orbitals))
# map to alpha qubits
alpha_map = {k: self.transformation.up(k) for k in range(self.n_orbitals)}
alpha_U = U.map_qubits(qubit_map=alpha_map)
UX = self.transformation.hcb_to_me()
if UX is None:
raise TequilaException(
"transformation={} has no hcb_to_me function implemented".format(self.transformation))
return alpha_U + UX
def get_pair_specific_indices(self,
pair_info: str = None,
include_singles: bool = True,
general_excitations: bool = True) -> list:
"""
Assuming a pair-specific model, create a pair-specific index list
to be used in make_upccgsd_ansatz(indices = ... )
Excite from a set of references (i) to any pair coming from (i),
i.e. any (i,j)/(j,i). If general excitations are allowed, also
allow excitations from pairs to appendant pairs and reference.
Parameters
----------
pair_info
file or list including information about pair structure
            reference orbitals are labeled by a single digit, pair orbitals by two digits
example: as file: "0,1,11,11,00,10" (hand over file name)
in file, skip first row assuming some text with information
            as list: ['0','1','11','11','00','10']
~> two reference orbitals 0 and 1,
then two orbitals from pair 11, one from 00, one mixed 10
include_singles
include single excitations
general_excitations
allow general excitations
Returns
-------
list of indices with pair-specific ansatz
"""
if pair_info is None:
raise TequilaException("Need to provide some pair information.")
# If pair-information given on file, load (layout see above)
if isinstance(pair_info, str):
pairs = numpy.loadtxt(pair_info, dtype=str, delimiter=",", skiprows=1)
elif isinstance(pair_info, list):
pairs = pair_info
        else:
            raise TequilaException("Pair information needs to be contained in a list or filename.")
connect = [[]] * len(pairs)
# determine "connectivity"
generalized = 0
for idx, p in enumerate(pairs):
if len(p) == 1:
connect[idx] = [i for i in range(len(pairs))
if ((len(pairs[i]) == 2) and (str(idx) in pairs[i]))]
elif (len(p) == 2) and general_excitations:
connect[idx] = [i for i in range(len(pairs))
if (((p[0] in pairs[i]) or (p[1] in pairs[i]) or str(i) in p)
and not (i == idx))]
elif len(p) > 2:
raise TequilaException("Invalid reference of pair id.")
# create generating indices from connectivity
indices = []
for i, to in enumerate(connect):
for a in to:
indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))
if include_singles:
indices.append(((2 * i, 2 * a)))
indices.append(((2 * i + 1, 2 * a + 1)))
return indices
def format_excitation_indices(self, idx):
"""
Consistent formatting of excitation indices
idx = [(p0,q0),(p1,q1),...,(pn,qn)]
sorted as: p0<p1<...<pn and pi<qi
:param idx: list of index tuples describing a single(!) fermionic excitation
:return: tuple-list of index tuples
"""
idx = [tuple(sorted(x)) for x in idx]
idx = sorted(idx, key=lambda x: x[0])
return tuple(idx)
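# Illustrative sketch of the formatting above (hypothetical input, not part of the original source):
# format_excitation_indices([(3, 1), (2, 0)]) first sorts each pair to (1, 3) and (0, 2),
# then sorts the pairs by their first element, returning ((0, 2), (1, 3)).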
def make_upccgsd_indices(self, key, reference_orbitals=None, *args, **kwargs):
if reference_orbitals is None:
reference_orbitals = [i for i in range(self.n_electrons // 2)]
indices = []
# add doubles in hcb encoding
if hasattr(key, "lower") and key.lower() == "ladder":
# ladder structure of the pair excitations
# ensures local connectivity
indices = [[(n, n + 1)] for n in range(self.n_orbitals - 1)]
elif hasattr(key, "lower") and "g" not in key.lower():
indices = [[(n, m)] for n in reference_orbitals for m in range(self.n_orbitals) if
n < m and m not in reference_orbitals]
elif hasattr(key, "lower") and "g" in key.lower():
indices = [[(n, m)] for n in range(self.n_orbitals) for m in range(self.n_orbitals) if n < m]
else:
raise TequilaException("Unknown recipe: {}".format(key))
indices = [self.format_excitation_indices(idx) for idx in indices]
return indices
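# Hedged sketch of the recipes above, assuming 4 spatial orbitals with reference orbitals [0, 1]:
# key="ladder"  -> pair indices (0,1), (1,2), (2,3)         (nearest-neighbour pairs only)
# key="UpCCSD"  -> pair indices (0,2), (0,3), (1,2), (1,3)  (reference -> virtual only, no "g" in the key)
# key="UpCCGSD" -> all pairs n < m over the 4 orbitals      (generalized, "g" in the key)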
def make_hardcore_boson_upccgd_layer(self,
indices: list = "UpCCGD",
label: str = None,
assume_real: bool = True,
*args, **kwargs):
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices.lower())
UD = QCircuit()
for idx in indices:
UD += self.make_hardcore_boson_excitation_gate(indices=idx, angle=(idx, "D", label),
assume_real=assume_real)
return UD
def make_ansatz(self, name:str, *args, **kwargs):
name = name.lower()
if name.strip()=="":
return QCircuit()
if "+" in name:
U = QCircuit()
subparts = name.split("+")
U = self.make_ansatz(name=subparts[0], *args ,**kwargs)
if "include_reference" in kwargs:
kwargs.pop("include_reference")
if "hcb_optimization" in kwargs:
kwargs.pop("hcb_optimization")
for subpart in subparts[1:]:
U += self.make_ansatz(name=subpart, *args, include_reference=False, hcb_optimization=False, **kwargs)
return U
if name=="uccsd":
return self.make_uccsd_ansatz(*args, **kwargs)
elif "d" in name or "s" in name:
return self.make_upccgsd_ansatz(name=name, *args, **kwargs)
else:
raise TequilaException("unknown ansatz with name={}".format(name))
def make_upccgsd_ansatz(self,
include_reference: bool = True,
name: str = "UpCCGSD",
label: str = None,
order: int = None,
assume_real: bool = True,
hcb_optimization: bool = None,
spin_adapt_singles: bool = True,
neglect_z = None,
*args, **kwargs):
"""
UpCCGSD ansatz similar to the one described by Lee et al.
Parameters
----------
name
    determines the excitation recipe: a name containing "S" (e.g. UpCCGSD) includes singles,
    UpCCGD does not; a leading "k-" (e.g. 2-UpCCGSD) sets the order, and "HCB" keeps the
    circuit in the hardcore-boson encoding
include_reference
    include the HF reference state as initial state
hcb_optimization
    build the pair (doubles) part in the hardcore-boson encoding and map back afterwards;
    defaults to automatic detection based on the qubit encoding
label
An additional label that is set with the variables
default is None and no label will be set: variables names will be
(x, (p,q)) for x in range(order)
with a label the variables will be named
(label, (x, (p,q)))
order
Order of the ansatz (default is 1)
determines how often the layer structure is repeated
parameters of repeated layers are independent
assume_real
assume a real wavefunction (that is always the case if the reference state is real)
reduces potential gradient costs from 4 to 2
Returns
-------
UpCCGSD ansatz
"""
name = name.upper()
if ("A" in name) and neglect_z is None:
neglect_z = True
else:
neglect_z = False
if order is None:
try:
if "-" in name:
order = int(name.split("-")[0])
else:
order = 1
except:
order = 1
indices = self.make_upccgsd_indices(key=name)
# check if the used qubit encoding has a hcb transformation
have_hcb_trafo = self.transformation.hcb_to_me() is not None
# consistency checks for optimization
if have_hcb_trafo and hcb_optimization is None:
hcb_optimization = True
if "HCB" in name:
hcb_optimization = True
if hcb_optimization and not have_hcb_trafo and "HCB" not in name:
raise TequilaException(
"use_hcb={} but transformation={} has no \'hcb_to_me\' function. Try transformation=\'ReorderedJordanWigner\'".format(
hcb_optimization, self.transformation))
if "S" in name and "HCB" in name:
if "HCB" in name and "S" in name:
raise Exception(
"name={}, Singles can't be realized without mapping back to the standard encoding leave S or HCB out of the name".format(
name))
# first layer
if not hcb_optimization:
U = QCircuit()
if include_reference:
U = self.prepare_reference()
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, assume_real=assume_real,
label=(label, 0), spin_adapt_singles=spin_adapt_singles, *args, **kwargs)
else:
U = QCircuit()
if include_reference:
U = self.prepare_hardcore_boson_reference()
U += self.make_hardcore_boson_upccgd_layer(indices=indices, assume_real=assume_real, label=(label, 0),
*args, **kwargs)
if "HCB" not in name:
U = self.hcb_to_me(U=U)
if "S" in name:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=(label, 0),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z, *args, **kwargs)
for k in range(1, order):
U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, label=(label, k),
spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z)
return U
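# Hedged usage sketch (a molecule object "mol" of this class is assumed to exist):
# U  = mol.make_upccgsd_ansatz(name="UpCCGSD")      # one layer of pair doubles plus singles
# U2 = mol.make_upccgsd_ansatz(name="2-UpCCGD")     # two layers, doubles only, independent parameters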
def make_upccgsd_layer(self, indices, include_singles=True, include_doubles=True, assume_real=True, label=None,
spin_adapt_singles: bool = True, angle_transform=None, mix_sd=False, neglect_z=False, *args, **kwargs):
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
angle = (tuple([idx]), "D", label)
if include_doubles:
if "jordanwigner" in self.transformation.name.lower() and not self.transformation.up_then_down:
# we can optimize with qubit excitations for the JW representation
target=[self.transformation.up(idx[0]), self.transformation.up(idx[1]), self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle,
indices=((2 * idx[0], 2 * idx[1]), (2 * idx[0] + 1, 2 * idx[1] + 1)),
assume_real=assume_real, **kwargs)
if include_singles and mix_sd:
U += self.make_upccgsd_singles(indices=[idx], assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
if include_singles and not mix_sd:
U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=label,
spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform, neglect_z=neglect_z)
return U
def make_upccgsd_singles(self, indices="UpCCGSD", spin_adapt_singles=True, label=None, angle_transform=None,
assume_real=True, neglect_z=False, *args, **kwargs):
if neglect_z and "jordanwigner" not in self.transformation.name.lower():
raise TequilaException("neglegt-z approximation in UpCCGSD singles needs the (Reversed)JordanWigner representation")
if hasattr(indices, "lower"):
indices = self.make_upccgsd_indices(key=indices)
U = QCircuit()
for idx in indices:
assert len(idx) == 1
idx = idx[0]
if spin_adapt_singles:
angle = (idx, "S", label)
if angle_transform is not None:
angle = angle_transform(angle)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0], 2 * idx[1])], assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
else:
angle1 = (idx, "SU", label)
angle2 = (idx, "SD", label)
if angle_transform is not None:
angle1 = angle_transform(angle1)
angle2 = angle_transform(angle2)
if neglect_z:
targeta=[self.transformation.up(idx[0]), self.transformation.up(idx[1])]
targetb=[self.transformation.down(idx[0]), self.transformation.down(idx[1])]
U += gates.QubitExcitation(angle=angle1, target=targeta, assume_real=assume_real, **kwargs)
U += gates.QubitExcitation(angle=angle2, target=targetb, assume_real=assume_real, **kwargs)
else:
U += self.make_excitation_gate(angle=angle1, indices=[(2 * idx[0], 2 * idx[1])],
assume_real=assume_real, **kwargs)
U += self.make_excitation_gate(angle=angle2, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
assume_real=assume_real, **kwargs)
return U
def make_uccsd_ansatz(self, trotter_steps: int=1,
initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = "mp2",
include_reference_ansatz=True,
parametrized=True,
threshold=1.e-8,
add_singles=None,
*args, **kwargs) -> QCircuit:
"""
Parameters
----------
initial_amplitudes :
initial amplitudes given as ManyBodyAmplitudes structure or as string
where 'mp2', 'cc2' or 'ccsd' are possible initializations
include_reference_ansatz :
Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
parametrized :
Initialize with variables, otherwise with static numbers (Default value = True)
trotter_steps: int :
    number of Trotter steps (Default value = 1)
threshold :
    neglect amplitudes below this threshold (Default value = 1.e-8)
add_singles :
    add singles excitations (defaults to True when amplitudes are initialized from a string like "mp2")
Returns
-------
type
Parametrized QCircuit
"""
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2" and add_singles is None:
add_singles=True
elif initial_amplitudes is not None and add_singles is not None:
warnings.warn("make_uccsd_anstatz: add_singles has no effect when explicit amplitudes are passed down", TequilaWarning)
elif add_singles is None:
add_singles=True
if self.n_electrons % 2 != 0:
raise TequilaException("make_uccsd_ansatz currently only for closed shell systems")
nocc = self.n_electrons // 2
nvirt = self.n_orbitals - nocc
Uref = QCircuit()
if include_reference_ansatz:
Uref = self.prepare_reference()
amplitudes = initial_amplitudes
if hasattr(initial_amplitudes, "lower"):
if initial_amplitudes.lower() == "mp2":
amplitudes = self.compute_mp2_amplitudes()
elif initial_amplitudes.lower() == "ccsd":
amplitudes = self.compute_ccsd_amplitudes()
else:
try:
amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())
except Exception as exc:
raise TequilaException(
"{}\nDon't know how to initialize \'{}\' amplitudes".format(exc, initial_amplitudes))
if amplitudes is None:
tia=None
if add_singles: tia=numpy.zeros(shape=[nocc, nvirt])
amplitudes = ClosedShellAmplitudes(
tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),
tIA=tia)
closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)
indices = {}
if not isinstance(amplitudes, dict):
amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)
amplitudes = dict(sorted(amplitudes.items(), key=lambda x: numpy.fabs(x[1]), reverse=True))
for key, t in amplitudes.items():
assert (len(key) % 2 == 0)
if not numpy.isclose(t, 0.0, atol=threshold):
if closed_shell:
if len(key) == 2 and add_singles:
# singles
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_a = (2*key[0], 2*key[1])
idx_b = (2*key[0]+1, 2*key[1]+1)
indices[idx_a]=angle
indices[idx_b]=angle
else:
assert len(key)==4
angle=2.0*t
if parametrized:
angle=2.0*Variable(name=key)
idx_abab=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3])
indices[idx_abab]=angle
if key[0]!=key[2] and key[1]!=key[3]:
idx_aaaa=(2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3])
idx_bbbb=(2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2]+1, 2 * key[3]+1)
partner = tuple([key[2], key[1], key[0], key[3]])
anglex=2.0*(t - amplitudes[partner])
if parametrized:
anglex=2.0*(Variable(name=key) - Variable(partner))
indices[idx_aaaa]=anglex
indices[idx_bbbb]=anglex
else:
raise Exception("only closed-shell supported, please assemble yourself .... sorry :-)")
UCCSD = QCircuit()
factor = 1.0 / trotter_steps
for step in range(trotter_steps):
for idx, angle in indices.items():
UCCSD += self.make_excitation_gate(indices=idx, angle=factor * angle)
if hasattr(initial_amplitudes,"lower") and initial_amplitudes.lower()=="mp2" and parametrized and add_singles:
# mp2 has no singles, need to initialize them here (if not parametrized, initializing as 0.0 makes no sense though)
UCCSD += self.make_upccgsd_layer(indices="upccsd", include_singles=True, include_doubles=False)
return Uref + UCCSD
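# Hedged usage sketch (a molecule object "mol" is assumed to exist):
# U = mol.make_uccsd_ansatz(initial_amplitudes="mp2", trotter_steps=1)
# The circuit is parametrized by the non-vanishing MP2 doubles amplitudes; since MP2 carries
# no singles information, singles are appended via the UpCCSD-style layer above.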
def compute_amplitudes(self, method: str, *args, **kwargs):
"""
Compute closed-shell CC amplitudes
Parameters
----------
method :
coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)
Success might depend on backend
got an extra function for MP2
*args :
**kwargs :
Returns
-------
"""
raise TequilaException("compute amplitudes: Needs to be overwritten by backend")
def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:
"""
Compute closed-shell mp2 amplitudes
.. math::
    t(a,i,b,j) = g(a,i,b,j)/(e(i) + e(j) - e(a) - e(b))
Returns
-------
ClosedShellAmplitudes holding the MP2 pair amplitudes tIjAb
"""
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.molecule.n_electrons // 2 # this is never the active space
ei = fij[:nocc]
ai = fij[nocc:]
abgij = g[nocc:, nocc:, :nocc, :nocc]
amplitudes = abgij * 1.0 / (
ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))
E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,
optimize='greedy')
self.molecule.mp2_energy = E + self.molecule.hf_energy
return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))
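# Restated in conventional notation (i, j occupied; a, b virtual), matching the einsums above:
#   t(a,b,i,j) = g(a,b,i,j) / (e(i) + e(j) - e(a) - e(b))
#   E_MP2 = sum_{abij} t(a,b,i,j) * (2 g(a,b,i,j) - g(a,b,j,i))
# (a clarifying comment only, no additional functionality)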
def compute_cis_amplitudes(self):
"""
Compute the CIS amplitudes of the molecule
"""
@dataclass
class ResultCIS:
""" """
omegas: typing.List[numbers.Real] # excitation energies [omega0, ...]
amplitudes: typing.List[ClosedShellAmplitudes] # corresponding amplitudes [x_{ai}_0, ...]
def __getitem__(self, item):
return (self.omegas[item], self.amplitudes[item])
def __len__(self):
return len(self.omegas)
g = self.molecule.two_body_integrals
fij = self.molecule.orbital_energies
nocc = self.n_alpha_electrons
nvirt = self.n_orbitals - nocc
pairs = []
for i in range(nocc):
for a in range(nocc, nocc + nvirt):
pairs.append((a, i))
M = numpy.ndarray(shape=[len(pairs), len(pairs)])
for xx, x in enumerate(pairs):
eia = fij[x[0]] - fij[x[1]]
a, i = x
for yy, y in enumerate(pairs):
b, j = y
delta = float(y == x)
gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]
M[xx, yy] = eia * delta + gpart
omega, xvecs = numpy.linalg.eigh(M)
# convert amplitudes to ndarray sorted by excitation energy
nex = len(omega)
amplitudes = []
for ex in range(nex):
t = numpy.ndarray(shape=[nvirt, nocc])
exvec = xvecs[ex]
for xx, x in enumerate(pairs):
a, i = x
t[a - nocc, i] = exvec[xx]
amplitudes.append(ClosedShellAmplitudes(tIA=t))
return ResultCIS(omegas=list(omega), amplitudes=amplitudes)
@property
def rdm1(self):
"""
Returns the 1-RDM if it was computed with the compute_rdms function before
"""
if self._rdm1 is not None:
return self._rdm1
else:
print("1-RDM has not been computed. Return None for 1-RDM.")
return None
@property
def rdm2(self):
"""
Returns the 2-RDM if it was computed with the compute_rdms function before
This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
"""
if self._rdm2 is not None:
return self._rdm2
else:
print("2-RDM has not been computed. Return None for 2-RDM.")
return None
def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,
get_rdm1: bool = True, get_rdm2: bool = True, ordering="dirac"):
"""
Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
a unitary U. This method uses the standard ordering in physics as denoted below.
Note, that the representation of the density matrices depends on the qubit transformation
used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
matrices in the occupation picture.
We only consider real orbitals and thus real-valued RDMs.
The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.
.. math :
\\text{rdm1: } \\gamma^p_q = \\langle \\psi | a^p a_q | \\psi \\rangle
= \\langle U 0 | a^p a_q | U 0 \\rangle
\\text{rdm2: } \\gamma^{pq}_{rs} = \\langle \\psi | a^p a^q a_s a_r | \\psi \\rangle
= \\langle U 0 | a^p a^q a_s a_r | U 0 \\rangle
Parameters
----------
U :
Quantum Circuit to achieve the desired state \\psi = U |0\\rangle, non-optional
variables :
If U is parametrized, then need to hand over a set of fixed variables
spin_free :
Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
get_rdm1, get_rdm2 :
Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
it is recommended to compute them at once.
Returns
-------
"""
# Check whether unitary circuit is not 0
if U is None:
raise TequilaException('Need to specify a Quantum Circuit.')
# Check whether transformation is BKSF.
# Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct
# transformation, because it computes the number of qubits incorrectly in this case.
# A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now
if type(self.transformation).__name__ == "BravyiKitaevFast":
raise TequilaException(
"The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.")
# Set up number of spin-orbitals and molecular orbitals respectively
n_SOs = 2 * self.n_orbitals
n_MOs = self.n_orbitals
def _get_of_op(operator_tuple):
""" Returns operator given by a operator tuple as OpenFermion - Fermion operator """
op = openfermion.FermionOperator(operator_tuple)
return op
def _get_qop_hermitian(of_operator) -> QubitHamiltonian:
""" Returns Hermitian part of Fermion operator as QubitHamiltonian """
qop = self.transformation(of_operator)
#qop = QubitHamiltonian(self.transformation(of_operator))
real, imag = qop.split(hermitian=True)
if real:
return real
elif not real:
raise TequilaException(
"Qubit Hamiltonian does not have a Hermitian part. Operator ={}".format(of_operator))
def _build_1bdy_operators_spinful() -> list:
""" Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp
ops = []
for p in range(n_SOs):
for q in range(p + 1):
op_tuple = ((p, 1), (q, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinful() -> list:
""" Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = -pqsr = -qprs = qpsr
# and = rspq
ops = []
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))
op = _get_of_op(op_tuple)
ops += [op]
return ops
def _build_1bdy_operators_spinfree() -> list:
""" Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetry pq = qp (not changed by spin-summation)
ops = []
for p in range(n_MOs):
for q in range(p + 1):
# Spin aa
op_tuple = ((2 * p, 1), (2 * q, 0))
op = _get_of_op(op_tuple)
# Spin bb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _build_2bdy_operators_spinfree() -> list:
""" Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians """
# Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)
# and = rspq
ops = []
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
# Spin aaaa
op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p != q and r != s) else '0.0 []'
op = _get_of_op(op_tuple)
# Spin abab
op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (
2 * p != 2 * q + 1 and 2 * r != 2 * s + 1) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin baba
op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (
2 * p + 1 != 2 * q and 2 * r + 1 != 2 * s) else '0.0 []'
op += _get_of_op(op_tuple)
# Spin bbbb
op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (
p != q and r != s) else '0.0 []'
op += _get_of_op(op_tuple)
ops += [op]
return ops
def _assemble_rdm1(evals) -> numpy.ndarray:
"""
Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
Same symmetry with or without spin, so we can use the same function
"""
N = n_MOs if spin_free else n_SOs
rdm1 = numpy.zeros([N, N])
ctr: int = 0
for p in range(N):
for q in range(p + 1):
rdm1[p, q] = evals[ctr]
# Symmetry pq = qp
rdm1[q, p] = rdm1[p, q]
ctr += 1
return rdm1
def _assemble_rdm2_spinful(evals) -> numpy.ndarray:
""" Returns spin-ful two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
if p * n_SOs + q >= r * n_SOs + s:
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetries due to anticommutation relations
for p in range(n_SOs):
for q in range(p):
for r in range(n_SOs):
for s in range(r):
rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s] # pqrs = -pqsr
rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s] # pqrs = -qprs
rdm2[q, p, s, r] = rdm2[p, q, r, s] # pqrs = qpsr
return rdm2
def _assemble_rdm2_spinfree(evals) -> numpy.ndarray:
""" Returns spin-free two-particle RDM built by symmetry conditions """
ctr: int = 0
rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
rdm2[p, q, r, s] = evals[ctr]
# Symmetry pqrs = rspq
rdm2[r, s, p, q] = rdm2[p, q, r, s]
ctr += 1
# Further permutational symmetry: pqrs = qpsr
for p, q, r, s in product(range(n_MOs), repeat=4):
if p >= q or r >= s:
rdm2[q, p, s, r] = rdm2[p, q, r, s]
return rdm2
# Build operator lists
qops = []
if spin_free:
qops += _build_1bdy_operators_spinfree() if get_rdm1 else []
qops += _build_2bdy_operators_spinfree() if get_rdm2 else []
else:
qops += _build_1bdy_operators_spinful() if get_rdm1 else []
qops += _build_2bdy_operators_spinful() if get_rdm2 else []
# Transform operator lists to QubitHamiltonians
qops = [_get_qop_hermitian(op) for op in qops]
# Compute expected values
evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)
# Assemble density matrices
# If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type
def _reset_rdm(rdm):
if rdm is not None:
if spin_free and rdm.shape[0] != n_MOs:
return None
if not spin_free and rdm.shape[0] != n_SOs:
return None
return rdm
self._rdm1 = _reset_rdm(self._rdm1)
self._rdm2 = _reset_rdm(self._rdm2)
# Split expectation values in 1- and 2-particle expectation values
if get_rdm1:
len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2
else:
len_1 = 0
evals_1, evals_2 = evals[:len_1], evals[len_1:]
# Build matrices using the expectation values
self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1
if spin_free:
self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2
else:
self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2
if get_rdm2:
rdm2 = NBodyTensor(elems=self.rdm2, ordering="dirac")
rdm2.reorder(to=ordering)
rdm2 = rdm2.elems
self._rdm2 = rdm2
if get_rdm1:
if get_rdm2:
return self.rdm1, self.rdm2
else:
return self.rdm1
elif get_rdm2:
return self.rdm2
else:
warnings.warn("compute_rdms called with instruction to not compute?", TequilaWarning)
def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:
"""
Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.
Parameters
----------
sum_rdm1, sum_rdm2 :
If set to true, perform spin summation on rdm1, rdm2
Returns
-------
rdm1_spinsum, rdm2_spinsum :
The desired spin-free matrices
"""
n_MOs = self.n_orbitals
rdm1_spinsum = None
rdm2_spinsum = None
# Spin summation on rdm1
if sum_rdm1:
# Check whether spin-rdm1 exists
if self._rdm1 is None:
raise TequilaException("The spin-RDM for the 1-RDM does not exist!")
# Check whether existing rdm1 is in spin-orbital basis
if self._rdm1.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])
for p in range(n_MOs):
for q in range(p + 1):
rdm1_spinsum[p, q] += self._rdm1[2 * p, 2 * q]
rdm1_spinsum[p, q] += self._rdm1[2 * p + 1, 2 * q + 1]
for p in range(n_MOs):
for q in range(p):
rdm1_spinsum[q, p] = rdm1_spinsum[p, q]
# Spin summation on rdm2
if sum_rdm2:
# Check whether spin-rdm2 exists
if self._rdm2 is None:
raise TequilaException("The spin-RDM for the 2-RDM does not exist!")
# Check whether existing rdm2 is in spin-orbital basis
if self._rdm2.shape[0] != 2 * n_MOs:
raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
# Do summation
rdm2_spinsum = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
for p, q, r, s in product(range(n_MOs), repeat=4):
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
rdm2_spinsum[p, q, r, s] += self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1]
return rdm1_spinsum, rdm2_spinsum
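# Hedged usage sketch: requires spin-orbital RDMs computed beforehand, e.g.
# mol.compute_rdms(U=U, variables=variables, spin_free=False)
# rdm1_sf, rdm2_sf = mol.rdm_spinsum(sum_rdm1=True, sum_rdm2=True)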
def perturbative_f12_correction(self, rdm1: numpy.ndarray = None, rdm2: numpy.ndarray = None,
gamma: float = 1.4, n_ri: int = None,
external_info: dict = None, **kwargs) -> float:
"""
Computes the spin-free [2]_R12 correction, needing only the 1- and 2-RDM of a reference method
Requires either 1-RDM, 2-RDM or information to compute them in kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
dimensionality of the RI-basis; specify only if you want to truncate the available RI-basis
if None, then the maximum available via tensors / basis-set is used
must not be larger than the size of the available RI-basis, and not smaller than the size of the OBS
for n_ri == dim(OBS), the correction returns zero
external_info :
    for usage in qc_base, need to provide information on where to find the f12-tensor <rs|f_12|pq>;
    pass a dictionary with {"f12_filename": where to find the f12-tensor, "scheme": ordering scheme of the tensor}
kwargs :
e.g. RDM-information via {"U": QCircuit, "variables": optimal angles}, needs to be passed if rdm1,rdm2 not
yet computed
Returns
-------
the f12 correction for the energy
"""
from .f12_corrections._f12_correction_base import ExplicitCorrelationCorrection
correction = ExplicitCorrelationCorrection(mol=self, rdm1=rdm1, rdm2=rdm2, gamma=gamma,
n_ri=n_ri, external_info=external_info, **kwargs)
return correction.compute()
def __str__(self) -> str:
result = str(type(self)) + "\n"
result += "Qubit Encoding\n"
result += str(self.transformation) + "\n\n"
result += "Parameters\n"
for k, v in self.parameters.__dict__.items():
result += "{key:15} : {value:15} \n".format(key=str(k), value=str(v))
result += "\n"
return result
|
goal_conditions_for_demo
|
Infer the goal conditions of a single demonstration.
Args
----
demo: the demonstration to infer the goal of.
behaviors: the behaviors used to check for and remove conflicting conditions.
Returns
-------
goals: list of the goals inferred in the demonstration.
|
"""Module containing methods that allow to identify task goals."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Any, List
from behaviors.common_behaviors import RSequence
from bt_learning.learning_from_demo.constraints_identification import contains_conflicting
from bt_learning.learning_from_demo.demonstration import Demonstration
from py_trees.trees import BehaviourTree
# MASKED: goal_conditions_for_demo function (lines 42-66)
def goal_tree(
goals: List[str],
behaviors: Any,
world_interface: Any
) -> BehaviourTree:
"""
Construct a Behavior Tree starting from the goals.
Args
----
goals: list of all goals inferred from the demonstration.
behaviors: behaviors in the demonstration, as defined in the robot_behaviors package.
world_interface: interface to the robot.
Returns
-------
tree: a Behavior Tree of goal conditions.
"""
tree = RSequence()
for goal in goals:
node, _ = behaviors.get_node_from_string(goal, world_interface, None)
tree.add_child(node)
return tree
|
def goal_conditions_for_demo(
demo: Demonstration,
behaviors: Any
) -> List[str]:
"""
Infer the goal conditions of a single demonstration.
Args
----
demo: the demonstration to infer the goal of.
behaviors: the behaviors used to check for and remove conflicting conditions.
Returns
-------
goals: list of the goals inferred in the demonstration.
"""
goals = []
for i in range(len(demo)-1, -1, -1):
for condition in demo[i].postconditions():
if condition not in goals and not contains_conflicting(behaviors, goals, condition):
goals.append(condition)
goals.reverse()
return goals
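# Hedged usage sketch ("demo" and "behaviors" objects are assumed to exist):
# goals = goal_conditions_for_demo(demo, behaviors)
# The demonstration is scanned from the last action backwards, so postconditions of later
# actions take precedence over earlier, conflicting ones; the final list is in chronological order.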
| 42
| 66
|
"""Module containing methods that allow to identify task goals."""
# Copyright (c) 2022, ABB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with
# or without modification, are permitted provided that
# the following conditions are met:
#
# * Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the
# distribution.
# * Neither the name of ABB nor the names of its
# contributors may be used to endorse or promote
# products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from typing import Any, List
from behaviors.common_behaviors import RSequence
from bt_learning.learning_from_demo.constraints_identification import contains_conflicting
from bt_learning.learning_from_demo.demonstration import Demonstration
from py_trees.trees import BehaviourTree
def goal_conditions_for_demo(
demo: Demonstration,
behaviors: Any
) -> List[str]:
"""
Infer the goal conditions of a single demonstration.
Args
----
demo: the demonstration to infer the goal of.
behaviors: the behaviors used to check for and remove conflicting conditions.
Returns
-------
goals: list of the goals inferred in the demonstration.
"""
goals = []
for i in range(len(demo)-1, -1, -1):
for condition in demo[i].postconditions():
if condition not in goals and not contains_conflicting(behaviors, goals, condition):
goals.append(condition)
goals.reverse()
return goals
def goal_tree(
goals: List[str],
behaviors: Any,
world_interface: Any
) -> BehaviourTree:
"""
Construct a Behavior Tree starting from the goals.
Args
----
goals: list of all goals inferred from the demonstration.
behaviors: behaviors in the demonstration, as defined in the robot_behaviors package.
world_interface: interface to the robot.
Returns
-------
tree: a Behavior Tree of goal conditions.
"""
tree = RSequence()
for goal in goals:
node, _ = behaviors.get_node_from_string(goal, world_interface, None)
tree.add_child(node)
return tree
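# Hedged usage sketch combining the two functions above
# ("demo", "behaviors" and "world_interface" objects are assumed to exist):
# goals = goal_conditions_for_demo(demo, behaviors)
# tree = goal_tree(goals, behaviors, world_interface)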
|
_save_custom_objects
|
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
|
"""
The ``mlflow.keras`` module provides an API for logging and loading Keras models. This module
exports Keras models with the following flavors:
Keras (native) format
This is the main flavor that can be loaded back into Keras.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import os
import yaml
import gorilla
import tempfile
import shutil
import pandas as pd
from distutils.version import LooseVersion
from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils import try_mlflow_log, log_fn_args_as_params
FLAVOR_NAME = "keras"
# File name to which custom objects cloudpickle is saved - used during save and load
_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle"
_KERAS_MODULE_SPEC_PATH = "keras_module.txt"
# File name to which keras model is saved
_MODEL_SAVE_PATH = "model.h5"
# Conda env subpath when saving/loading model
_CONDA_ENV_SUBPATH = "conda.yaml"
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import tensorflow as tf
conda_deps = [] # if we use tf.keras we only need to declare dependency on tensorflow
pip_deps = []
if keras_module is None:
import keras
keras_module = keras
if keras_module.__name__ == "keras":
# Temporary fix: the created conda environment has issues installing keras >= 2.3.1
if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'):
conda_deps.append("keras=={}".format(keras_module.__version__))
else:
pip_deps.append("keras=={}".format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append("cloudpickle=={}".format(cloudpickle.__version__))
# Temporary fix: conda-forge currently does not have tensorflow > 1.14
# The Keras pyfunc representation requires the TensorFlow
# backend for Keras. Therefore, the conda environment must
# include TensorFlow
if LooseVersion(tf.__version__) <= LooseVersion('1.13.2'):
conda_deps.append("tensorflow=={}".format(tf.__version__))
else:
pip_deps.append("tensorflow=={}".format(tf.__version__))
return _mlflow_conda_env(
additional_conda_deps=conda_deps,
additional_pip_deps=pip_deps,
additional_conda_channels=None)
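# Hedged usage sketch: the returned dictionary can be passed as conda_env to save_model()/log_model(), e.g.
# env = get_default_conda_env(include_cloudpickle=True)
# mlflow.keras.save_model(keras_model, path, conda_env=env)   # keras_model and path assumed to exist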
def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None,
keras_module=None,
signature: ModelSignature = None, input_example: ModelInputExample = None,
**kwargs):
"""
Save a Keras model to a path on the local file system.
:param keras_model: Keras model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the
dependencies contained in :func:`get_default_conda_env()`. If
``None``, the default :func:`get_default_conda_env()` environment is
added to the model. The following is an *example* dictionary
representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param mlflow_model: MLflow model config this flavor is being added to.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
.. code-block:: python
:caption: Example
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model_path = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Save the model as an MLflow Model
mlflow.keras.save_model(keras_model, keras_model_path)
"""
if keras_module is None:
def _is_plain_keras(model):
try:
# NB: Network is the first parent with save method
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
# NB: Network is not exposed in tf.keras, we check for Model instead.
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module("keras")
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module("tensorflow.keras")
else:
raise MlflowException("Unable to infer keras module from the model, please specify "
"which keras module ('keras' or 'tensorflow.keras') is to be "
"used to save and load the model.")
elif type(keras_module) == str:
keras_module = importlib.import_module(keras_module)
# check if path exists
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
# construct new data folder in existing path
data_subpath = "data"
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
# save custom objects if there are custom objects
if custom_objects is not None:
_save_custom_objects(data_path, custom_objects)
# save keras module spec to path/data/keras_module.txt
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f:
f.write(keras_module.__name__)
# save keras model to path/data/model.h5
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
# The Databricks Filesystem uses a FUSE implementation that does not support
# random writes. It causes an error.
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush() # force flush the data
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
# update flavor info to mlflow_model
mlflow_model.add_flavor(FLAVOR_NAME,
keras_module=keras_module.__name__,
keras_version=keras_module.__version__,
data=data_subpath)
# save conda.yaml info to path/conda.yml
if conda_env is None:
conda_env = get_default_conda_env(include_cloudpickle=custom_objects is not None,
keras_module=keras_module)
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# append loader_module, data and env data to mlflow_model
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras",
data=data_subpath, env=_CONDA_ENV_SUBPATH)
# save mlflow_model to path/MLmodel
mlflow_model.save(os.path.join(path, "MLmodel"))
def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None,
registered_model_name=None, signature: ModelSignature=None,
input_example: ModelInputExample=None, **kwargs):
"""
Log a Keras model as an MLflow artifact for the current run.
:param keras_model: Keras model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: Either a dictionary representation of a Conda environment or
the path to a Conda environment yaml file.
If provided, this describes the environment this model should be
run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`mlflow.keras.get_default_conda_env()` environment is added to
the model. The following is an *example* dictionary representation of a
Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
.. code-block:: python
:caption: Example
from keras.layers import Dense
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Log metrics and log the model
with mlflow.start_run() as run:
mlflow.keras.log_model(keras_model, "models")
"""
Model.log(artifact_path=artifact_path, flavor=mlflow.keras,
keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects,
keras_module=keras_module, registered_model_name=registered_model_name,
signature=signature, input_example=input_example,
**kwargs)
# MASKED: _save_custom_objects function (lines 318-333)
def _load_model(model_path, keras_module, **kwargs):
keras_models = importlib.import_module(keras_module.__name__ + ".models")
custom_objects = kwargs.pop("custom_objects", {})
custom_objects_path = None
if os.path.isdir(model_path):
if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)):
custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)
model_path = os.path.join(model_path, _MODEL_SAVE_PATH)
if custom_objects_path is not None:
import cloudpickle
with open(custom_objects_path, "rb") as in_f:
pickled_custom_objects = cloudpickle.load(in_f)
pickled_custom_objects.update(custom_objects)
custom_objects = pickled_custom_objects
from distutils.version import StrictVersion
if StrictVersion(keras_module.__version__.split('-')[0]) >= StrictVersion("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
import h5py
with h5py.File(os.path.abspath(model_path), "r") as model_path:
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
class _KerasModelWrapper:
def __init__(self, keras_model, graph, sess):
self.keras_model = keras_model
self._graph = graph
self._sess = sess
def predict(self, dataframe):
# In TensorFlow < 2.0, we use a graph and session to predict
if self._graph is not None:
with self._graph.as_default():
with self._sess.as_default():
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
# In TensorFlow >= 2.0, we do not use a graph and session to predict
else:
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
predicted.index = dataframe.index
return predicted
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
"""
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), "r") as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module(keras_module.__name__ + ".backend")
if keras_module.__name__ == "tensorflow.keras" or K.backend() == 'tensorflow':
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
graph = tf.Graph()
sess = tf.Session(graph=graph)
# By default tf backed models depend on the global graph and session.
# We create and use a new Graph and Session and store them with the model.
# This way the model is independent of the global state.
with graph.as_default():
with sess.as_default(): # pylint:disable=not-context-manager
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException("Unsupported backend '%s'" % K._BACKEND)
def load_model(model_uri, **kwargs):
"""
Load a Keras model from a local file or a run.
Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: A Keras model instance.
.. code-block:: python
:caption: Example
# Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame
keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")
predictions = keras_model.predict(x_test)
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get("keras_module", "keras"))
keras_model_artifacts_path = os.path.join(
local_model_path,
flavor_conf.get("data", _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs)
@experimental
def autolog():
# pylint: disable=E0611
"""
Enables automatic logging from Keras to MLflow. Autologging captures the following information:
**Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- Metrics associated with the ``EarlyStopping`` callbacks: ``stopped_epoch``,
``restored_epoch``, ``restore_best_weight``, ``last_epoch``, etc
- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,
``patience``, ``baseline``, ``restore_best_weights``, etc
**Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end
.. code-block:: python
:caption: Example
import mlflow
import mlflow.keras
# Build, compile, enable autologging, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
# autolog your metrics, parameters, and model
mlflow.keras.autolog()
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))
``EarlyStopping Integration with Keras AutoLogging``
MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or
``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to be ``True``,
then MLflow will log the metrics associated with the restored model as a final, extra step.
The epoch of the restored model will also be logged as the metric ``restored_epoch``.
This allows for easy comparison between the actual metrics of the restored model and
the metrics of other models.
If ``restore_best_weights`` is set to be ``False``, then MLflow will not log an additional step.
Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,
which indicates the epoch at which training stopped due to early stopping.
If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.
MLflow will also log the parameters of the ``EarlyStopping`` callback,
excluding ``mode`` and ``verbose``.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params when training begins
"""
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = self.model.optimizer.lr if \
type(self.model.optimizer.lr) is float \
else keras.backend.eval(self.model.optimizer.lr)
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = self.model.optimizer.epsilon if \
type(self.model.optimizer.epsilon) is float \
else keras.backend.eval(self.model.optimizer.epsilon)
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, "model_summary.txt")
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
# As of Keras 2.4.0, Keras Callback implementations must define the following
# methods indicating whether or not the callback overrides functions for
# batch training/testing/inference
def _implements_train_batch_hooks(self): return False
def _implements_test_batch_hooks(self): return False
def _implements_predict_batch_hooks(self): return False
def _early_stop_check(callbacks):
if LooseVersion(keras.__version__) < LooseVersion('2.3.0'):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor,
'min_delta': callback.min_delta,
'patience': callback.patience,
'baseline': callback.baseline,
'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception: # pylint: disable=W0703
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception: # pylint: disable=W0703
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, patience = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
# Weights are restored only if early stopping occurs
if stopped_epoch != 0 and restore_best_weights:
restored_epoch = stopped_epoch - max(1, patience)
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch]
for key in history.history.keys()}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if not mlflow.active_run():
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
# Checking if the 'callbacks' argument of the function is set
if len(args) > callback_arg_index:
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif 'callbacks' in kwargs:
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings))
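# Usage sketch (illustrative only; `build_model`, `x_train`, `y_train`, `x_val`
# and `y_val` are placeholder names, not part of this module). It shows how
# autolog() pairs with an ``EarlyStopping`` callback that has
# ``restore_best_weights=True``, so the ``restored_epoch`` metric described in
# the docstring above gets logged alongside the restored metrics.
def _example_autolog_with_early_stopping(build_model, x_train, y_train, x_val, y_val):
    import mlflow.keras
    from keras.callbacks import EarlyStopping
    mlflow.keras.autolog()
    model = build_model()
    early_stop = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
    # fit() is patched by autolog(), so metrics, params and the model are logged
    return model.fit(x_train, y_train, epochs=20,
                     validation_data=(x_val, y_val), callbacks=[early_stop])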
|
def _save_custom_objects(path, custom_objects):
"""
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
"""
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, "wb") as out_f:
cloudpickle.dump(custom_objects, out_f)
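# Round-trip sketch (illustrative helper, not part of the public API; `tmp_dir`
# and `custom_objects` are placeholders). It shows that the cloudpickle file
# written above can be read straight back into the same name-to-callable
# mapping, which is what _load_model does before deserializing the Keras model.
def _example_custom_objects_roundtrip(tmp_dir, custom_objects):
    import cloudpickle
    _save_custom_objects(tmp_dir, custom_objects)
    with open(os.path.join(tmp_dir, _CUSTOM_OBJECTS_SAVE_PATH), "rb") as in_f:
        return cloudpickle.load(in_f)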
| 318
| 333
|
"""
The ``mlflow.keras`` module provides an API for logging and loading Keras models. This module
exports Keras models with the following flavors:
Keras (native) format
This is the main flavor that can be loaded back into Keras.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import os
import yaml
import gorilla
import tempfile
import shutil
import pandas as pd
from distutils.version import LooseVersion
from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils import try_mlflow_log, log_fn_args_as_params
FLAVOR_NAME = "keras"
# File name to which custom objects cloudpickle is saved - used during save and load
_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle"
_KERAS_MODULE_SPEC_PATH = "keras_module.txt"
# File name to which keras model is saved
_MODEL_SAVE_PATH = "model.h5"
# Conda env subpath when saving/loading model
_CONDA_ENV_SUBPATH = "conda.yaml"
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import tensorflow as tf
conda_deps = [] # if we use tf.keras we only need to declare dependency on tensorflow
pip_deps = []
if keras_module is None:
import keras
keras_module = keras
if keras_module.__name__ == "keras":
# Temporary fix: the created conda environment has issues installing keras >= 2.3.1
if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'):
conda_deps.append("keras=={}".format(keras_module.__version__))
else:
pip_deps.append("keras=={}".format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append("cloudpickle=={}".format(cloudpickle.__version__))
# Temporary fix: conda-forge currently does not have tensorflow > 1.14
# The Keras pyfunc representation requires the TensorFlow
# backend for Keras. Therefore, the conda environment must
# include TensorFlow
if LooseVersion(tf.__version__) <= LooseVersion('1.13.2'):
conda_deps.append("tensorflow=={}".format(tf.__version__))
else:
pip_deps.append("tensorflow=={}".format(tf.__version__))
return _mlflow_conda_env(
additional_conda_deps=conda_deps,
additional_pip_deps=pip_deps,
additional_conda_channels=None)
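# Inspection sketch (illustrative only). The exact dependency pins in the
# returned environment depend on the keras/tensorflow versions installed
# locally, so the printed YAML is indicative rather than fixed.
def _example_print_default_conda_env():
    env = get_default_conda_env(include_cloudpickle=True)
    print(yaml.safe_dump(env, default_flow_style=False))
    return env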
def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None,
keras_module=None,
signature: ModelSignature = None, input_example: ModelInputExample = None,
**kwargs):
"""
Save a Keras model to a path on the local file system.
:param keras_model: Keras model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the
dependencies contained in :func:`get_default_conda_env()`. If
``None``, the default :func:`get_default_conda_env()` environment is
added to the model. The following is an *example* dictionary
representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param mlflow_model: MLflow model config this flavor is being added to.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
.. code-block:: python
:caption: Example
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model_path = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Save the model as an MLflow Model
mlflow.keras.save_model(keras_model, keras_model_path)
"""
if keras_module is None:
def _is_plain_keras(model):
try:
# NB: Network is the first parent with save method
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
# NB: Network is not exposed in tf.keras, we check for Model instead.
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module("keras")
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module("tensorflow.keras")
else:
raise MlflowException("Unable to infer keras module from the model, please specify "
"which keras module ('keras' or 'tensorflow.keras') is to be "
"used to save and load the model.")
elif type(keras_module) == str:
keras_module = importlib.import_module(keras_module)
# check if path exists
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
# construct new data folder in existing path
data_subpath = "data"
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
# save custom objects if there are custom objects
if custom_objects is not None:
_save_custom_objects(data_path, custom_objects)
# save keras module spec to path/data/keras_module.txt
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f:
f.write(keras_module.__name__)
# save keras model to path/data/model.h5
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
# The Databricks Filesystem uses a FUSE implementation that does not support
# random writes; saving the model directly to it fails, so write to a local
# temporary file first and then copy it into place.
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush() # force flush the data
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
# update flavor info to mlflow_model
mlflow_model.add_flavor(FLAVOR_NAME,
keras_module=keras_module.__name__,
keras_version=keras_module.__version__,
data=data_subpath)
# save conda.yaml info to path/conda.yaml
if conda_env is None:
conda_env = get_default_conda_env(include_cloudpickle=custom_objects is not None,
keras_module=keras_module)
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# append loader_module, data and env data to mlflow_model
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras",
data=data_subpath, env=_CONDA_ENV_SUBPATH)
# save mlflow_model to path/MLmodel
mlflow_model.save(os.path.join(path, "MLmodel"))
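# Sketch (illustrative only; `model` and `path` are placeholders) of the
# explicit keras_module override described in the docstring above: passing the
# module name as a string skips the plain-keras / tf.keras inference step.
def _example_save_tf_keras_model(model, path):
    return save_model(model, path, keras_module="tensorflow.keras")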
def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None,
registered_model_name=None, signature: ModelSignature=None,
input_example: ModelInputExample=None, **kwargs):
"""
Log a Keras model as an MLflow artifact for the current run.
:param keras_model: Keras model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: Either a dictionary representation of a Conda environment or
the path to a Conda environment yaml file.
If provided, this describes the environment this model should be
run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`mlflow.keras.get_default_conda_env()` environment is added to
the model. The following is an *example* dictionary representation of a
Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
.. code-block:: python
:caption: Example
from keras.layers import Dense
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Log metrics and log the model
with mlflow.start_run() as run:
mlflow.keras.log_model(keras_model, "models")
"""
Model.log(artifact_path=artifact_path, flavor=mlflow.keras,
keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects,
keras_module=keras_module, registered_model_name=registered_model_name,
signature=signature, input_example=input_example,
**kwargs)
def _save_custom_objects(path, custom_objects):
"""
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
"""
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, "wb") as out_f:
cloudpickle.dump(custom_objects, out_f)
def _load_model(model_path, keras_module, **kwargs):
keras_models = importlib.import_module(keras_module.__name__ + ".models")
custom_objects = kwargs.pop("custom_objects", {})
custom_objects_path = None
if os.path.isdir(model_path):
if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)):
custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)
model_path = os.path.join(model_path, _MODEL_SAVE_PATH)
if custom_objects_path is not None:
import cloudpickle
with open(custom_objects_path, "rb") as in_f:
pickled_custom_objects = cloudpickle.load(in_f)
pickled_custom_objects.update(custom_objects)
custom_objects = pickled_custom_objects
from distutils.version import StrictVersion
if StrictVersion(keras_module.__version__.split('-')[0]) >= StrictVersion("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
import h5py
with h5py.File(os.path.abspath(model_path), "r") as model_path:
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
class _KerasModelWrapper:
def __init__(self, keras_model, graph, sess):
self.keras_model = keras_model
self._graph = graph
self._sess = sess
def predict(self, dataframe):
# In TensorFlow < 2.0, we use a graph and session to predict
if self._graph is not None:
with self._graph.as_default():
with self._sess.as_default():
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
# In TensorFlow >= 2.0, we do not use a graph and session to predict
else:
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
predicted.index = dataframe.index
return predicted
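# Sketch (illustrative only; `keras_model` and `df` are placeholders) of the
# TensorFlow >= 2.0 path, where the wrapper is built without a graph/session
# and predict() returns a DataFrame indexed like its input.
def _example_wrap_and_predict(keras_model, df):
    wrapper = _KerasModelWrapper(keras_model, graph=None, sess=None)
    predictions = wrapper.predict(df)
    assert list(predictions.index) == list(df.index)
    return predictions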
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
"""
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), "r") as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module(keras_module.__name__ + ".backend")
if keras_module.__name__ == "tensorflow.keras" or K.backend() == 'tensorflow':
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
graph = tf.Graph()
sess = tf.Session(graph=graph)
# By default, TF-backed models depend on the global graph and session.
# We create and use a new Graph and Session and store them with the model.
# This way the model is independent of the global state.
with graph.as_default():
with sess.as_default(): # pylint:disable=not-context-manager
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException("Unsupported backend '%s'" % K._BACKEND)
def load_model(model_uri, **kwargs):
"""
Load a Keras model from a local file or a run.
Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: A Keras model instance.
.. code-block:: python
:caption: Example
# Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame
keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")
predictions = keras_model.predict(x_test)
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get("keras_module", "keras"))
keras_model_artifacts_path = os.path.join(
local_model_path,
flavor_conf.get("data", _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs)
@experimental
def autolog():
# pylint: disable=E0611
"""
Enables automatic logging from Keras to MLflow. Autologging captures the following information:
**Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- Metrics associated with the ``EarlyStopping`` callback: ``stopped_epoch``,
``restored_epoch``, ``restore_best_weights``, ``last_epoch``, etc.
- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,
``patience``, ``baseline``, ``restore_best_weights``, etc.
**Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end
.. code-block:: python
:caption: Example
import mlflow
import mlflow.keras
# Build, compile, enable autologging, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
# autolog your metrics, parameters, and model
mlflow.keras.autolog()
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))
``EarlyStopping Integration with Keras AutoLogging``
MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or
``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to ``True``,
then MLflow will log the metrics associated with the restored model as a final, extra step.
The epoch of the restored model will also be logged as the metric ``restored_epoch``.
This allows for easy comparison between the actual metrics of the restored model and
the metrics of other models.
If ``restore_best_weights`` is set to ``False``, MLflow will not log an additional step.
Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,
which indicates the epoch at which training stopped due to early stopping.
If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.
MLflow will also log the parameters of the ``EarlyStopping`` callback,
excluding ``mode`` and ``verbose``.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params when training begins
"""
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = self.model.optimizer.lr if \
type(self.model.optimizer.lr) is float \
else keras.backend.eval(self.model.optimizer.lr)
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = self.model.optimizer.epsilon if \
type(self.model.optimizer.epsilon) is float \
else keras.backend.eval(self.model.optimizer.epsilon)
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, "model_summary.txt")
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
# As of Keras 2.4.0, Keras Callback implementations must define the following
# methods indicating whether or not the callback overrides functions for
# batch training/testing/inference
def _implements_train_batch_hooks(self): return False
def _implements_test_batch_hooks(self): return False
def _implements_predict_batch_hooks(self): return False
def _early_stop_check(callbacks):
if LooseVersion(keras.__version__) < LooseVersion('2.3.0'):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor,
'min_delta': callback.min_delta,
'patience': callback.patience,
'baseline': callback.baseline,
'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception: # pylint: disable=W0703
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception: # pylint: disable=W0703
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, patience = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
# Weights are restored only if early stopping occurs
if stopped_epoch != 0 and restore_best_weights:
restored_epoch = stopped_epoch - max(1, patience)
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch]
for key in history.history.keys()}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if not mlflow.active_run():
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
# Checking if the 'callbacks' argument of the function is set
if len(args) > callback_arg_index:
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif 'callbacks' in kwargs:
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings))
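# Illustration (helper added for clarity only) of the restored_epoch
# bookkeeping in _log_early_stop_callback_metrics above: with patience=3 and
# early stopping at epoch 10, the restored weights come from epoch
# 10 - max(1, 3) = 7; a stopped_epoch of 0 means training ran to completion
# and nothing is restored.
def _example_restored_epoch(stopped_epoch, patience):
    if stopped_epoch == 0:
        return None
    return stopped_epoch - max(1, patience)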
|
_load_pyfunc
|
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
|
"""
The ``mlflow.keras`` module provides an API for logging and loading Keras models. This module
exports Keras models with the following flavors:
Keras (native) format
This is the main flavor that can be loaded back into Keras.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import os
import yaml
import gorilla
import tempfile
import shutil
import pandas as pd
from distutils.version import LooseVersion
from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils import try_mlflow_log, log_fn_args_as_params
FLAVOR_NAME = "keras"
# File name to which custom objects cloudpickle is saved - used during save and load
_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle"
_KERAS_MODULE_SPEC_PATH = "keras_module.txt"
# File name to which keras model is saved
_MODEL_SAVE_PATH = "model.h5"
# Conda env subpath when saving/loading model
_CONDA_ENV_SUBPATH = "conda.yaml"
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import tensorflow as tf
conda_deps = [] # if we use tf.keras we only need to declare dependency on tensorflow
pip_deps = []
if keras_module is None:
import keras
keras_module = keras
if keras_module.__name__ == "keras":
# Temporary fix: the created conda environment has issues installing keras >= 2.3.1
if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'):
conda_deps.append("keras=={}".format(keras_module.__version__))
else:
pip_deps.append("keras=={}".format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append("cloudpickle=={}".format(cloudpickle.__version__))
# Temporary fix: conda-forge currently does not have tensorflow > 1.14
# The Keras pyfunc representation requires the TensorFlow
# backend for Keras. Therefore, the conda environment must
# include TensorFlow
if LooseVersion(tf.__version__) <= LooseVersion('1.13.2'):
conda_deps.append("tensorflow=={}".format(tf.__version__))
else:
pip_deps.append("tensorflow=={}".format(tf.__version__))
return _mlflow_conda_env(
additional_conda_deps=conda_deps,
additional_pip_deps=pip_deps,
additional_conda_channels=None)
def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None,
keras_module=None,
signature: ModelSignature = None, input_example: ModelInputExample = None,
**kwargs):
"""
Save a Keras model to a path on the local file system.
:param keras_model: Keras model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the
dependencies contained in :func:`get_default_conda_env()`. If
``None``, the default :func:`get_default_conda_env()` environment is
added to the model. The following is an *example* dictionary
representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param mlflow_model: MLflow model config this flavor is being added to.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
.. code-block:: python
:caption: Example
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model_path = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Save the model as an MLflow Model
mlflow.keras.save_model(keras_model, keras_model_path)
"""
if keras_module is None:
def _is_plain_keras(model):
try:
# NB: Network is the first parent with save method
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
# NB: Network is not exposed in tf.keras, we check for Model instead.
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module("keras")
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module("tensorflow.keras")
else:
raise MlflowException("Unable to infer keras module from the model, please specify "
"which keras module ('keras' or 'tensorflow.keras') is to be "
"used to save and load the model.")
elif type(keras_module) == str:
keras_module = importlib.import_module(keras_module)
# check if path exists
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
# construct new data folder in existing path
data_subpath = "data"
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
# save custom objects if there are custom objects
if custom_objects is not None:
_save_custom_objects(data_path, custom_objects)
# save keras module spec to path/data/keras_module.txt
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f:
f.write(keras_module.__name__)
# save keras model to path/data/model.h5
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
# The Databricks Filesystem uses a FUSE implementation that does not support
# random writes; saving the model directly to it fails, so write to a local
# temporary file first and then copy it into place.
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush() # force flush the data
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
# update flavor info to mlflow_model
mlflow_model.add_flavor(FLAVOR_NAME,
keras_module=keras_module.__name__,
keras_version=keras_module.__version__,
data=data_subpath)
# save conda.yaml info to path/conda.yaml
if conda_env is None:
conda_env = get_default_conda_env(include_cloudpickle=custom_objects is not None,
keras_module=keras_module)
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# append loader_module, data and env data to mlflow_model
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras",
data=data_subpath, env=_CONDA_ENV_SUBPATH)
# save mlflow_model to path/MLmodel
mlflow_model.save(os.path.join(path, "MLmodel"))
def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None,
registered_model_name=None, signature: ModelSignature=None,
input_example: ModelInputExample=None, **kwargs):
"""
Log a Keras model as an MLflow artifact for the current run.
:param keras_model: Keras model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: Either a dictionary representation of a Conda environment or
the path to a Conda environment yaml file.
If provided, this describes the environment this model should be
run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`mlflow.keras.get_default_conda_env()` environment is added to
the model. The following is an *example* dictionary representation of a
Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
.. code-block:: python
:caption: Example
from keras.layers import Dense
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Log metrics and log the model
with mlflow.start_run() as run:
mlflow.keras.log_model(keras_model, "models")
"""
Model.log(artifact_path=artifact_path, flavor=mlflow.keras,
keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects,
keras_module=keras_module, registered_model_name=registered_model_name,
signature=signature, input_example=input_example,
**kwargs)
def _save_custom_objects(path, custom_objects):
"""
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
"""
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, "wb") as out_f:
cloudpickle.dump(custom_objects, out_f)
def _load_model(model_path, keras_module, **kwargs):
keras_models = importlib.import_module(keras_module.__name__ + ".models")
custom_objects = kwargs.pop("custom_objects", {})
custom_objects_path = None
if os.path.isdir(model_path):
if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)):
custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)
model_path = os.path.join(model_path, _MODEL_SAVE_PATH)
if custom_objects_path is not None:
import cloudpickle
with open(custom_objects_path, "rb") as in_f:
pickled_custom_objects = cloudpickle.load(in_f)
pickled_custom_objects.update(custom_objects)
custom_objects = pickled_custom_objects
from distutils.version import StrictVersion
if StrictVersion(keras_module.__version__.split('-')[0]) >= StrictVersion("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
import h5py
with h5py.File(os.path.abspath(model_path), "r") as model_path:
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
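# Sketch (illustrative only; `model_h5_path` and `custom_objects` are
# placeholders) of loading the saved .h5 directly with the Keras API, which is
# effectively what _load_model does once any pickled custom objects have been
# merged in.
def _example_direct_keras_load(model_h5_path, custom_objects=None):
    from keras.models import load_model as keras_load_model
    return keras_load_model(model_h5_path, custom_objects=custom_objects or {})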
class _KerasModelWrapper:
def __init__(self, keras_model, graph, sess):
self.keras_model = keras_model
self._graph = graph
self._sess = sess
def predict(self, dataframe):
# In TensorFlow < 2.0, we use a graph and session to predict
if self._graph is not None:
with self._graph.as_default():
with self._sess.as_default():
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
# In TensorFlow >= 2.0, we do not use a graph and session to predict
else:
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
predicted.index = dataframe.index
return predicted
# MASKED: _load_pyfunc function (lines 381-414)
def load_model(model_uri, **kwargs):
"""
Load a Keras model from a local file or a run.
Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: A Keras model instance.
.. code-block:: python
:caption: Example
# Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame
keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")
predictions = keras_model.predict(x_test)
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get("keras_module", "keras"))
keras_model_artifacts_path = os.path.join(
local_model_path,
flavor_conf.get("data", _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs)
@experimental
def autolog():
# pylint: disable=E0611
"""
Enables automatic logging from Keras to MLflow. Autologging captures the following information:
**Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- Metrics associated with the ``EarlyStopping`` callback: ``stopped_epoch``,
``restored_epoch``, ``restore_best_weights``, ``last_epoch``, etc.
- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,
``patience``, ``baseline``, ``restore_best_weights``, etc.
**Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end
.. code-block:: python
:caption: Example
import mlflow
import mlflow.keras
# Build, compile, enable autologging, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
# autolog your metrics, parameters, and model
mlflow.keras.autolog()
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))
``EarlyStopping Integration with Keras AutoLogging``
MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or
``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to ``True``,
then MLflow will log the metrics associated with the restored model as a final, extra step.
The epoch of the restored model will also be logged as the metric ``restored_epoch``.
This allows for easy comparison between the actual metrics of the restored model and
the metrics of other models.
If ``restore_best_weights`` is set to ``False``, MLflow will not log an additional step.
Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,
which indicates the epoch at which training stopped due to early stopping.
If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.
MLflow will also log the parameters of the ``EarlyStopping`` callback,
excluding ``mode`` and ``verbose``.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params when training begins
"""
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = self.model.optimizer.lr if \
type(self.model.optimizer.lr) is float \
else keras.backend.eval(self.model.optimizer.lr)
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = self.model.optimizer.epsilon if \
type(self.model.optimizer.epsilon) is float \
else keras.backend.eval(self.model.optimizer.epsilon)
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, "model_summary.txt")
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
# As of Keras 2.4.0, Keras Callback implementations must define the following
# methods indicating whether or not the callback overrides functions for
# batch training/testing/inference
def _implements_train_batch_hooks(self): return False
def _implements_test_batch_hooks(self): return False
def _implements_predict_batch_hooks(self): return False
def _early_stop_check(callbacks):
if LooseVersion(keras.__version__) < LooseVersion('2.3.0'):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor,
'min_delta': callback.min_delta,
'patience': callback.patience,
'baseline': callback.baseline,
'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception: # pylint: disable=W0703
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception: # pylint: disable=W0703
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, patience = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
# Weights are restored only if early stopping occurs
if stopped_epoch != 0 and restore_best_weights:
restored_epoch = stopped_epoch - max(1, patience)
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch]
for key in history.history.keys()}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if not mlflow.active_run():
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
# Checking if the 'callbacks' argument of the function is set
if len(args) > callback_arg_index:
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif 'callbacks' in kwargs:
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings))
|
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
"""
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), "r") as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module(keras_module.__name__ + ".backend")
if keras_module.__name__ == "tensorflow.keras" or K.backend() == 'tensorflow':
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
graph = tf.Graph()
sess = tf.Session(graph=graph)
# By default, TF-backed models depend on the global graph and session.
# We create and use a new Graph and Session and store them with the model.
# This way the model is independent of the global state.
with graph.as_default():
with sess.as_default(): # pylint:disable=not-context-manager
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException("Unsupported backend '%s'" % K._BACKEND)
| 381
| 414
|
"""
The ``mlflow.keras`` module provides an API for logging and loading Keras models. This module
exports Keras models with the following flavors:
Keras (native) format
This is the main flavor that can be loaded back into Keras.
:py:mod:`mlflow.pyfunc`
Produced for use by generic pyfunc-based deployment tools and batch inference.
"""
import importlib
import os
import yaml
import gorilla
import tempfile
import shutil
import pandas as pd
from distutils.version import LooseVersion
from mlflow import pyfunc
from mlflow.models import Model
import mlflow.tracking
from mlflow.exceptions import MlflowException
from mlflow.models.signature import ModelSignature
from mlflow.models.utils import ModelInputExample, _save_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.model_utils import _get_flavor_configuration
from mlflow.utils.annotations import experimental
from mlflow.utils.autologging_utils import try_mlflow_log, log_fn_args_as_params
FLAVOR_NAME = "keras"
# File name to which custom objects cloudpickle is saved - used during save and load
_CUSTOM_OBJECTS_SAVE_PATH = "custom_objects.cloudpickle"
_KERAS_MODULE_SPEC_PATH = "keras_module.txt"
# File name to which keras model is saved
_MODEL_SAVE_PATH = "model.h5"
# Conda env subpath when saving/loading model
_CONDA_ENV_SUBPATH = "conda.yaml"
def get_default_conda_env(include_cloudpickle=False, keras_module=None):
"""
:return: The default Conda environment for MLflow Models produced by calls to
:func:`save_model()` and :func:`log_model()`.
"""
import tensorflow as tf
conda_deps = [] # if we use tf.keras we only need to declare dependency on tensorflow
pip_deps = []
if keras_module is None:
import keras
keras_module = keras
if keras_module.__name__ == "keras":
# Temporary fix: the created conda environment has issues installing keras >= 2.3.1
if LooseVersion(keras_module.__version__) < LooseVersion('2.3.1'):
conda_deps.append("keras=={}".format(keras_module.__version__))
else:
pip_deps.append("keras=={}".format(keras_module.__version__))
if include_cloudpickle:
import cloudpickle
pip_deps.append("cloudpickle=={}".format(cloudpickle.__version__))
# Temporary fix: conda-forge currently does not have tensorflow > 1.14
# The Keras pyfunc representation requires the TensorFlow
# backend for Keras. Therefore, the conda environment must
# include TensorFlow
if LooseVersion(tf.__version__) <= LooseVersion('1.13.2'):
conda_deps.append("tensorflow=={}".format(tf.__version__))
else:
pip_deps.append("tensorflow=={}".format(tf.__version__))
return _mlflow_conda_env(
additional_conda_deps=conda_deps,
additional_pip_deps=pip_deps,
additional_conda_channels=None)
def save_model(keras_model, path, conda_env=None, mlflow_model=None, custom_objects=None,
keras_module=None,
signature: ModelSignature = None, input_example: ModelInputExample = None,
**kwargs):
"""
Save a Keras model to a path on the local file system.
:param keras_model: Keras model to be saved.
:param path: Local path where the model is to be saved.
:param conda_env: Either a dictionary representation of a Conda environment or the path to a
Conda environment yaml file. If provided, this describes the environment
this model should be run in. At minimum, it should specify the
dependencies contained in :func:`get_default_conda_env()`. If
``None``, the default :func:`get_default_conda_env()` environment is
added to the model. The following is an *example* dictionary
representation of a Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param mlflow_model: MLflow model config this flavor is being added to.
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
.. code-block:: python
:caption: Example
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model_path = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Save the model as an MLflow Model
mlflow.keras.save_model(keras_model, keras_model_path)
"""
if keras_module is None:
def _is_plain_keras(model):
try:
# NB: Network is the first parent with save method
import keras.engine.network
return isinstance(model, keras.engine.network.Network)
except ImportError:
return False
def _is_tf_keras(model):
try:
# NB: Network is not exposed in tf.keras, we check for Model instead.
import tensorflow.keras.models
return isinstance(model, tensorflow.keras.models.Model)
except ImportError:
return False
if _is_plain_keras(keras_model):
keras_module = importlib.import_module("keras")
elif _is_tf_keras(keras_model):
keras_module = importlib.import_module("tensorflow.keras")
else:
raise MlflowException("Unable to infer keras module from the model, please specify "
"which keras module ('keras' or 'tensorflow.keras') is to be "
"used to save and load the model.")
elif type(keras_module) == str:
keras_module = importlib.import_module(keras_module)
# fail early if the target path already exists
path = os.path.abspath(path)
if os.path.exists(path):
raise MlflowException("Path '{}' already exists".format(path))
# construct new data folder in existing path
data_subpath = "data"
data_path = os.path.join(path, data_subpath)
os.makedirs(data_path)
if mlflow_model is None:
mlflow_model = Model()
if signature is not None:
mlflow_model.signature = signature
if input_example is not None:
_save_example(mlflow_model, input_example, path)
# save custom objects if there are custom objects
if custom_objects is not None:
_save_custom_objects(data_path, custom_objects)
# save keras module spec to path/data/keras_module.txt
with open(os.path.join(data_path, _KERAS_MODULE_SPEC_PATH), "w") as f:
f.write(keras_module.__name__)
# save keras model to path/data/model.h5
model_subpath = os.path.join(data_subpath, _MODEL_SAVE_PATH)
model_path = os.path.join(path, model_subpath)
if path.startswith('/dbfs/'):
# The Databricks Filesystem uses a FUSE implementation that does not support
# random writes, so saving directly to that path would fail; write to a local
# temporary file first and copy it over.
with tempfile.NamedTemporaryFile(suffix='.h5') as f:
keras_model.save(f.name, **kwargs)
f.flush() # force flush the data
shutil.copyfile(src=f.name, dst=model_path)
else:
keras_model.save(model_path, **kwargs)
# update flavor info to mlflow_model
mlflow_model.add_flavor(FLAVOR_NAME,
keras_module=keras_module.__name__,
keras_version=keras_module.__version__,
data=data_subpath)
# save conda.yaml info to path/conda.yaml
if conda_env is None:
conda_env = get_default_conda_env(include_cloudpickle=custom_objects is not None,
keras_module=keras_module)
elif not isinstance(conda_env, dict):
with open(conda_env, "r") as f:
conda_env = yaml.safe_load(f)
with open(os.path.join(path, _CONDA_ENV_SUBPATH), "w") as f:
yaml.safe_dump(conda_env, stream=f, default_flow_style=False)
# append loader_module, data and env data to mlflow_model
pyfunc.add_to_model(mlflow_model, loader_module="mlflow.keras",
data=data_subpath, env=_CONDA_ENV_SUBPATH)
# save mlflow_model to path/MLmodel
mlflow_model.save(os.path.join(path, "MLmodel"))
def log_model(keras_model, artifact_path, conda_env=None, custom_objects=None, keras_module=None,
registered_model_name=None, signature: ModelSignature=None,
input_example: ModelInputExample=None, **kwargs):
"""
Log a Keras model as an MLflow artifact for the current run.
:param keras_model: Keras model to be saved.
:param artifact_path: Run-relative artifact path.
:param conda_env: Either a dictionary representation of a Conda environment or
the path to a Conda environment yaml file.
If provided, this describes the environment this model should be
run in. At minimum, it should specify the dependencies
contained in :func:`get_default_conda_env()`. If ``None``, the default
:func:`mlflow.keras.get_default_conda_env()` environment is added to
the model. The following is an *example* dictionary representation of a
Conda environment::
{
'name': 'mlflow-env',
'channels': ['defaults'],
'dependencies': [
'python=3.7.0',
'keras=2.2.4',
'tensorflow=1.8.0'
]
}
:param custom_objects: A Keras ``custom_objects`` dictionary mapping names (strings) to
custom classes or functions associated with the Keras model. MLflow saves
these custom layers using CloudPickle and restores them automatically
when the model is loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
:param keras_module: Keras module to be used to save / load the model
(``keras`` or ``tf.keras``). If not provided, MLflow will
attempt to infer the Keras module based on the given model.
:param registered_model_name: (Experimental) If given, create a model version under
``registered_model_name``, also creating a registered model if one
with the given name does not exist.
:param signature: (Experimental) :py:class:`ModelSignature <mlflow.models.ModelSignature>`
describes model input and output :py:class:`Schema <mlflow.types.Schema>`.
The model signature can be :py:func:`inferred <mlflow.models.infer_signature>`
from datasets with valid model input (e.g. the training dataset with target
column omitted) and valid model output (e.g. model predictions generated on
the training dataset), for example:
.. code-block:: python
from mlflow.models.signature import infer_signature
train = df.drop_column("target_label")
predictions = ... # compute model predictions
signature = infer_signature(train, predictions)
:param input_example: (Experimental) Input example provides one or several instances of valid
model input. The example can be used as a hint of what data to feed the
model. The given example will be converted to a Pandas DataFrame and then
serialized to json using the Pandas split-oriented format. Bytes are
base64-encoded.
:param kwargs: kwargs to pass to ``keras_model.save`` method.
.. code-block:: python
:caption: Example
from keras.layers import Dense
import mlflow
# Build, compile, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size = 128, validation_data=(x_val, y_val))
# Log metrics and log the model
with mlflow.start_run() as run:
mlflow.keras.log_model(keras_model, "models")
"""
Model.log(artifact_path=artifact_path, flavor=mlflow.keras,
keras_model=keras_model, conda_env=conda_env, custom_objects=custom_objects,
keras_module=keras_module, registered_model_name=registered_model_name,
signature=signature, input_example=input_example,
**kwargs)
def _save_custom_objects(path, custom_objects):
"""
Save custom objects dictionary to a cloudpickle file so a model can be easily loaded later.
:param path: An absolute path that points to the data directory within /path/to/model.
:param custom_objects: Keras ``custom_objects`` is a dictionary mapping
names (strings) to custom classes or functions to be considered
during deserialization. MLflow saves these custom layers using
CloudPickle and restores them automatically when the model is
loaded with :py:func:`mlflow.keras.load_model` and
:py:func:`mlflow.pyfunc.load_model`.
"""
import cloudpickle
custom_objects_path = os.path.join(path, _CUSTOM_OBJECTS_SAVE_PATH)
with open(custom_objects_path, "wb") as out_f:
cloudpickle.dump(custom_objects, out_f)
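# Illustrative sketch (not part of the original module): round-tripping the
# cloudpickle file written by _save_custom_objects, mirroring what _load_model
# does below. `tmp_dir` is a hypothetical existing directory supplied by the caller.
def _example_custom_objects_roundtrip(tmp_dir):
    import cloudpickle
    _save_custom_objects(tmp_dir, {"double": lambda x: 2 * x})
    with open(os.path.join(tmp_dir, _CUSTOM_OBJECTS_SAVE_PATH), "rb") as in_f:
        restored = cloudpickle.load(in_f)
    return restored["double"](21)  # -> 42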
def _load_model(model_path, keras_module, **kwargs):
keras_models = importlib.import_module(keras_module.__name__ + ".models")
custom_objects = kwargs.pop("custom_objects", {})
custom_objects_path = None
if os.path.isdir(model_path):
if os.path.isfile(os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)):
custom_objects_path = os.path.join(model_path, _CUSTOM_OBJECTS_SAVE_PATH)
model_path = os.path.join(model_path, _MODEL_SAVE_PATH)
if custom_objects_path is not None:
import cloudpickle
with open(custom_objects_path, "rb") as in_f:
pickled_custom_objects = cloudpickle.load(in_f)
pickled_custom_objects.update(custom_objects)
custom_objects = pickled_custom_objects
from distutils.version import StrictVersion
if StrictVersion(keras_module.__version__.split('-')[0]) >= StrictVersion("2.2.3"):
# NOTE: Keras 2.2.3 does not work with unicode paths in python2. Pass in h5py.File instead
# of string to avoid issues.
import h5py
with h5py.File(os.path.abspath(model_path), "r") as model_path:
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
else:
# NOTE: Older versions of Keras only handle filepath.
return keras_models.load_model(model_path, custom_objects=custom_objects, **kwargs)
class _KerasModelWrapper:
def __init__(self, keras_model, graph, sess):
self.keras_model = keras_model
self._graph = graph
self._sess = sess
def predict(self, dataframe):
# In TensorFlow < 2.0, we use a graph and session to predict
if self._graph is not None:
with self._graph.as_default():
with self._sess.as_default():
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
# In TensorFlow >= 2.0, we do not use a graph and session to predict
else:
predicted = pd.DataFrame(self.keras_model.predict(dataframe.values))
predicted.index = dataframe.index
return predicted
def _load_pyfunc(path):
"""
Load PyFunc implementation. Called by ``pyfunc.load_pyfunc``.
:param path: Local filesystem path to the MLflow Model with the ``keras`` flavor.
"""
import tensorflow as tf
if os.path.isfile(os.path.join(path, _KERAS_MODULE_SPEC_PATH)):
with open(os.path.join(path, _KERAS_MODULE_SPEC_PATH), "r") as f:
keras_module = importlib.import_module(f.read())
else:
import keras
keras_module = keras
K = importlib.import_module(keras_module.__name__ + ".backend")
if keras_module.__name__ == "tensorflow.keras" or K.backend() == 'tensorflow':
if LooseVersion(tf.__version__) < LooseVersion('2.0.0'):
graph = tf.Graph()
sess = tf.Session(graph=graph)
# By default tf backed models depend on the global graph and session.
# We create and use a new Graph and Session and store them with the model.
# This way the model is independent of the global state.
with graph.as_default():
with sess.as_default(): # pylint:disable=not-context-manager
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, graph, sess)
else:
K.set_learning_phase(0)
m = _load_model(path, keras_module=keras_module, compile=False)
return _KerasModelWrapper(m, None, None)
else:
raise MlflowException("Unsupported backend '%s'" % K._BACKEND)
def load_model(model_uri, **kwargs):
"""
Load a Keras model from a local file or a run.
Extra arguments are passed through to keras.load_model.
:param model_uri: The location, in URI format, of the MLflow model. For example:
- ``/Users/me/path/to/local/model``
- ``relative/path/to/local/model``
- ``s3://my_bucket/path/to/model``
- ``runs:/<mlflow_run_id>/run-relative/path/to/model``
- ``models:/<model_name>/<model_version>``
- ``models:/<model_name>/<stage>``
For more information about supported URI schemes, see
`Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
artifact-locations>`_.
:return: A Keras model instance.
.. code-block:: python
:caption: Example
# Load persisted model as a Keras model or as a PyFunc, call predict() on a pandas DataFrame
keras_model = mlflow.keras.load_model("runs:/96771d893a5e46159d9f3b49bf9013e2" + "/models")
predictions = keras_model.predict(x_test)
"""
local_model_path = _download_artifact_from_uri(artifact_uri=model_uri)
flavor_conf = _get_flavor_configuration(model_path=local_model_path, flavor_name=FLAVOR_NAME)
keras_module = importlib.import_module(flavor_conf.get("keras_module", "keras"))
keras_model_artifacts_path = os.path.join(
local_model_path,
flavor_conf.get("data", _MODEL_SAVE_PATH))
return _load_model(model_path=keras_model_artifacts_path, keras_module=keras_module, **kwargs)
@experimental
def autolog():
# pylint: disable=E0611
"""
Enables automatic logging from Keras to MLflow. Autologging captures the following information:
**Metrics** and **Parameters**
- Training loss; validation loss; user-specified metrics
- Metrics associated with the ``EarlyStopping`` callbacks: ``stopped_epoch``,
``restored_epoch``, ``restore_best_weights``, ``last_epoch``, etc
- ``fit()`` or ``fit_generator()`` parameters; optimizer name; learning rate; epsilon
- ``fit()`` or ``fit_generator()`` parameters associated with ``EarlyStopping``: ``min_delta``,
``patience``, ``baseline``, ``restore_best_weights``, etc
**Artifacts**
- Model summary on training start
- `MLflow Model <https://mlflow.org/docs/latest/models.html>`_ (Keras model) on training end
.. code-block:: python
:caption: Example
import mlflow
import mlflow.keras
# Build, compile, enable autologging, and train your model
keras_model = ...
keras_model.compile(optimizer="rmsprop", loss="mse", metrics=["accuracy"])
# autolog your metrics, parameters, and model
mlflow.keras.autolog()
results = keras_model.fit(
x_train, y_train, epochs=20, batch_size=128, validation_data=(x_val, y_val))
``EarlyStopping Integration with Keras AutoLogging``
MLflow will detect if an ``EarlyStopping`` callback is used in a ``fit()`` or
``fit_generator()`` call, and if the ``restore_best_weights`` parameter is set to be ``True``,
then MLflow will log the metrics associated with the restored model as a final, extra step.
The epoch of the restored model will also be logged as the metric ``restored_epoch``.
This allows for easy comparison between the actual metrics of the restored model and
the metrics of other models.
If ``restore_best_weights`` is set to be ``False``, then MLflow will not log an additional step.
Regardless of ``restore_best_weights``, MLflow will also log ``stopped_epoch``,
which indicates the epoch at which training stopped due to early stopping.
If training does not end due to early stopping, then ``stopped_epoch`` will be logged as ``0``.
MLflow will also log the parameters of the ``EarlyStopping`` callback,
excluding ``mode`` and ``verbose``.
"""
import keras
class __MLflowKerasCallback(keras.callbacks.Callback):
"""
Callback for auto-logging metrics and parameters.
Records available logs after each epoch.
Records model structural information as params when training begins
"""
def on_train_begin(self, logs=None): # pylint: disable=unused-argument
try_mlflow_log(mlflow.log_param, 'num_layers', len(self.model.layers))
try_mlflow_log(mlflow.log_param, 'optimizer_name', type(self.model.optimizer).__name__)
if hasattr(self.model.optimizer, 'lr'):
lr = self.model.optimizer.lr if \
type(self.model.optimizer.lr) is float \
else keras.backend.eval(self.model.optimizer.lr)
try_mlflow_log(mlflow.log_param, 'learning_rate', lr)
if hasattr(self.model.optimizer, 'epsilon'):
epsilon = self.model.optimizer.epsilon if \
type(self.model.optimizer.epsilon) is float \
else keras.backend.eval(self.model.optimizer.epsilon)
try_mlflow_log(mlflow.log_param, 'epsilon', epsilon)
sum_list = []
self.model.summary(print_fn=sum_list.append)
summary = '\n'.join(sum_list)
tempdir = tempfile.mkdtemp()
try:
summary_file = os.path.join(tempdir, "model_summary.txt")
with open(summary_file, 'w') as f:
f.write(summary)
try_mlflow_log(mlflow.log_artifact, local_path=summary_file)
finally:
shutil.rmtree(tempdir)
def on_epoch_end(self, epoch, logs=None):
if not logs:
return
try_mlflow_log(mlflow.log_metrics, logs, step=epoch)
def on_train_end(self, logs=None):
try_mlflow_log(log_model, self.model, artifact_path='model')
# As of Keras 2.4.0, Keras Callback implementations must define the following
# methods indicating whether or not the callback overrides functions for
# batch training/testing/inference
def _implements_train_batch_hooks(self): return False
def _implements_test_batch_hooks(self): return False
def _implements_predict_batch_hooks(self): return False
def _early_stop_check(callbacks):
if LooseVersion(keras.__version__) < LooseVersion('2.3.0'):
es_callback = keras.callbacks.EarlyStopping
else:
es_callback = keras.callbacks.callbacks.EarlyStopping
for callback in callbacks:
if isinstance(callback, es_callback):
return callback
return None
def _log_early_stop_callback_params(callback):
if callback:
try:
earlystopping_params = {'monitor': callback.monitor,
'min_delta': callback.min_delta,
'patience': callback.patience,
'baseline': callback.baseline,
'restore_best_weights': callback.restore_best_weights}
try_mlflow_log(mlflow.log_params, earlystopping_params)
except Exception: # pylint: disable=W0703
return
def _get_early_stop_callback_attrs(callback):
try:
return callback.stopped_epoch, callback.restore_best_weights, callback.patience
except Exception: # pylint: disable=W0703
return None
def _log_early_stop_callback_metrics(callback, history):
if callback:
callback_attrs = _get_early_stop_callback_attrs(callback)
if callback_attrs is None:
return
stopped_epoch, restore_best_weights, patience = callback_attrs
try_mlflow_log(mlflow.log_metric, 'stopped_epoch', stopped_epoch)
# Weights are restored only if early stopping occurs
if stopped_epoch != 0 and restore_best_weights:
restored_epoch = stopped_epoch - max(1, patience)
try_mlflow_log(mlflow.log_metric, 'restored_epoch', restored_epoch)
restored_metrics = {key: history.history[key][restored_epoch]
for key in history.history.keys()}
# Checking that a metric history exists
metric_key = next(iter(history.history), None)
if metric_key is not None:
last_epoch = len(history.history[metric_key])
try_mlflow_log(mlflow.log_metrics, restored_metrics, step=last_epoch)
def _run_and_log_function(self, original, args, kwargs, unlogged_params, callback_arg_index):
if not mlflow.active_run():
try_mlflow_log(mlflow.start_run)
auto_end_run = True
else:
auto_end_run = False
log_fn_args_as_params(original, args, kwargs, unlogged_params)
early_stop_callback = None
# Checking if the 'callbacks' argument of the function is set
if len(args) > callback_arg_index:
tmp_list = list(args)
early_stop_callback = _early_stop_check(tmp_list[callback_arg_index])
tmp_list[callback_arg_index] += [__MLflowKerasCallback()]
args = tuple(tmp_list)
elif 'callbacks' in kwargs:
early_stop_callback = _early_stop_check(kwargs['callbacks'])
kwargs['callbacks'] += [__MLflowKerasCallback()]
else:
kwargs['callbacks'] = [__MLflowKerasCallback()]
_log_early_stop_callback_params(early_stop_callback)
history = original(self, *args, **kwargs)
_log_early_stop_callback_metrics(early_stop_callback, history)
if auto_end_run:
try_mlflow_log(mlflow.end_run)
return history
@gorilla.patch(keras.Model)
def fit(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit')
unlogged_params = ['self', 'x', 'y', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 5)
@gorilla.patch(keras.Model)
def fit_generator(self, *args, **kwargs):
original = gorilla.get_original_attribute(keras.Model, 'fit_generator')
unlogged_params = ['self', 'generator', 'callbacks', 'validation_data', 'verbose']
return _run_and_log_function(self, original, args, kwargs, unlogged_params, 4)
settings = gorilla.Settings(allow_hit=True, store_hit=True)
gorilla.apply(gorilla.Patch(keras.Model, 'fit', fit, settings=settings))
gorilla.apply(gorilla.Patch(keras.Model, 'fit_generator', fit_generator, settings=settings))
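# Illustrative sketch (not part of the original module): using autolog()
# together with the EarlyStopping integration described in its docstring.
# The model and data arguments are hypothetical placeholders.
def _example_autolog_with_early_stopping(keras_model, x_train, y_train, x_val, y_val):
    import keras
    autolog()  # patch keras.Model.fit / fit_generator
    early_stop = keras.callbacks.EarlyStopping(
        monitor="val_loss", patience=3, restore_best_weights=True)
    # stopped_epoch, restored_epoch and the EarlyStopping parameters are then
    # logged automatically by the patched fit().
    return keras_model.fit(x_train, y_train, epochs=50,
                           validation_data=(x_val, y_val),
                           callbacks=[early_stop])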
|
_from_ordinalf
|
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object; the return value is a
float containing the total number of seconds of the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
# MASKED: _from_ordinalf function (lines 257-284)
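# Hedged sketch of the masked _from_ordinalf (not the original implementation;
# reconstructed from its documented behavior): split the ordinal float into
# whole days and a fractional remainder, rebuild the time of day from the
# remainder, and convert from UTC into the requested timezone.
def _from_ordinalf(x, tz=None):
    if tz is None:
        tz = _get_rc_timezone()
    ix = int(x)
    dt = datetime.datetime.fromordinal(ix)
    remainder = float(x) - ix
    hour, remainder = divmod(HOURS_PER_DAY * remainder, 1)
    minute, remainder = divmod(MIN_PER_HOUR * remainder, 1)
    second, remainder = divmod(SEC_PER_MIN * remainder, 1)
    microsecond = int(1e6 * remainder)
    if microsecond < 10:
        microsecond = 0  # compensate for rounding errors
    dt = datetime.datetime(dt.year, dt.month, dt.day, int(hour), int(minute),
                           int(second), microsecond, tzinfo=UTC).astimezone(tz)
    if microsecond > 999990:  # compensate for rounding errors
        dt += datetime.timedelta(microseconds=1e6 - microsecond)
    return dt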
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
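# Illustrative sketch (not part of the original module): parsing a date string
# with a known format straight to a matplotlib datenum.
def _example_strpdate2num():
    to_num = strpdate2num('%Y-%m-%d')
    # same value as date2num(datetime.datetime(2006, 4, 1))
    return to_num('2006-04-01')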
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
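# Illustrative sketch (not part of the original module): Julian dates and
# matplotlib datenums differ only by the constant JULIAN_OFFSET.
def _example_julian_offset():
    n = date2num(datetime.datetime(2000, 1, 1, 12, tzinfo=UTC))
    return num2julian(n)  # 2451545.0, the Julian date of the J2000 epoch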
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
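# Illustrative sketch (not part of the original module): a round trip through
# date2num/num2date using only names defined in this module.
def _example_date_roundtrip():
    d = datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC)
    n = date2num(d)             # ordinal days since 0001-01-01 UTC, plus one
    return n, num2date(n, UTC)  # back to a timezone-aware datetime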
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
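# Illustrative sketch (not part of the original module): drange produces a
# half-open range [dstart, dend) of ordinal floats.
def _example_drange():
    start = datetime.datetime(2004, 2, 1)
    end = datetime.datetime(2004, 2, 4)
    # three values (Feb 1, 2, 3); the endpoint 2004-02-04 is excluded
    return drange(start, end, datetime.timedelta(days=1))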
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue1777412
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick and the next) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than or equal to the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) to the
multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# By default, nonsingular date plots span an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
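# Illustrative sketch (not part of the original module): the usual pairing of
# AutoDateLocator with AutoDateFormatter on an axis; `ax` is a hypothetical
# matplotlib Axes supplied by the caller.
def _example_auto_date_axis(ax):
    locator = AutoDateLocator(minticks=3, maxticks=7, interval_multiples=True)
    formatter = AutoDateFormatter(locator)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    return locator, formatter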
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
| 257
| 284
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
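# A minimal round-trip sketch (hypothetical helper, for illustration only):
# date2num yields float days since 0001-01-01 00:00:00 UTC plus one, and
# num2date converts back to a timezone-aware datetime.
def _example_date2num_roundtrip():
    d = datetime.datetime(2006, 4, 1, 6, 0, 0, tzinfo=UTC)
    x = date2num(d)                    # fractional part encodes the time of day
    return x, num2date(x, tz=UTC)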
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
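# A minimal sketch of drange (hypothetical helper, for illustration only):
# hourly ordinals covering the half-open interval [start, end).
def _example_drange_hourly():
    start = datetime.datetime(2006, 4, 1)
    end = datetime.datetime(2006, 4, 2)
    return drange(start, end, datetime.timedelta(hours=1))   # 24 values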
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
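# A minimal sketch (hypothetical helper, for illustration only): format a
# single tick value with an explicit strftime pattern and the module-level UTC
# timezone.
def _example_dateformatter():
    formatter = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(datetime.datetime(2014, 7, 4, 12, 30, tzinfo=UTC))
    return formatter(x)                # e.g. '2014-07-04 12:30'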
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
    trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
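# A minimal sketch (hypothetical helper, for illustration only) of the general
# rrule-based ticking described in the module docstring: an Easter-based
# yearly rule wrapped in an RRuleLocator (defined below) and installed on an
# axis; assumes pyplot is importable.
def _example_rrule_locator():
    import matplotlib.pyplot as plt
    rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(RRuleLocator(rule))
    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
    return fig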
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
        # But by default, expand a singular range to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
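# A minimal sketch (hypothetical helper, for illustration only) of the usual
# AutoDateLocator/AutoDateFormatter pairing, including the intervald
# customization shown in the class docstring; assumes pyplot is importable.
def _example_auto_date_axis():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.plot_date([datetime.datetime(2014, 1, 1, h) for h in range(24)],
                 range(24), '-')
    locator = AutoDateLocator(interval_multiples=True)
    locator.intervald[HOURLY] = [3]        # only allow 3-hourly ticking
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(AutoDateFormatter(locator))
    return fig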
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
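# Minimal sketches (hypothetical helper, for illustration only) mirroring the
# module docstring: tick Mondays weekly, or Mondays and Saturdays every second
# week.
def _example_weekday_locators():
    every_monday = WeekdayLocator(byweekday=MO)
    mon_sat_biweekly = WeekdayLocator(byweekday=(MO, SA), interval=2)
    return every_monday, mon_sat_biweekly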
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
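# A minimal round-trip sketch (hypothetical helper, for illustration only)
# between Unix epoch seconds and matplotlib datenums.
def _example_epoch_roundtrip():
    unix_seconds = 1000000000          # 2001-09-09 01:46:40 UTC
    x = epoch2num(unix_seconds)        # days since 0001, i.e. a datenum
    return x, num2epoch(x)             # back to seconds since 1970-01-01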
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
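# A minimal sketch (hypothetical helper, for illustration only): let
# date_ticker_factory pick a locator/formatter for a ~90-day span and install
# them on an axis; assumes pyplot is importable.
def _example_ticker_factory():
    import matplotlib.pyplot as plt
    locator, formatter = date_ticker_factory(90, numticks=5)
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    return fig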
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
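# Because DateConverter is registered above for datetime.date and
# datetime.datetime, such values can be passed straight to plotting calls; a
# minimal sketch (hypothetical helper, for illustration only), assuming pyplot
# is importable:
def _example_registered_converter():
    import matplotlib.pyplot as plt
    days = [datetime.date(2014, 1, 1) + datetime.timedelta(days=i)
            for i in range(10)]
    fig, ax = plt.subplots()
    ax.plot(days, range(10))           # converted via DateConverter/date2num
    return fig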
|
num2date
|
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
        Takes a datetime.timedelta object `tdelta` and returns a float
        containing the total number of seconds of the `tdelta` duration.
        For large durations (> 270 years on most platforms), this loses
        microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
    Convert a Gregorian float date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
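# Editor's sketch (not part of the original module): a minimal use of date2num,
# assuming the historical "plus one" day offset described in its docstring.
def _example_date2num():
    d = datetime.datetime(2006, 4, 1, 6, 0)
    num = date2num(d)        # floating point days since 0001-01-01, plus one
    # Sequences work too and come back as a numpy array of floats.
    nums = date2num([d, d + datetime.timedelta(days=1)])
    return num, nums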
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
# MASKED: num2date function (lines 401-424)
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
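# Editor's sketch: drange builds an evenly spaced, half-open range
# [dstart, dend) of float dates; a one-day step over one week gives seven values.
def _example_drange():
    dstart = datetime.datetime(2010, 1, 1)
    dend = datetime.datetime(2010, 1, 8)
    return drange(dstart, dend, datetime.timedelta(days=1))   # Jan 1 .. Jan 7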
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date value (floating point days since
    0001-01-01, plus one). Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
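# Editor's sketch: formatting a datetime with DateFormatter.strftime; with
# tz=None the formatter itself falls back to rcParams['timezone'].
def _example_dateformatter():
    formatter = DateFormatter('%Y-%m-%d %H:%M')
    dt = datetime.datetime(2012, 3, 15, 13, 30)
    return formatter.strftime(dt)   # '2012-03-15 13:30'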
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
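# Editor's sketch: the usual pairing of AutoDateFormatter with AutoDateLocator,
# plus a customized entry in the scaled dictionary (see the class docstring).
def _example_autodateformatter():
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'   # minutes/seconds only
    return locator, formatter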
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
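# Editor's sketch: an rrulewrapper for "every other Monday", handed to the
# RRuleLocator defined below, mirroring the module docstring example.
def _example_rrulewrapper():
    rule = rrulewrapper(WEEKLY, byweekday=MO, interval=2)
    return RRuleLocator(rule)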
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
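# Editor's sketch: get_unit_generic maps an rrule frequency constant to an
# approximate tick unit in days, which feeds the autoscaling heuristics.
def _example_rrule_units():
    return {freq: RRuleLocator.get_unit_generic(freq)
            for freq in (YEARLY, MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY)}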
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
        # But by default, expand a singular range to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from an list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
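# Editor's sketch: constraining AutoDateLocator to 3-hourly ticks through its
# intervald dictionary, as suggested in the constructor docstring.
def _example_autodatelocator():
    locator = AutoDateLocator(interval_multiples=True)
    locator.intervald[HOURLY] = [3]   # only allow ticks every 3 hours
    return locator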
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
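# Editor's sketch: the two YearLocator spellings shown in the class docstring.
def _example_yearlocator():
    every_year = YearLocator()                       # every year on Jan 1st
    every_5_years = YearLocator(5, month=7, day=4)   # every 5 years on July 4th
    return every_year, every_5_years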
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
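# Editor's sketch: the single-frequency locators defined above all delegate to
# RRuleLocator with a suitable rrulewrapper.
def _example_fixed_locators():
    quarters = MonthLocator(bymonth=(1, 4, 7, 10))   # quarterly ticks
    mondays = WeekdayLocator(byweekday=MO)           # every Monday
    six_hourly = HourLocator(interval=6)             # every 6 hours
    return quarters, mondays, six_hourly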
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
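# Editor's sketch: MicrosecondLocator wraps a ticker.MultipleLocator working in
# microseconds since 0001, so one unit is 1/MUSECONDS_PER_DAY days.
def _example_microsecondlocator():
    loc = MicrosecondLocator(interval=500)   # a tick every 500 microseconds
    return loc._get_unit(), loc._get_interval()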
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
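# Editor's sketch: epoch2num and num2epoch are inverse affine maps between Unix
# epoch seconds and matplotlib day ordinals.
def _example_epoch_conversion():
    e = 1000000000.0            # seconds since 1970-01-01
    d = epoch2num(e)            # days since 0001 (matplotlib date)
    return num2epoch(d)         # back to ~1e9 seconds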
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
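# Editor's sketch: date_ticker_factory picks coarser locators as the span (in
# days) grows; a 90-day span falls through to the WeekdayLocator branch.
def _example_ticker_factory():
    locator, formatter = date_ticker_factory(90)   # ~3 months
    return locator, formatter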
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
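# Editor's sketch: the helpers above express durations in float days, matching
# the units used throughout this module.
def _example_duration_helpers():
    return seconds(30), minutes(90), hours(6), weeks(2)   # all values in days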
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
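# Editor's sketch: because DateConverter is registered for date and datetime,
# the units machinery builds date-aware locators/formatters automatically.
def _example_dateconverter():
    info = DateConverter.axisinfo(UTC, axis=None)   # the axis argument is unused
    return info.majloc, info.majfmt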
|
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
| 401
| 424
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specifed days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
        Takes a datetime.timedelta object `tdelta` and returns a float
        containing the total number of seconds of the `tdelta` duration.
        For large durations (> 270 years on most platforms), this loses
        microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
    Convert a Gregorian float date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
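# Editor's sketch: num2date inverts date2num; sequences of floats come back as
# a list of timezone-aware datetimes in the requested (or rcParams) timezone.
def _example_num2date():
    nums = date2num([datetime.datetime(2006, 4, 1),
                     datetime.datetime(2006, 4, 2)])
    return num2date(nums, tz=UTC)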
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date value (floating point days since
    0001-01-01, plus one). Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from an list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
[x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
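# --- Editor's addition: a minimal, hypothetical usage sketch -------------
# Once DateConverter is registered for datetime.date and datetime.datetime
# (the two lines above), matplotlib axes accept datetime values directly and
# AutoDateLocator/AutoDateFormatter pick the tick locations and labels.
# Guarded so it only runs when this file is executed as a script.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    days = [datetime.date(2000, 1, 1) + datetime.timedelta(days=i)
            for i in range(90)]
    fig, ax = plt.subplots()
    ax.plot(days, range(90))  # values are converted through date2num()
    fig.autofmt_xdate()       # rotate the labels produced by AutoDateFormatter
    plt.show()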
|
drange
|
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
|
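The implementation of ``drange`` is elided below (see the ``# MASKED`` marker in
the file content). As an illustrative sketch only -- an editor addition, not
necessarily the masked lines 427-450 -- a ``drange`` consistent with the
docstring above can be built from the module's own helpers ``_to_ordinalf``,
``_total_seconds``, ``SEC_PER_DAY`` and ``numpy``, all defined in the file
content that follows:

def drange(dstart, dend, delta):
    """
    Return a date range as float Gregorian ordinals.  *dstart* and
    *dend* are :class:`datetime` instances.  *delta* is a
    :class:`datetime.timedelta` instance.
    """
    f1 = _to_ordinalf(dstart)
    f2 = _to_ordinalf(dend)
    step = _total_seconds(delta) / SEC_PER_DAY
    # number of delta-sized steps needed to cover [dstart, dend)
    num = int(np.ceil((f2 - f1) / step))
    # keep the interval half-open: drop the endpoint if it reaches dend
    dinterval_end = dstart + num * delta
    if dinterval_end >= dend:
        dinterval_end -= delta
        num -= 1
    f2 = _to_ordinalf(dinterval_end)
    return np.linspace(f1, f2, num + 1)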
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allow almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> ~270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
# MASKED: drange function (lines 427-450)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is in matplotlib date format, i.e. floating point days
since 0001-01-01 UTC plus one. Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from an list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
[x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated: [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than or equal to dend, subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timeszone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
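# Minimal usage sketch for datestr2num (illustrative helper, not part of the
# module's API): a single ISO-like string is parsed by dateutil and converted
# straight to a date2num float.
def _example_datestr2num():
    import datetime
    x = datestr2num('2014-01-01')
    assert x == date2num(datetime.datetime(2014, 1, 1))
    return x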
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
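# Minimal usage sketch for date2num (illustrative helper): the integer part of
# the result is the proleptic Gregorian ordinal of the date and the fractional
# part encodes the time of day, so noon lands exactly on .5.
def _example_date2num_fraction():
    import datetime
    d = datetime.datetime(2000, 1, 1, 12, 0, 0)
    x = date2num(d)
    assert x == d.toordinal() + 0.5
    return x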
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
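# Minimal round-trip sketch for date2num/num2date (illustrative helper).  The
# conversion goes through a float number of days, so the round trip is only
# exact to within a few microseconds; tz is passed explicitly (the module-level
# UTC helper) to avoid depending on the rcParams timezone.
def _example_num2date_roundtrip():
    import datetime
    d = datetime.datetime(2010, 6, 15, 8, 30, 45, tzinfo=UTC)
    d2 = num2date(date2num(d), tz=UTC)
    assert abs(_total_seconds(d2 - d)) < 1e-3  # sub-millisecond agreement
    return d2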
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated: [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than or equal to dend, subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
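# Minimal usage sketch for drange (illustrative helper): the generated
# interval is half open, [dstart, dend), so the end point itself is excluded.
def _example_drange_half_open():
    import datetime
    start = datetime.datetime(2010, 1, 1)
    end = datetime.datetime(2010, 1, 4)
    vals = drange(start, end, datetime.timedelta(days=1))
    assert len(vals) == 3              # Jan 1, Jan 2, Jan 3; Jan 4 excluded
    assert vals[0] == date2num(start)
    return vals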
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a :func:`date2num` float (days since 0001-01-01,
    plus one).  Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
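# Minimal usage sketch for DateFormatter (illustrative helper): the formatter
# is called with a tick location, i.e. a date2num float, and returns the
# strftime-formatted label.  The timezone is passed explicitly so the result
# does not depend on the rcParams default.
def _example_dateformatter():
    import datetime
    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(datetime.datetime(2014, 2, 3, 9, 30, tzinfo=UTC))
    return fmt(x)  # '2014-02-03 09:30'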
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
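# Minimal sketch of customizing AutoDateFormatter's scale table (illustrative
# helper): mapping the "about one minute between ticks" scale to a plain
# minutes:seconds label, as described in the class docstring above.
def _example_autodateformatter_scaled():
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'  # only minutes and seconds
    return formatter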
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
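# Minimal usage sketch for rrulewrapper (illustrative helper): build a rule,
# tighten it in place with set(), and query it; attribute access falls through
# to the underlying dateutil rrule, so between() works directly on the wrapper.
def _example_rrulewrapper():
    import datetime
    rule = rrulewrapper(WEEKLY, byweekday=MO,
                        dtstart=datetime.datetime(2014, 1, 1))
    rule.set(interval=2)  # every second Monday
    return rule.between(datetime.datetime(2014, 1, 1),
                        datetime.datetime(2014, 3, 1), True)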
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
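# Minimal sketch of driving RRuleLocator by hand (illustrative helper):
# tick_values() takes the view limits as datetimes and returns date2num
# floats, so no Axes is needed to experiment with a rule.
def _example_rrulelocator_ticks():
    import datetime
    rule = rrulewrapper(MONTHLY, bymonthday=1,
                        byhour=0, byminute=0, bysecond=0)
    locator = RRuleLocator(rule, tz=UTC)
    ticks = locator.tick_values(datetime.datetime(2014, 1, 1),
                                datetime.datetime(2014, 6, 1))
    return num2date(ticks, tz=UTC)  # the first of each month, Jan-Jun 2014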
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
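# Minimal sketch of the usual AutoDateLocator/AutoDateFormatter pairing on an
# axis (illustrative helper; assumes matplotlib.pyplot is importable): the
# locator picks the tick frequency from the view limits and the formatter
# adapts its format string to the locator's unit.
def _example_autodatelocator_axis():
    import datetime
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    days = drange(datetime.datetime(2014, 1, 1),
                  datetime.datetime(2014, 7, 1),
                  datetime.timedelta(days=1))
    ax.plot_date(days, np.arange(len(days)), '-')
    locator = AutoDateLocator(interval_multiples=True)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(AutoDateFormatter(locator))
    return fig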
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
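# Minimal sketch of YearLocator ticking (illustrative helper): with base=5 and
# a month/day of July 4, tick_values() snaps the limits outwards to multiples
# of 5 years, one tick per base year; no Axes is required.
def _example_yearlocator_ticks():
    import datetime
    locator = YearLocator(5, month=7, day=4)
    ticks = locator.tick_values(datetime.datetime(2001, 1, 1),
                                datetime.datetime(2018, 12, 31))
    return num2date(ticks, tz=UTC)  # July 4th of 2000, 2005, ..., 2020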
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
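# Minimal sketch relating Unix epoch seconds to matplotlib date numbers
# (illustrative helper): epoch 0 (1970-01-01 00:00 UTC) maps onto the same
# float that date2num produces for that instant, and num2epoch inverts it.
def _example_epoch_conversion():
    import datetime
    x = epoch2num(0.0)
    assert x == date2num(datetime.datetime(1970, 1, 1))
    assert num2epoch(x) == 0.0
    return x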
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
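# Minimal usage sketch for date_ticker_factory (illustrative helper): for a
# span of roughly 400 days and the default of ~5 ticks the month branch is
# selected, so a MonthLocator and a '%b %Y' formatter come back as the pair.
def _example_date_ticker_factory():
    locator, formatter = date_ticker_factory(400)
    assert isinstance(locator, MonthLocator)
    return locator, formatter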
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
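# Minimal sketch of the span helpers (illustrative helper): each converts its
# unit into the module's working unit, days.
def _example_span_helpers():
    assert seconds(30) == 30.0 / SEC_PER_DAY
    assert minutes(90) == 90.0 / MINUTES_PER_DAY
    assert hours(6) == 0.25
    assert weeks(2) == 14.0
    return seconds(30), minutes(90), hours(6), weeks(2)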
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
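# Minimal sketch of the units machinery (illustrative helper): because
# DateConverter is registered for datetime.date and datetime.datetime,
# convert() turns raw datetimes into date2num floats; this is what happens
# implicitly when datetime data is plotted on an axis.
def _example_dateconverter():
    import datetime
    d = datetime.datetime(2014, 5, 1, 12, 0)
    converter = units.registry[datetime.datetime]
    x = converter.convert(d, None, None)
    assert x == date2num(d)
    return x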
_replace_common_substr
Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
    Convert a float Gregorian date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
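# Usage sketch (illustrative, kept in comments so import-time behaviour is
# unchanged): a date2num/num2date round trip, assuming the default 'UTC'
# value for rcParams['timezone'].
#
#     import datetime
#     import matplotlib.dates as mdates
#     x = mdates.date2num(datetime.datetime(2006, 4, 1, 6, 0))
#     # x is a float: days since 0001-01-01 00:00:00 UTC, plus one
#     dt = mdates.num2date(x)   # timezone-aware datetime (UTC here)
#     assert dt.replace(tzinfo=None) == datetime.datetime(2006, 4, 1, 6, 0)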
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in multiples of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
    if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
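# Usage sketch (illustrative, kept in comments): drange returns a half-open
# range [dstart, dend) of float Gregorian ordinals.
#
#     import datetime
#     import matplotlib.dates as mdates
#     start = datetime.datetime(2006, 4, 1)
#     end = datetime.datetime(2006, 4, 4)
#     vals = mdates.drange(start, end, datetime.timedelta(days=1))
#     # len(vals) == 3: April 1, 2 and 3; the endpoint April 4 is excluded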
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a float date value (days since 0001-01-01 UTC,
    plus one, as returned by :func:`date2num`). Use a :func:`strftime`
    format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
# MASKED: _replace_common_substr function (lines 490-513)
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
            # Note: in python 3.3 this is okay for years >= 1000,
            # refer to http://bugs.python.org/issue1777412
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
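# Usage sketch (illustrative, kept in comments): attaching a DateFormatter to
# a date axis; ``fig``/``ax`` are hypothetical objects from a pyplot session.
#
#     import matplotlib.pyplot as plt
#     import matplotlib.dates as mdates
#     fig, ax = plt.subplots()
#     ax.xaxis.set_major_locator(mdates.MonthLocator())
#     ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %Y'))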
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
    The AutoDateFormatter has a scale dictionary that maps the scale
    of the tick (the distance in days between major ticks) to a
    format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
    trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
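# Usage sketch (illustrative, kept in comments): AutoDateFormatter is normally
# paired with the locator whose unit it inspects; ``ax`` is hypothetical.
#
#     import matplotlib.dates as mdates
#     locator = mdates.AutoDateLocator()
#     formatter = mdates.AutoDateFormatter(locator)
#     formatter.scaled[1. / 24.] = '%H:%M'   # customize the hourly format
#     # ax.xaxis.set_major_locator(locator)
#     # ax.xaxis.set_major_formatter(formatter)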
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
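# Usage sketch (illustrative, kept in comments): completely general ticking
# through rrulewrapper, mirroring the module docstring example; ``ax`` is a
# hypothetical Axes.
#
#     import matplotlib.dates as mdates
#     rule = mdates.rrulewrapper(mdates.YEARLY, byeaster=1, interval=5)
#     loc = mdates.RRuleLocator(rule)   # tick every 5th Easter
#     # ax.xaxis.set_major_locator(loc)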
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
        should be chosen to be multiples of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
        frequency of the tick (a constant from dateutil.rrule) to the
        multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
        # By default, expand a singular date range to about a 4-year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
        # least minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of base on a given month and day
        (default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
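# Usage sketch (illustrative, kept in comments): decade ticks with a matching
# formatter; ``ax`` is a hypothetical Axes.
#
#     import matplotlib.dates as mdates
#     loc = mdates.YearLocator(10)          # years that are multiples of 10
#     fmt = mdates.DateFormatter('%Y')
#     # ax.xaxis.set_major_locator(loc)
#     # ax.xaxis.set_major_formatter(fmt)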
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
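# Usage sketch (illustrative, kept in comments): combining major and minor
# date ticks on one axis; ``ax`` is a hypothetical Axes.
#
#     import matplotlib.dates as mdates
#     # ax.xaxis.set_major_locator(mdates.MonthLocator())
#     # ax.xaxis.set_minor_locator(mdates.DayLocator(bymonthday=(1, 15)))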
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
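# Usage sketch (illustrative, kept in comments): converting between Unix epoch
# seconds and matplotlib datenums.
#
#     import time
#     import matplotlib.dates as mdates
#     now_num = mdates.epoch2num(time.time())   # days since 0001, plus one
#     secs = mdates.num2epoch(now_num)          # back to seconds since 1970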
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
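# Usage sketch (illustrative, kept in comments): date_ticker_factory picks a
# locator/formatter pair for a span given in days.
#
#     import matplotlib.dates as mdates
#     loc, fmt = mdates.date_ticker_factory(30)
#     # -> a DayLocator ticking every 6 days and a '%b %d' DateFormatter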
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
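# Usage sketch (illustrative, kept in comments): because DateConverter is
# registered above, datetime values can be passed to plotting functions
# directly and the date locators/formatters are applied automatically.
#
#     import datetime
#     import matplotlib.pyplot as plt
#     days = [datetime.date(2006, 4, d) for d in range(1, 11)]
#     plt.plot(days, range(10))   # the x-axis is handled as dates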
|
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
| 490
| 513
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
  locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
    Convert a float Gregorian date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in multiples of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
    if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a float date value (days since 0001-01-01 UTC,
    plus one, as returned by :func:`date2num`). Use a :func:`strftime`
    format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
            # Note: in python 3.3 this is okay for years >= 1000,
            # refer to http://bugs.python.org/issue1777412
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
    The AutoDateFormatter has a scale dictionary that maps the scale
    of the tick (the distance in days between major ticks) to a
    format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
    trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# By default, nonsingular expands a singular date range to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
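# Editor's illustrative sketch (not part of the original module): a hedged usage
# example of the MonthLocator defined above; the helper name is hypothetical.
def _example_month_locator():
    """Return a locator that ticks on the 1st of Jan, Apr, Jul and Oct."""
    return MonthLocator(bymonth=(1, 4, 7, 10))  # bymonthday defaults to 1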
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
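# Editor's illustrative sketch (not part of the original module): a hedged
# round-trip check for epoch2num/num2epoch; the helper name is hypothetical.
def _example_epoch_roundtrip():
    """Convert a unix timestamp to a datenum and back again."""
    e = 1.0e9                               # seconds since 1970-01-01
    d = epoch2num(e)                        # days since 0001-01-01, plus one
    assert abs(num2epoch(d) - e) < 1e-3     # round trip up to float precision
    return d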
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
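# Editor's illustrative sketch (not part of the original module): a hedged usage
# example of date_ticker_factory; the helper name is hypothetical.
def _example_date_ticker_factory():
    """For a ~45 day span the factory pairs a WeekdayLocator with '%a, %b %d'."""
    locator, formatter = date_ticker_factory(span=45.0)
    return locator, formatter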
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
strftime_pre_1900
|
Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except across century years that are not leap years
(those not divisible by 400). But only if you're using the Gregorian
calendar.
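As a quick illustrative check of that 28-year cycle (an editor's sketch, not part
of the original docstring), Python's standard calendar module confirms that the
weekday and leap-year pattern lines up as long as no skipped century leap year
such as 1900 falls inside the interval:
import calendar
assert calendar.weekday(1855, 3, 1) == calendar.weekday(1855 + 28, 3, 1)
assert calendar.isleap(1856) == calendar.isleap(1856 + 28)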
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
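# Editor's illustrative sketch (not part of the original module): a hedged usage
# example of strpdate2num; the helper name is hypothetical.
def _example_strpdate2num():
    """Parse an ISO-like date string straight to a matplotlib datenum."""
    convert = strpdate2num('%Y-%m-%d')
    return convert('2014-07-04')            # float days since 0001-01-01, plus one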
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
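# Editor's illustrative sketch (not part of the original module): a hedged
# round-trip example of the convention documented above; the helper name is
# hypothetical.
def _example_date2num_roundtrip():
    """0001-01-01 06:00 UTC maps to 1.25; num2date inverts date2num."""
    assert abs(date2num(datetime.datetime(1, 1, 1, 6, 0, tzinfo=UTC)) - 1.25) < 1e-9
    d = datetime.datetime(2006, 4, 1, 12, 0, tzinfo=UTC)
    return num2date(date2num(d))            # same instant, in the rc timezone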
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in terms of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
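# Editor's illustrative sketch (not part of the original module): drange yields a
# half-open grid [dstart, dend); the helper name is hypothetical.
def _example_drange():
    """Four daily points (Jan 1-4); Jan 5 itself is excluded."""
    d1 = datetime.datetime(2010, 1, 1)
    d2 = datetime.datetime(2010, 1, 5)
    return drange(d1, d2, datetime.timedelta(days=1))   # array of length 4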
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
# MASKED: strftime_pre_1900 function (lines 515-570)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue1777412
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
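# Editor's illustrative sketch (not part of the original module): a hedged usage
# example of DateFormatter on a single datenum; the helper name is hypothetical.
def _example_date_formatter():
    """Format one tick value; output is rendered in the rc (or given) timezone."""
    formatter = DateFormatter('%Y-%m-%d %H:%M')
    x = date2num(datetime.datetime(2001, 3, 4, 12, 30))
    return formatter(x)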
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
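# Editor's illustrative sketch (not part of the original module): the usual pairing
# of AutoDateLocator with AutoDateFormatter on an Axes; the helper name and the
# *ax* argument are hypothetical.
def _example_auto_date_axis(ax):
    """Install an auto locator/formatter pair on the x-axis of *ax*."""
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    return locator, formatter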
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
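# Editor's illustrative sketch (not part of the original module): the completely
# general rrule-based ticking described in the module docstring; the helper name
# is hypothetical.
def _example_rrule_locator():
    """Tick every 5th Easter Sunday (byeaster=0 means Easter itself in dateutil)."""
    rule = rrulewrapper(YEARLY, byeaster=0, interval=5)
    return RRuleLocator(rule)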
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# By default, nonsingular expands a singular date range to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
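# Illustrative sketch (assumption): epoch2num and num2epoch are inverses, so
# converting Unix epoch seconds to matplotlib date ordinals and back recovers
# the input up to floating point noise.  The _demo_* name is a placeholder.
def _demo_epoch_roundtrip():
    secs = np.array([0.0, 1.0e9])   # seconds since 1970-01-01
    days = epoch2num(secs)          # matplotlib date ordinals (days since 0001)
    return np.allclose(num2epoch(days), secs)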
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
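# Illustrative sketch (assumption): the four helpers above all express a
# duration in days, the unit used by matplotlib date ordinals, so a few exact
# identities hold.  The _demo_* name is a placeholder.
def _demo_duration_helpers():
    assert seconds(86400) == 1.0    # a full day of seconds
    assert minutes(1440) == 1.0     # a full day of minutes
    assert hours(12) == 0.5         # half a day
    assert weeks(2) == 14.0         # two weeks expressed in days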
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
| 515
| 570
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
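# Illustrative sketch (assumption): _to_ordinalf keeps the proleptic Gregorian
# ordinal in the integer part and the time of day in the fraction, so noon
# maps to .5.  The _demo_* name is a placeholder.
def _demo_to_ordinalf():
    noon = datetime.datetime(2000, 1, 1, 12, 0, 0)
    return _to_ordinalf(noon) == datetime.date(2000, 1, 1).toordinal() + 0.5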
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
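# Illustrative sketch (assumption): with a known format string, strpdate2num
# gives the same datenum as parsing the date yourself and calling date2num on
# the result.  The _demo_* name is a placeholder.
def _demo_strpdate2num():
    to_num = strpdate2num('%Y-%m-%d')
    return to_num('2015-03-14') == date2num(datetime.datetime(2015, 3, 14))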
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
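# Illustrative sketch (assumption): date2num and num2date round-trip a
# timezone-aware datetime exactly for "nice" fractions of a day such as
# 18:00 (= 0.75 days).  The _demo_* name is a placeholder.
def _demo_date_roundtrip():
    d = datetime.datetime(2010, 6, 1, 18, 0, tzinfo=UTC)
    return num2date(date2num(d), tz=UTC) == d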
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval [dstart, dend) is generated
    if dinterval_end >= dend:
        # if the endpoint reaches or passes dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
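# Illustrative sketch (assumption): drange generates a half-open interval
# [dstart, dend), so a one-week span sampled daily yields seven points and
# excludes dend itself.  The _demo_* name is a placeholder.
def _demo_drange():
    start = datetime.datetime(2014, 1, 1)
    end = datetime.datetime(2014, 1, 8)
    return len(drange(start, end, datetime.timedelta(days=1))) == 7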
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a floating point number of days since 0001-01-01
    00:00:00 UTC, plus one (see :func:`date2num`). Use a :func:`strftime`
    format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
        # and splice the replacement in at those positions. Because the
        # strings are rebuilt as we go, it is okay if len(replacement)
        # differs from len(sub1) and len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
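# Illustrative sketch (assumption): a DateFormatter maps a datenum back to a
# label through a strftime format string; the tz argument fixes the timezone
# of the label.  The _demo_* name is a placeholder.
def _demo_date_formatter():
    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(datetime.datetime(1995, 7, 4, 12, 0, tzinfo=UTC))
    return fmt(x) == '1995-07-04 12:00'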
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
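# Illustrative sketch (assumption): the scaled mapping can be adjusted after
# construction, as the class docstring above describes; an AutoDateLocator is
# the usual (but not the only possible) driving locator.  The _demo_* name is
# a placeholder.
def _demo_auto_date_formatter():
    locator = AutoDateLocator(tz=UTC)
    formatter = AutoDateFormatter(locator, tz=UTC)
    # Show only minutes and seconds once the locator unit drops to one minute.
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'
    return formatter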
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
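# Illustrative sketch (assumption): an RRuleLocator takes a rrulewrapper,
# which exposes the full dateutil.rrule grammar, e.g. ticking every other
# Friday.  The _demo_* name and ``ax`` argument are placeholders.
def _demo_rrule_locator(ax):
    rule = rrulewrapper(WEEKLY, byweekday=FR, interval=2)
    ax.xaxis.set_major_locator(RRuleLocator(rule, tz=UTC))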
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
        # Whatever is thrown at us, we can scale the unit.
        # By default, expand a degenerate date range to a ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
        # least minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
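# Illustrative sketch (assumption): the usual pairing on a date axis is an
# AutoDateLocator plus an AutoDateFormatter built from it; both adapt their
# behaviour to the current view limits.  The _demo_* name and ``ax`` argument
# are placeholders.
def _demo_auto_date_locator(ax):
    locator = AutoDateLocator(minticks=3, maxticks=7, tz=UTC)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(AutoDateFormatter(locator, tz=UTC))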
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of *base* on a given month and day
        (default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
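# Illustrative sketch (assumption): for a span of roughly three months
# (90 days) the factory above returns a WeekdayLocator with a '%a, %b %d'
# formatter.  The _demo_* name and ``ax`` argument are placeholders.
def _demo_date_ticker_factory(ax):
    locator, formatter = date_ticker_factory(90, tz=UTC, numticks=5)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)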
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
strftime
|
Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object; the return value is a
float containing the total number of seconds of the `tdelta` duration.
For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert a float in Gregorian ordinal days to a date, preserving hours,
minutes, seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
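# --- Illustrative sketch (not part of the original module): minimal use of
# strpdate2num. The helper name _example_strpdate2num is invented here purely
# for illustration.
def _example_strpdate2num():
    # parse an ISO-like date string straight to a matplotlib datenum;
    # 2006-04-01 00:00 is ordinal day 732402 (see the module docstring)
    to_num = strpdate2num('%Y-%m-%d')
    assert to_num('2006-04-01') == 732402.0
    return to_num('2006-04-01')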
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
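# --- Illustrative sketch (not part of the original module): a date2num /
# num2date round trip. The helper name _example_date2num_roundtrip is invented
# here purely for illustration.
def _example_date2num_roundtrip():
    # 2006-04-01 06:00 is ordinal day 732402 plus a quarter of a day
    d = datetime.datetime(2006, 4, 1, 6, 0)
    x = date2num(d)
    assert abs(x - 732402.25) < 1e-9
    # num2date returns a timezone-aware datetime in the requested timezone
    return num2date(x, tz=UTC)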
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
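# --- Illustrative sketch (not part of the original module): julian2num and
# num2julian are inverse shifts by JULIAN_OFFSET. The helper name
# _example_julian_roundtrip is invented here purely for illustration.
def _example_julian_roundtrip():
    # Julian date 2451545.0 is the J2000.0 epoch (2000-01-01 12:00),
    # i.e. matplotlib ordinal 730120.5
    x = julian2num(2451545.0)
    assert x == 730120.5
    assert num2julian(x) == 2451545.0
    return x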
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate the end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
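# --- Illustrative sketch (not part of the original module): drange yields a
# half-open interval [dstart, dend). The helper name _example_drange is
# invented here purely for illustration.
def _example_drange():
    # one value per day for Jan 1, 2 and 3, but not Jan 4
    vals = drange(datetime.datetime(2000, 1, 1),
                  datetime.datetime(2000, 1, 4),
                  datetime.timedelta(days=1))
    assert len(vals) == 3
    return vals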
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is a matplotlib date value (floating point days since
0001-01-01 UTC, plus one, as produced by :func:`date2num`). Use a
:func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
# MASKED: strftime function (lines 572-591)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
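# --- Illustrative sketch (not part of the original module): customizing the
# AutoDateFormatter.scaled mapping, as described in the class docstring. The
# helper name _example_autodateformatter is invented here purely for
# illustration.
def _example_autodateformatter():
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    # once major ticks are closer than one minute apart, show only min:sec
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'
    return locator, formatter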
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
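# --- Illustrative sketch (not part of the original module): wrapping a
# dateutil rule for use with RRuleLocator, here ticking every other Monday.
# The helper name _example_rrulewrapper is invented here purely for
# illustration.
def _example_rrulewrapper():
    rule = rrulewrapper(WEEKLY, byweekday=MO, interval=2)
    return RRuleLocator(rule)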
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But by default, expand singular date limits to an ~4-year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiples of base on a given month and day
(default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
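# --- Illustrative sketch (not part of the original module): epoch2num and
# num2epoch shift and scale by EPOCH_OFFSET / SEC_PER_DAY. The helper name
# _example_epoch2num is invented here purely for illustration.
def _example_epoch2num():
    # the Unix epoch, 1970-01-01 00:00 UTC, maps to ordinal day 719163.0
    assert epoch2num(0) == EPOCH_OFFSET
    assert num2epoch(EPOCH_OFFSET) == 0.0
    return epoch2num(0)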
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
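# --- Illustrative sketch (not part of the original module): for a 30-day span
# and the default numticks=5, days (30) is the first quantity exceeding
# numticks, so a DayLocator with interval ceil(30 / 5) = 6 and the '%b %d'
# format is returned. The helper name _example_date_ticker_factory is invented
# here purely for illustration.
def _example_date_ticker_factory():
    locator, formatter = date_ticker_factory(30)
    return locator, formatter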
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
| 572
| 591
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object; the return value is a
float containing the total number of seconds of the `tdelta` duration.
For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert a float in Gregorian ordinal days to a date, preserving hours,
minutes, seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
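# --- Illustrative sketch (not part of the original module): bytespdate2num
# behaves like strpdate2num but decodes byte input first (handy with
# np.loadtxt converters). The helper name _example_bytespdate2num is invented
# here purely for illustration.
def _example_bytespdate2num():
    to_num = bytespdate2num('%Y-%m-%d')
    return to_num(b'2006-04-01')   # 732402.0, same as the string variant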
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate the end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is a matplotlib date value (floating point days since
0001-01-01 UTC, plus one, as produced by :func:`date2num`). Use a
:func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
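# --- Illustrative sketch (not part of the original module): dates before 1900
# are routed through strftime_pre_1900, which formats via the 28-year calendar
# cycle and then substitutes the real year back in. The helper name
# _example_pre_1900_format is invented here purely for illustration.
def _example_pre_1900_format():
    fmt = DateFormatter('%Y-%m-%d')
    return fmt.strftime(datetime.datetime(1850, 7, 4))   # '1850-07-04'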
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
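# Illustrative sketch (hypothetical helper name): pairing AutoDateFormatter
# with an AutoDateLocator and overriding one entry of the scale dictionary
# so that sub-minute tick spacings show only minutes and seconds.
def _example_auto_date_formatter():
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    formatter.scaled[1. / (24. * 60.)] = '%M:%S'
    return locator, formatter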
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
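# Illustrative sketch (hypothetical helper name): rrulewrapper turns any
# dateutil rule into a tick rule; here every other Monday is ticked via
# RRuleLocator, using the WEEKLY and MO constants imported at module level.
def _example_rrule_ticks():
    rule = rrulewrapper(WEEKLY, byweekday=MO, interval=2)
    return RRuleLocator(rule)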
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) to the
multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14, 21],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But by default, expand singular date limits to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
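# Illustrative sketch (hypothetical helper name): an AutoDateLocator capped
# at seven ticks for every frequency, with hourly ticks locked to multiples
# of three hours via interval_multiples and a custom intervald entry.
def _example_auto_date_locator():
    locator = AutoDateLocator(maxticks=7, interval_multiples=True)
    locator.intervald[HOURLY] = [3]
    return locator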
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
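# Illustrative sketch (hypothetical helper name): a YearLocator that ticks
# every tenth year on January 1st, using the module-level UTC tzinfo instance.
def _example_year_locator():
    return YearLocator(base=10, month=1, day=1, tz=UTC)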
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
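# Illustrative sketch (hypothetical helper name): round-tripping a Unix
# epoch through the float-day representation; 86400 s after the epoch lands
# one day past EPOCH_OFFSET, i.e. 1970-01-02.
def _example_epoch_roundtrip():
    d = epoch2num(86400.0)
    assert abs(float(num2epoch(d)) - 86400.0) < 1e-6
    return d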
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
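# Illustrative sketch (hypothetical helper name): date_ticker_factory
# returns a matched locator/formatter pair; a span of ~45 days with the
# default numticks=5 falls through to the weekday locator and '%a, %b %d'.
def _example_ticker_factory():
    return date_ticker_factory(45.0)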
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
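# Illustrative sketch (hypothetical helper name): the helpers above express
# durations as fractions or multiples of one day, matching the float-day
# representation used throughout this module.
def _example_duration_helpers():
    assert hours(12) == 0.5      # half a day
    assert weeks(2) == 14.0      # fourteen days
    assert abs(minutes(90) - 0.0625) < 1e-12
    return seconds(30)           # 30 s expressed in days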
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
__init__
|
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
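# Illustrative sketch (hypothetical helper name): strpdate2num and
# bytespdate2num convert date strings (or bytes) with a known format
# straight to datenums, which is handy when loading columns from text files.
def _example_strpdate2num():
    conv = strpdate2num('%Y-%m-%d')
    bconv = bytespdate2num('%Y-%m-%d', encoding='ascii')
    return conv('2015-03-14'), bconv(b'2015-03-14')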
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
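# Illustrative sketch (hypothetical helper name): date2num maps datetimes to
# float days since 0001-01-01 00:00 UTC plus one, so noon on 0001-01-01
# becomes 1.5; sequences are converted element-wise to a numpy array.
def _example_date2num():
    import datetime
    assert date2num(datetime.datetime(1, 1, 1, 12)) == 1.5
    return date2num([datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)])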
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
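# Illustrative sketch (hypothetical helper name): num2date inverts date2num
# and returns timezone-aware datetimes; passing the module-level UTC
# instance keeps the result independent of the rcParams timezone.
def _example_num2date():
    import datetime
    dt = num2date(730120.5, tz=UTC)
    assert dt == datetime.datetime(2000, 1, 1, 12, tzinfo=UTC)
    return dt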
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in multiples of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
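# Illustrative sketch (hypothetical helper name): drange builds a half-open
# [dstart, dend) grid of float dates, so one week sampled daily yields
# exactly seven points.
def _example_drange():
    import datetime
    start = datetime.datetime(2004, 2, 1)
    end = datetime.datetime(2004, 2, 8)
    vals = drange(start, end, datetime.timedelta(days=1))
    assert len(vals) == 7
    return vals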
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue1777412
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
# MASKED: __init__ function (lines 678-692)
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) to the
multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14, 21],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But by default, expand singular date limits to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
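# --- Editor's note: usage sketch, not part of the original module ----------
# Because DateConverter is registered for datetime.date and datetime.datetime,
# those objects can be passed to plotting calls directly; the axis then picks
# an AutoDateLocator/AutoDateFormatter for its ticks.  A minimal, hypothetical
# demonstration (assumes the standard pyplot API):
def _demo_registered_date_converter():
    import datetime
    import matplotlib.pyplot as plt
    days = [datetime.date(2006, 4, d) for d in range(1, 11)]
    values = list(range(10))
    fig, ax = plt.subplots()
    ax.plot(days, values)      # dates are converted through units.registry
    fig.autofmt_xdate()        # rotate the automatically chosen tick labels
    return fig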
|
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
| 678
| 692
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this
        loses microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
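# --- Editor's note: usage sketch, not part of the original module ----------
# strpdate2num parses a known, fixed date format straight to a matplotlib
# datenum, which is convenient when reading columns of timestamps from text.
def _demo_strpdate2num():
    parse = strpdate2num('%Y-%m-%d')
    # Each call returns a float in days since 0001-01-01 UTC, plus one.
    return [parse(s) for s in ('2006-04-01', '2006-04-02')]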
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
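# --- Editor's note: usage sketch, not part of the original module ----------
# A round trip through date2num/num2date.  num2date applies the rcParams
# timezone (UTC unless configured otherwise), so the result is tz-aware.
def _demo_date2num_roundtrip():
    d = datetime.datetime(2006, 4, 1, 6, 0)   # 06:00 -> fractional part .25
    x = date2num(d)                           # float ordinal, e.g. 732402.25
    return x, num2date(x)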
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval [dstart, dend) is generated
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
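# --- Editor's note: usage sketch, not part of the original module ----------
# drange builds a half-open range [dstart, dend) of float ordinals with a
# fixed timedelta step, e.g. one value per day for a week:
def _demo_drange():
    dstart = datetime.datetime(2006, 4, 1)
    dend = datetime.datetime(2006, 4, 8)
    step = datetime.timedelta(days=1)
    return drange(dstart, dend, step)   # 7 values, Apr 1 through Apr 7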
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a float date in days since 0001-01-01 UTC plus one,
    as returned by :func:`date2num`. Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
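# --- Editor's note: usage sketch, not part of the original module ----------
# DateFormatter maps a float ordinal to a strftime-formatted label; the same
# object can also be installed on an axis with set_major_formatter.
def _demo_dateformatter():
    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(datetime.datetime(2006, 4, 1, 6, 0))
    return fmt(x)    # '2006-04-01 06:00'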
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
    The AutoDateFormatter has a scale dictionary that maps the scale
    of the tick (the distance in days between major ticks) to a
    format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
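# --- Editor's note: usage sketch, not part of the original module ----------
# Pairing AutoDateLocator with AutoDateFormatter on an existing axis and
# overriding one entry of the ``scaled`` dictionary, as the docstring above
# describes.  The ``ax`` argument is assumed to be a matplotlib Axes.
def _demo_autodateformatter(ax):
    locator = AutoDateLocator(tz=UTC)
    formatter = AutoDateFormatter(locator, tz=UTC)
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'   # minute-scale ticks
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)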
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
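# --- Editor's note: usage sketch, not part of the original module ----------
# rrulewrapper defers everything to dateutil's rrule, so arbitrary recurrence
# rules can drive tick placement; this reuses the module docstring's
# "tick every 5th easter" rule on an assumed Axes instance.
def _demo_rrule_locator(ax):
    rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
    ax.xaxis.set_major_locator(RRuleLocator(rule, tz=UTC))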
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
        Convert the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
        The AutoDateLocator has an interval dictionary that maps the
        frequency of the tick (a constant from dateutil.rrule) to the
        multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
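# --- Editor's note: usage sketch, not part of the original module ----------
# Restricting AutoDateLocator to one allowed multiple for hourly ticking, as
# suggested in the class docstring; ``ax`` is an assumed Axes instance.
def _demo_autodatelocator_intervald(ax):
    locator = AutoDateLocator(tz=UTC, interval_multiples=True)
    locator.intervald[HOURLY] = [3]    # only allow ticks every 3 hours
    ax.xaxis.set_major_locator(locator)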
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
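# --- Editor's note: usage sketch, not part of the original module ----------
# Ticking on Mondays of every second week, matching the module docstring's
# WeekdayLocator example; ``ax`` is an assumed Axes instance.
def _demo_weekday_locator(ax):
    ax.xaxis.set_major_locator(WeekdayLocator(byweekday=MO, interval=2))
    ax.xaxis.set_major_formatter(DateFormatter('%a %b %d', tz=UTC))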
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
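# --- Editor's note: usage sketch, not part of the original module ----------
# date_ticker_factory picks a coarse locator/formatter pair for a span given
# in days; a 45-day span yields a WeekdayLocator with a '%a, %b %d' format.
def _demo_date_ticker_factory(ax):
    locator, formatter = date_ticker_factory(45, tz=UTC, numticks=5)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)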
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
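# --- Editor's note: usage sketch, not part of the original module ----------
# The helpers above express durations in the unit of the date axis (days),
# which is handy for padding view limits by hand:
def _demo_duration_helpers():
    return hours(6), minutes(90), weeks(2)   # 0.25, 0.0625 and 14.0 days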
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
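# --- Editor's note: usage sketch, not part of the original module ----------
# When x data are already float ordinals (e.g. from drange or date2num), tell
# the axis explicitly that it is plotting dates so the registered date
# locator/formatter machinery is used.  Assumes the standard pyplot API.
def _demo_xaxis_date():
    import matplotlib.pyplot as plt
    xs = drange(datetime.datetime(2006, 4, 1), datetime.datetime(2006, 5, 1),
                datetime.timedelta(days=1))
    fig, ax = plt.subplots()
    ax.plot(xs, list(range(len(xs))))
    ax.xaxis_date(tz=UTC)   # interpret x values as matplotlib datenums
    return fig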
|
__init__
|
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
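# Illustrative sketch (not part of the original module): how strpdate2num and
# bytespdate2num are typically used, assuming the standard '%Y-%m-%d' strptime
# format. The resulting floats are matplotlib datenums as produced by date2num.
#   >>> conv = strpdate2num('%Y-%m-%d')
#   >>> x = conv('2006-04-01')              # float days since 0001-01-01, plus one
#   >>> bconv = bytespdate2num('%Y-%m-%d')  # same parser, but accepts bytes input
#   >>> bconv(b'2006-04-01') == x
#   True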
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
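# Illustrative sketch (not part of the original module): a date2num/num2date
# round trip. The float value follows the documented convention of days since
# 0001-01-01 00:00:00 UTC, plus one.
#   >>> import datetime
#   >>> d = datetime.datetime(1, 1, 1, 6, 0)
#   >>> date2num(d)                  # 06:00 on day one -> 1.25
#   1.25
#   >>> num2date(1.25, tz=UTC).hour
#   6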
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval [dstart, dend) will be generated
    if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
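# Illustrative sketch (not part of the original module): drange produces a
# half-open range [dstart, dend) of datenums spaced by delta.
#   >>> import datetime
#   >>> dstart = datetime.datetime(2006, 4, 1)
#   >>> dend = datetime.datetime(2006, 4, 4)
#   >>> delta = datetime.timedelta(days=1)
#   >>> len(drange(dstart, dend, delta))   # 2006-04-01, -02, -03; dend excluded
#   3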
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date value (as returned by :func:`date2num`).
    Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
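# Illustrative sketch (not part of the original module): formatting a single
# tick value with DateFormatter. The tick value is a matplotlib datenum.
#   >>> fmt = DateFormatter('%Y-%m-%d', tz=UTC)
#   >>> fmt(date2num(datetime.datetime(2006, 4, 1, 12, 0)))
#   '2006-04-01'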
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
    of the tick (the distance in days between major ticks) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
    trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
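# Illustrative sketch (not part of the original module): wrapping a dateutil
# rrule so it can be handed to RRuleLocator (defined further below).
#   >>> rule = rrulewrapper(MONTHLY, interval=3, bymonthday=1)  # quarterly ticks
#   >>> loc = RRuleLocator(rule, tz=UTC)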
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
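# Illustrative sketch (not part of the original module): the usual pairing of
# AutoDateLocator with AutoDateFormatter on a date axis. `ax` is assumed to be
# an existing matplotlib Axes with dates plotted on its x-axis.
#   >>> loc = AutoDateLocator(minticks=3, maxticks=7)
#   >>> ax.xaxis.set_major_locator(loc)
#   >>> ax.xaxis.set_major_formatter(AutoDateFormatter(loc))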
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
# MASKED: __init__ function (lines 1204-1222)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
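# Illustrative sketch (not part of the original module): the fixed-frequency
# locators above all share the same calling pattern; only the frequency differs.
#   >>> loc = HourLocator(interval=6)                    # a tick every 6 hours
#   >>> loc = MinuteLocator(byminute=[0, 30])            # on the hour and half hour
#   >>> loc = SecondLocator(bysecond=range(0, 60, 15))   # every 15 seconds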
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
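# Illustrative sketch (not part of the original module): converting between Unix
# epoch seconds and matplotlib datenums. Epoch 0 (1970-01-01 00:00 UTC)
# corresponds to EPOCH_OFFSET days.
#   >>> epoch2num(0) == EPOCH_OFFSET
#   True
#   >>> num2epoch(epoch2num(86400))   # one day after the epoch, back to seconds
#   86400.0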
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
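# Illustrative sketch (not part of the original module): picking a locator and
# formatter for a 30-day span and attaching them to an assumed Axes `ax`.
#   >>> locator, formatter = date_ticker_factory(30)
#   >>> ax.xaxis.set_major_locator(locator)
#   >>> ax.xaxis.set_major_formatter(formatter)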
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
| 1,204
| 1,222
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval [dstart, dend) will be generated
    if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date value (as returned by :func:`date2num`).
    Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiples of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
[x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
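# Editor's note: a minimal sketch, not part of the original module, showing the
# arithmetic behind epoch2num/num2epoch above: a POSIX epoch in seconds maps to
# "days since 0001-01-01 plus one" via EPOCH_OFFSET and SEC_PER_DAY. The helper
# name below is hypothetical.
def _demo_epoch_roundtrip():
    """Hypothetical helper: round-trip one day past the Unix epoch."""
    e = SEC_PER_DAY               # 1970-01-02 00:00:00 UTC as an epoch time
    d = epoch2num(e)              # EPOCH_OFFSET + 1.0 in matplotlib date units
    assert abs(num2epoch(d) - e) < 1e-6
    return d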
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
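# Editor's note: a brief usage sketch (assumption: the helper below is not part
# of the original API) showing how date_ticker_factory picks coarser tickers as
# the span, given in days, grows.
def _demo_date_ticker_factory():
    """Hypothetical helper: tickers chosen for a one-week and a ten-year span."""
    week_loc, week_fmt = date_ticker_factory(7.0)         # day-level ticking
    decade_loc, decade_fmt = date_ticker_factory(3650.0)  # year-level ticking
    return (type(week_loc).__name__, week_fmt.fmt,
            type(decade_loc).__name__, decade_fmt.fmt)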
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
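# Editor's note: a tiny illustrative check, not from the original source, of the
# convenience converters above, which all express durations in fractional days.
def _demo_duration_helpers():
    """Hypothetical helper: 90 seconds, 90 minutes, 36 hours and 2 weeks as days."""
    return (seconds(90),          # 90 / 86400
            minutes(90),          # 90 / 1440
            hours(36),            # 1.5
            weeks(2))             # 14.0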
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
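# Editor's note: a short usage sketch (an assumption, not part of the module)
# showing how the registered DateConverter turns datetime data into the float
# day numbers the locators and formatters above operate on.
def _demo_date_converter():
    """Hypothetical helper: convert a date through the units interface."""
    d = datetime.date(2006, 4, 1)
    value = DateConverter.convert(d, None, None)   # equivalent to date2num(d)
    info = DateConverter.axisinfo(None, None)      # AutoDateLocator/Formatter pair
    return value, type(info.majloc).__name__, type(info.majfmt).__name__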
|
__init__
|
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
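# Editor's note: a minimal sketch, not part of the original module, of the
# Julian-date arithmetic above: both scales count days and differ only by
# JULIAN_OFFSET. The helper name is hypothetical.
def _demo_julian_roundtrip():
    """Hypothetical helper: matplotlib datenum <-> Julian date for one timestamp."""
    n = date2num(datetime.datetime(2000, 1, 1, 12))  # Gregorian days since 0001, plus one
    j = num2julian(n)                                # shift onto the Julian scale
    assert abs(julian2num(j) - n) < 1e-9
    return j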
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
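# Editor's note: a small round-trip sketch (illustrative assumption only) of the
# float encoding described above -- days since 0001-01-01 00:00:00 UTC plus one,
# with the fractional part carrying the time of day.
def _demo_date_roundtrip():
    """Hypothetical helper: encode and decode a timestamp, checking the fraction."""
    dt = datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC)
    x = date2num(dt)                       # fractional part is 0.25 (06:00)
    back = num2date(x, tz=UTC)
    assert abs(x - int(x) - 0.25) < 1e-9
    assert abs(_total_seconds(back - dt)) < 1e-3
    return x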
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in terms of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
# if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
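# Editor's note: a short sketch, not from the original source, showing drange's
# half-open behaviour: the end point is excluded, like Python's range().
def _demo_drange():
    """Hypothetical helper: six-hourly samples over one day, end point excluded."""
    start = datetime.datetime(2006, 4, 1)
    end = datetime.datetime(2006, 4, 2)
    step = datetime.timedelta(hours=6)
    vals = drange(start, end, step)        # 00:00, 06:00, 12:00, 18:00
    assert len(vals) == 4
    return num2date(vals)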
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
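# Editor's note: a brief usage sketch (illustrative assumption, not original
# code) of DateFormatter, including the pre-1900 path that leans on the 28-year
# repetition of the Gregorian calendar described above.
def _demo_date_formatter():
    """Hypothetical helper: format a modern and a pre-1900 tick value."""
    formatter = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    modern = formatter(date2num(datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC)))
    early = formatter(date2num(datetime.datetime(1850, 7, 4, tzinfo=UTC)))
    return modern, early   # e.g. '2006-04-01 06:00' and '1850-07-04 00:00'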
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
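# Editor's note: a compact sketch, not part of the original module, of pairing
# AutoDateFormatter with a locator and customizing its scale-to-format table,
# as the class docstring above describes.
def _demo_auto_date_formatter():
    """Hypothetical helper: show minute-scale ticks without the date part."""
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'   # only minutes and seconds
    return sorted(formatter.scaled.items())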
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
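# Editor's note: a minimal sketch (an assumption, not original code) of the
# rrulewrapper bridge used by the rrule-based locators below: attribute access
# is delegated to dateutil, and set() rebuilds the wrapped rrule.
def _demo_rrulewrapper():
    """Hypothetical helper: yearly occurrences in a window, then switch to monthly."""
    rule = rrulewrapper(YEARLY, dtstart=datetime.datetime(2000, 1, 1))
    lo, hi = datetime.datetime(2000, 1, 1), datetime.datetime(2003, 1, 1)
    yearly = rule.between(lo, hi, True)    # delegated to the underlying rrule
    rule.set(freq=MONTHLY)                 # rebuild with a new frequency
    monthly = rule.between(lo, hi, True)
    return len(yearly), len(monthly)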
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
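# Editor's note: a small illustrative check, not from the original source, of
# RRuleLocator.get_unit_generic, the frequency-to-days table that the
# autoscaling code above relies on.
def _demo_get_unit_generic():
    """Hypothetical helper: approximate day lengths for a few rrule frequencies."""
    return {
        'YEARLY': RRuleLocator.get_unit_generic(YEARLY),      # 365.0
        'MONTHLY': RRuleLocator.get_unit_generic(MONTHLY),    # 30.0
        'HOURLY': RRuleLocator.get_unit_generic(HOURLY),      # 1/24
        'SECONDLY': RRuleLocator.get_unit_generic(SECONDLY),  # 1/86400
    }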
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
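# A minimal sketch of the ``intervald`` customization described in the class
# docstring above; the helper name below is hypothetical and only added for
# illustration.
def _example_custom_hourly_intervals(tz=None):
    # Only allow 6-hour steps when hourly ticking is chosen.
    locator = AutoDateLocator(tz=tz, interval_multiples=True)
    locator.intervald[HOURLY] = [6]
    return locator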
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
# MASKED: __init__ function (lines 1282-1295)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
| 1,282
| 1,295
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
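# A terse sketch of ``strpdate2num`` for parsing fixed-format date strings
# into date numbers; the helper name is hypothetical.
def _example_strpdate2num():
    parse = strpdate2num('%Y-%m-%d')
    return parse('2006-04-01')          # a float date ordinal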
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
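# A quick sketch of ``datestr2num`` parsing free-form date strings through
# :func:`dateutil.parser.parse`; the helper name is hypothetical.
def _example_datestr2num():
    return datestr2num(['2006-04-01', '2006-04-02 12:00'])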
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
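# A short sketch of the date2num/num2date round trip documented above; the
# helper name is hypothetical.
def _example_date2num_roundtrip():
    d = datetime.datetime(2006, 4, 1, 6, 0)
    x = date2num(d)            # days since 0001-01-01 00:00 UTC, plus one
    return x, num2date(x)      # back to a datetime in the default timezone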
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
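# A small sketch relating Julian dates to Matplotlib date numbers via the
# JULIAN_OFFSET constant defined above; the helper name is hypothetical.
def _example_julian_roundtrip():
    jd = 2451545.0                      # an arbitrary Julian date
    return num2julian(julian2num(jd))   # round trip returns jd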
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
    if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
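# A brief sketch of ``drange`` producing a half-open, evenly spaced range of
# date ordinals; the helper name is hypothetical.
def _example_drange_six_hourly():
    start = datetime.datetime(2006, 4, 1)
    end = datetime.datetime(2006, 4, 2)
    step = datetime.timedelta(hours=6)
    return drange(start, end, step)     # ordinals for 00:00, 06:00, 12:00, 18:00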
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
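# A compact sketch of DateFormatter with a strftime format string, as
# described in the class docstring above; the helper name is hypothetical.
def _example_date_formatter():
    fmt = DateFormatter('%Y-%m-%d %H:%M')
    x = date2num(datetime.datetime(2006, 4, 1, 6, 30))
    return fmt(x)                       # rendered in the default timezone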
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
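# A minimal sketch of general rrule-based ticking, echoing the module
# docstring above; the helper name is hypothetical.
def _example_rrule_locator(tz=None):
    rule = rrulewrapper(WEEKLY, byweekday=MO, interval=2)   # Mondays, biweekly
    return RRuleLocator(rule, tz)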
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
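# A short sketch of the usual pairing of AutoDateLocator with
# AutoDateFormatter on a date axis; ``ax`` is an assumed Axes instance and
# the helper name is hypothetical.
def _example_auto_date_axis(ax, tz=None):
    locator = AutoDateLocator(tz=tz)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(AutoDateFormatter(locator, tz=tz))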
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
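# A brief sketch using HourLocator for ticks every six hours together with a
# DateFormatter; ``ax`` is an assumed Axes instance and the helper name is
# hypothetical.
def _example_six_hour_ticks(ax, tz=None):
    ax.xaxis.set_major_locator(HourLocator(interval=6, tz=tz))
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M', tz=tz))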
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
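# A tiny sketch of the epoch <-> date-number conversions defined above; the
# helper name is hypothetical.
def _example_epoch_roundtrip():
    nums = epoch2num([0.0, 86400.0])    # 1970-01-01 and 1970-01-02 as ordinals
    return num2epoch(nums)              # back to seconds since the Unix epoch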
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
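# A small sketch of ``date_ticker_factory`` for a ten-day span; with the
# defaults above this selects a DayLocator and a '%b %d' formatter. The
# helper name is hypothetical.
def _example_ticker_factory():
    return date_ticker_factory(10)      # (locator, formatter) for 10 days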
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
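# A one-line sketch of the duration helpers above, all of which return
# fractions of a day suitable for arithmetic on date ordinals; the helper
# name is hypothetical.
def _example_duration_helpers():
    return weeks(1) + hours(6) + minutes(15) + seconds(30)   # total in days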
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
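# A closing sketch: with DateConverter registered for datetime.date and
# datetime.datetime above, plotting datetime values routes them through
# date2num automatically; ``ax`` is an assumed Axes instance and the helper
# name is hypothetical.
def _example_plot_dates(ax):
    days = [datetime.date(2006, 4, d) for d in (1, 2, 3)]
    ax.plot(days, [1, 2, 3])            # x-values converted by DateConverter
    ax.xaxis_date()                     # install date locators/formatters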
|
__init__
|
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
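# Illustrative sketch of the two parsing paths above; the helper name, sample
# strings and format are assumptions for demonstration and not part of this
# module's API.
def _sketch_parse_datestrings():
    # strpdate2num needs an explicit strptime format string ...
    to_num = strpdate2num('%Y-%m-%d')
    x = to_num('2006-04-01')
    # ... whereas datestr2num lets dateutil.parser guess the format.
    y = datestr2num('April 1, 2006')
    # Both yield the same matplotlib date number for midnight of that day.
    return x, y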
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
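# Illustrative round-trip sketch for date2num/num2date.  The helper name and
# the concrete timestamp are assumptions for demonstration; the expected value
# of x is 732402.25 (the 732401 days quoted in the module docstring, plus one,
# plus a quarter of a day for 06:00).
def _sketch_date_roundtrip():
    d = datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC)
    x = date2num(d)              # float days since 0001-01-01 00:00 UTC, plus one
    back = num2date(x, tz=UTC)   # the same instant, reconstructed in UTC
    return x, back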
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval will be generated: [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
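# Illustrative sketch of drange; the helper name and the one-day window are
# assumptions for demonstration.  drange yields a half-open range
# [dstart, dend), so dend itself is excluded.
def _sketch_drange():
    dstart = datetime.datetime(2006, 4, 1)
    dend = datetime.datetime(2006, 4, 2)
    step = datetime.timedelta(hours=6)
    # Four samples: 00:00, 06:00, 12:00 and 18:00 on 2006-04-01.
    return drange(dstart, dend, step)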
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
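# Illustrative sketch of DateFormatter; the helper name is an assumption for
# demonstration.  Note that the formatter is called with a matplotlib date
# number (as produced by date2num), not with a datetime instance.
def _sketch_dateformatter():
    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC))
    return fmt(x)   # -> '2006-04-01 06:00'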
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# Whatever is thrown at us, we can scale the unit,
# but by default expand singular date limits to an ~4 year span.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
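# Illustrative sketch of the usual pairing of AutoDateLocator with
# AutoDateFormatter.  The helper name and the `ax` argument (assumed to be a
# matplotlib Axes that is plotting dates) are assumptions for demonstration
# and are not used elsewhere in this module.
def _sketch_auto_date_axis(ax, tz=None):
    locator = AutoDateLocator(tz=tz, interval_multiples=True)
    formatter = AutoDateFormatter(locator, tz=tz)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    return locator, formatter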
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
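# Illustrative sketch of fixed-frequency hourly ticking; the helper name is an
# assumption for demonstration.  byhour pins ticks to specific hours of the
# day, while interval thins the matching occurrences instead.
def _sketch_hourly_ticks(tz=None):
    # Tick at 00:00, 06:00, 12:00 and 18:00 of every day.
    pinned = HourLocator(byhour=range(0, 24, 6), tz=tz)
    # Tick every sixth matching hour, counted from the start of the range.
    thinned = HourLocator(interval=6, tz=tz)
    return pinned, thinned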
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
# MASKED: __init__ function (lines 1302-1315)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
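# Illustrative sketch of the epoch helpers; the helper name and the sample
# Unix timestamp are assumptions for demonstration only.
def _sketch_epoch_roundtrip():
    unix_seconds = 1143871200.0    # seconds since 1970-01-01 00:00 UTC
    d = epoch2num(unix_seconds)    # days since 0001, i.e. a matplotlib date number
    return num2epoch(d)            # back to the original seconds value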
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
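# Illustrative sketch of date_ticker_factory for a three-day span; the helper
# name is an assumption for demonstration.
def _sketch_ticker_factory(tz=None):
    # span=3 days with the default numticks=5 selects hourly ticking
    # (every ceil(72 / 5) = 15 hours) and the '%H:%M\n%b %d' format.
    locator, formatter = date_ticker_factory(span=3, tz=tz)
    return locator, formatter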
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
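# Quick sketch of the duration helpers above, which express everything in the
# day-based units used throughout this module; the helper name is an
# assumption for demonstration.
def _sketch_duration_helpers():
    return (seconds(30),   # 30 / 86400 of a day
            minutes(90),   # 90 / 1440  = 0.0625 days
            hours(6),      # 6 / 24     = 0.25 days
            weeks(2))      # 2 * 7      = 14.0 days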
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
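# Illustrative sketch of the converter registered above; the helper name and
# sample value are assumptions for demonstration.  Numbers pass through
# unchanged, while datetimes are routed through date2num.
def _sketch_date_converter():
    d = datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC)
    x = DateConverter.convert(d, None, None)      # equivalent to date2num(d)
    return DateConverter.convert(x, None, None)   # already numeric, returned as-is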
|
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
| 1,302
| 1,315
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval will be generated: [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
        # Note that this can be useful, since matplotlib's floating-point
        # date representation is more precise closer to year 0; around the
        # year 2000 the resolution is only on the order of tens of
        # microseconds.
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
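# Illustrative usage sketch: a typical way to attach a DateFormatter to an
# axis, using only the public pyplot API. The helper below is hypothetical and
# is not called anywhere in this module.
def _example_dateformatter_usage():
    import datetime
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    days = [datetime.date(2015, 3, d) for d in range(1, 11)]
    ax.plot(date2num(days), range(10))
    ax.xaxis_date()                                    # treat x values as dates
    ax.xaxis.set_major_formatter(DateFormatter('%b %d'))
    fig.autofmt_xdate()                                # rotate the tick labels
    return fig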
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
    of the tick (the distance in days between major ticks) to a
    format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
        # Pick the first scale which is greater than or equal to the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
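# Illustrative usage sketch: the usual pairing of AutoDateLocator and
# AutoDateFormatter, plus a customized entry in the ``scaled`` dictionary.
# The helper below is hypothetical and is not called anywhere in this module.
def _example_autodateformatter_usage():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    formatter.scaled[1. / MINUTES_PER_DAY] = '%M:%S'   # minutes:seconds at fine scales
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    return fig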
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
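# Illustrative usage sketch, echoing the module docstring: completely general
# ticking through a dateutil rrule wrapped in rrulewrapper. The helper below
# is hypothetical and is not called anywhere in this module.
def _example_rrule_locator_usage():
    import matplotlib.pyplot as plt
    rule = rrulewrapper(YEARLY, byeaster=1, interval=5)   # as in the module docstring
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(RRuleLocator(rule))
    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
    return fig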
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
        frequency of the tick (a constant from dateutil.rrule) to the
        multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
        # Whatever is thrown at us, we can scale the unit.
        # By default, a singular date range is expanded to a ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
        # Loop over all the frequencies and try to find one that gives at
        # least minticks tick positions. Once this is found, look for
        # an interval from the list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
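# Illustrative usage sketch: restricting AutoDateLocator to 'nice' interval
# multiples, as described in the constructor docstring. The helper below is
# hypothetical and is not called anywhere in this module.
def _example_autodatelocator_usage():
    locator = AutoDateLocator(minticks=3, maxticks=7, interval_multiples=True)
    locator.intervald[HOURLY] = [3, 6, 12]    # only allow 3-, 6- or 12-hour steps
    return locator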
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
        Mark years that are multiples of *base* on a given month and day
        (default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
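# Illustrative usage sketch: combining the simple locators, e.g. month
# boundaries as major ticks and Mondays as minor ticks. The helper below is
# hypothetical and is not called anywhere in this module.
def _example_major_minor_locators():
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(MonthLocator())
    ax.xaxis.set_major_formatter(DateFormatter('%b %Y'))
    ax.xaxis.set_minor_locator(WeekdayLocator(byweekday=MO))
    return fig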
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
    Convert an epoch or sequence of epochs to the matplotlib date format,
    that is, days since 0001 (as used by :func:`date2num`).
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
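# Illustrative usage sketch: round trip between Unix timestamps and
# matplotlib's day-based date numbers. The helper below is hypothetical and is
# not called anywhere in this module.
def _example_epoch_conversion():
    unix_seconds = 0.0                        # 1970-01-01 00:00:00 UTC
    mpl_date = epoch2num(unix_seconds)        # EPOCH_OFFSET days since 0001
    assert num2epoch(mpl_date) == unix_seconds
    return num2date(mpl_date, tz=UTC)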
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
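# Illustrative usage sketch: a quick locator/formatter pair sized for roughly
# three months of data. The helper below is hypothetical and is not called
# anywhere in this module.
def _example_date_ticker_factory():
    import matplotlib.pyplot as plt
    locator, formatter = date_ticker_factory(90, numticks=6)
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)
    return fig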
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
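# Illustrative usage sketch: because DateConverter is registered above for
# datetime.date and datetime.datetime, such values can be plotted directly and
# the axis picks an AutoDateLocator/AutoDateFormatter on its own. The helper
# below is hypothetical and is not called anywhere in this module.
def _example_plot_datetimes_directly():
    import datetime
    import matplotlib.pyplot as plt
    days = [datetime.date(2016, 1, 1) + datetime.timedelta(days=i)
            for i in range(30)]
    fig, ax = plt.subplots()
    ax.plot(days, range(30))                  # no explicit date2num() needed
    fig.autofmt_xdate()
    return fig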
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
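# Illustrative usage sketch: parsing formatted date strings straight to
# matplotlib datenums with strpdate2num. The helper below is hypothetical and
# is not called anywhere in this module.
def _example_strpdate2num_usage():
    parse = strpdate2num('%Y-%m-%d')
    nums = [parse(s) for s in ('2010-06-01', '2010-06-02')]
    assert nums[1] - nums[0] == 1.0           # consecutive days differ by one
    return nums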
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
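# Illustrative usage sketch: the date2num()/num2date() round trip and the
# day-fraction encoding described in the docstring above. The helper below is
# hypothetical and is not called anywhere in this module.
def _example_date2num_roundtrip():
    import datetime
    d = datetime.datetime(2006, 4, 1, 6, 0, tzinfo=UTC)
    x = date2num(d)                           # ordinal day + 0.25 for 06:00
    assert x == datetime.date(2006, 4, 1).toordinal() + 0.25
    return num2date(x, tz=UTC)                # back to 2006-04-01 06:00 UTC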
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
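# Illustrative usage sketch: converting between Julian dates and matplotlib
# dates; JULIAN_OFFSET shifts Julian dates onto matplotlib's day numbering
# (see the constant defined above). The helper below is hypothetical and is
# not called anywhere in this module.
def _example_julian_conversion():
    jd = 2451545.0                            # noon, 2000-01-01 (J2000 epoch)
    x = julian2num(jd)
    assert num2julian(x) == jd
    return num2date(x, tz=UTC)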
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure, that an half open interval will be generated [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greated than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
# MASKED: __init__ function (lines 1322-1335)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
| 1322
| 1335
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
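# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# A minimal example of strpdate2num; the helper name below is arbitrary and the
# snippet only assumes matplotlib itself is importable.
def _example_strpdate2num():
    from matplotlib.dates import strpdate2num
    parse = strpdate2num('%Y-%m-%d')      # parser for ISO-style date strings
    return parse('2014-03-04')            # a date2num float for 2014-03-04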
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
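# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Round trip between datetime objects and matplotlib date numbers, showing the
# "days since 0001-01-01 UTC, plus one" convention described above. The helper
# name is arbitrary.
def _example_date2num_roundtrip():
    import datetime
    from matplotlib.dates import date2num, num2date
    d = datetime.datetime(2006, 4, 1, 6, 0, 0)
    n = date2num(d)           # float day number; the fraction encodes 06:00
    back = num2date(n)        # datetime in the rcParams timezone (UTC by default)
    return n, back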
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure, that an half open interval will be generated [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
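# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# drange produces a half-open range [dstart, dend) of date numbers in steps of a
# timedelta; the helper name is arbitrary.
def _example_drange():
    import datetime
    from matplotlib.dates import drange, num2date
    start = datetime.datetime(2020, 1, 1)
    end = datetime.datetime(2020, 1, 4)
    step = datetime.timedelta(hours=12)
    vals = drange(start, end, step)       # 6 values; 2020-01-04 itself is excluded
    return [num2date(v) for v in vals]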
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
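# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# A DateFormatter renders a matplotlib date number with a strftime format string;
# the expected output below assumes the default (UTC) rcParams timezone.
def _example_dateformatter():
    import datetime
    from matplotlib.dates import DateFormatter, date2num
    fmt = DateFormatter('%Y-%m-%d %H:%M')
    x = date2num(datetime.datetime(2015, 6, 1, 13, 30))
    return fmt(x)                         # '2015-06-01 13:30'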
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
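# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Customizing AutoDateFormatter.scaled as described in its docstring; the helper
# name is arbitrary.
def _example_autodateformatter():
    from matplotlib.dates import AutoDateLocator, AutoDateFormatter
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    # Used when the locator is ticking at minutely (or finer) intervals.
    formatter.scaled[1. / (24. * 60.)] = '%M:%S'
    return locator, formatter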
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
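# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# An RRuleLocator built from a rrulewrapper, in the spirit of the module
# docstring example; here the rule ticks every second year. The helper name is
# arbitrary.
def _example_rrule_locator():
    from matplotlib.dates import rrulewrapper, RRuleLocator, YEARLY
    rule = rrulewrapper(YEARLY, interval=2)
    return RRuleLocator(rule)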
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But default nonsingular date plots at an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
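# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# AutoDateLocator configured with a per-frequency maxticks dictionary,
# interval_multiples, and a customized intervald entry, as described in the
# __init__ docstring above. The helper name is arbitrary.
def _example_autodatelocator():
    from matplotlib.dates import AutoDateLocator, HOURLY, MINUTELY
    locator = AutoDateLocator(minticks=3,
                              maxticks={HOURLY: 8, MINUTELY: 10},
                              interval_multiples=True)
    locator.intervald[HOURLY] = [3]       # only allow 3-hourly ticking
    return locator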
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
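# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Wiring a fixed locator and formatter onto an axis; this sketch additionally
# assumes pyplot and a working backend, which the module itself does not use.
def _example_axis_setup():
    import matplotlib.pyplot as plt
    from matplotlib.dates import HourLocator, DateFormatter
    fig, ax = plt.subplots()
    ax.xaxis.set_major_locator(HourLocator(byhour=range(0, 24, 6)))  # hours 0, 6, 12, 18
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
    return fig, ax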
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
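# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Converting between Unix epoch seconds and matplotlib date numbers; the helper
# name is arbitrary.
def _example_epoch_conversion():
    from matplotlib.dates import epoch2num, num2epoch, num2date
    n = epoch2num(0)                      # Unix epoch -> days since 0001
    return num2date(n), num2epoch(n)      # 1970-01-01 00:00 UTC, 0.0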
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
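# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# date_ticker_factory picks a locator/formatter pair from the span in days; for
# a 45-day span and roughly 5 ticks it selects a WeekdayLocator with an
# '%a, %b %d' format. The helper name is arbitrary.
def _example_ticker_factory():
    from matplotlib.dates import date_ticker_factory
    locator, formatter = date_ticker_factory(45, numticks=5)
    return locator, formatter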
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
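# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# The duration helpers all express an interval as (fractions or multiples of) days.
def _example_duration_helpers():
    from matplotlib.dates import seconds, minutes, hours, weeks
    return seconds(30), minutes(90), hours(6), weeks(2)   # ~0.000347, 0.0625, 0.25, 14.0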
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
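# --- Illustrative usage sketch (added for clarity; not part of the original module). ---
# Because DateConverter is registered for datetime.date and datetime.datetime,
# date values handed to plotting functions are converted through date2num
# automatically; the call below exercises the registered converter directly.
def _example_units_registry():
    import datetime
    import matplotlib.units as units
    conv = units.registry[datetime.date]
    return conv.convert(datetime.date(2020, 1, 2), None, None)   # a date2num float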
|
__init__
|
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
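# Illustrative sketch: parsing a date string with a known format into a
# matplotlib date number. The format and sample string below are arbitrary
# examples, not values used anywhere in this module.
def _example_strpdate2num():
    convert = strpdate2num('%Y-%m-%d')
    return convert('2006-04-01')   # float: days since 0001-01-01 UTC, plus 1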
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
    if cbook.is_string_like(d):
        dt = dateutil.parser.parse(d, default=default)
        return date2num(dt)
    else:
        if default is not None:
            # the strings are parsed to datetime objects here, so convert
            # them directly instead of re-parsing them below
            d = [dateutil.parser.parse(s, default=default) for s in d]
            d = np.asarray(d)
            if not d.size:
                return d
            return date2num(d)
        d = np.asarray(d)
        if not d.size:
            return d
        return date2num(_dateutil_parser_parse_np_vectorized(d))
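# Illustrative sketch: datestr2num accepts a single string or a sequence of
# strings; the dates below are arbitrary examples.
def _example_datestr2num():
    single = datestr2num('2006-04-01')
    several = datestr2num(['2006-04-01', '2006-04-02'])
    return single, several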
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
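# Illustrative sketch: a date2num/num2date round trip; with tz omitted the
# rcParams timezone is used when converting back.
def _example_date2num_roundtrip():
    d = datetime.datetime(2006, 4, 1, 6, 0, 0)
    x = date2num(d)        # 732402.25 days since 0001-01-01 UTC, plus 1
    return num2date(x)     # back to a timezone-aware datetime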
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in terms of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
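# Illustrative sketch: drange produces a half-open range [dstart, dend) of
# matplotlib date numbers with a fixed step.
def _example_drange():
    dstart = datetime.datetime(2006, 4, 1)
    dend = datetime.datetime(2006, 4, 8)
    return drange(dstart, dend, datetime.timedelta(days=1))  # 7 daily values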
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date number (days since 0001-01-01 UTC,
    plus 1). Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
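# Illustrative sketch: attaching a DateFormatter to a hypothetical Axes
# stand-in `_example_axis`, which is not defined in this module.
def _example_date_formatter(_example_axis):
    fmt = DateFormatter('%Y-%m-%d %H:%M')
    _example_axis.xaxis.set_major_formatter(fmt)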
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
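# Illustrative sketch: overriding one entry of the scaled dictionary so that
# minute-level ticks drop fractional seconds.
def _example_autodateformatter():
    locator = AutoDateLocator()
    formatter = AutoDateFormatter(locator)
    formatter.scaled[1. / MINUTES_PER_DAY] = '%H:%M'
    return locator, formatter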
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
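# Illustrative sketch: a completely general rule, here ticking every third
# Friday, wrapped for use with the RRuleLocator defined further below.
def _example_rrulewrapper():
    rule = rrulewrapper(WEEKLY, byweekday=FR, interval=3)
    return RRuleLocator(rule)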
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
        # By default, expand a singular date range to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from an list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
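# Illustrative sketch: the usual pairing of AutoDateLocator and
# AutoDateFormatter on a hypothetical Axes stand-in `_example_axis`.
def _example_autodatelocator(_example_axis):
    locator = AutoDateLocator(minticks=3, maxticks=7)
    _example_axis.xaxis.set_major_locator(locator)
    _example_axis.xaxis.set_major_formatter(AutoDateFormatter(locator))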
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
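# Illustrative sketch: tick every Monday and label ticks with the weekday
# name, on a hypothetical Axes stand-in `_example_axis`.
def _example_weekday_ticks(_example_axis):
    _example_axis.xaxis.set_major_locator(WeekdayLocator(byweekday=MO))
    _example_axis.xaxis.set_major_formatter(DateFormatter('%a %b %d'))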
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
# MASKED: __init__ function (lines 1343-1351)
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
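# Illustrative sketch: converting a Unix epoch timestamp (seconds since
# 1970-01-01) to a matplotlib date number and back.
def _example_epoch_conversion():
    x = epoch2num(1143849600.0)   # 2006-04-01 00:00:00 UTC
    return num2epoch(x)           # 1143849600.0 again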
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
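# Illustrative sketch: composing the unit helpers, e.g. to pad axis limits by
# one week and six hours, expressed in matplotlib date units.
def _example_limit_padding():
    return weeks(1) + hours(6)   # 7.25 days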
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
| 1,343
| 1,351
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
  <https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
    if cbook.is_string_like(d):
        dt = dateutil.parser.parse(d, default=default)
        return date2num(dt)
    else:
        if default is not None:
            # the strings are parsed to datetime objects here, so convert
            # them directly instead of re-parsing them below
            d = [dateutil.parser.parse(s, default=default) for s in d]
            d = np.asarray(d)
            if not d.size:
                return d
            return date2num(d)
        d = np.asarray(d)
        if not d.size:
            return d
        return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
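# Illustrative sketch: date2num applied to a sequence returns an array of
# matplotlib date numbers.
def _example_date2num_sequence():
    dates = [datetime.datetime(2006, 4, 1), datetime.datetime(2006, 4, 2)]
    return date2num(dates)   # array([732402., 732403.])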
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in terms of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval will be generated [dstart, dend)
if dinterval_end >= dend:
        # if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
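# Illustrative sketch: an hourly half-open range [dstart, dend) of matplotlib
# date numbers.
def _example_hourly_range():
    dstart = datetime.datetime(2006, 4, 1, 0, 0)
    dend = datetime.datetime(2006, 4, 1, 6, 0)
    return drange(dstart, dend, datetime.timedelta(hours=1))  # 6 hourly values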
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
    Tick location is a matplotlib date number (days since 0001-01-01 UTC,
    plus 1). Use a :func:`strftime` format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except across century years that are not leap
years (centuries not divisible by 400). This holds only for the
Gregorian calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
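# --- Illustrative sketch (not part of the original module) ------------------
# Formatting a matplotlib date number directly with DateFormatter; the helper
# name is hypothetical.
def _example_dateformatter():
    import datetime as dt
    fmt = DateFormatter('%Y-%m-%d %H:%M', tz=UTC)
    x = date2num(dt.datetime(2006, 4, 1, 6, 0))
    return fmt(x)                    # '2006-04-01 06:00'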
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between major ticks) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than or equal to the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
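# --- Illustrative sketch (not part of the original module) ------------------
# Mirrors the scale -> format selection in __call__ above: the smallest key
# in ``scaled`` that is >= the locator unit wins, else the default format.
# The helper name is hypothetical.
def _example_pick_scaled_format(scaled, locator_unit_scale, defaultfmt):
    for possible_scale in sorted(scaled):
        if possible_scale >= locator_unit_scale:
            return scaled[possible_scale]
    return defaultfmt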
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
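# --- Illustrative sketch (not part of the original module) ------------------
# Building a completely general rule-based locator, as in the module
# docstring: tick on every 5th Easter. The helper name is hypothetical.
def _example_easter_locator():
    rule = rrulewrapper(YEARLY, byeaster=0, interval=5)
    return RRuleLocator(rule)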
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) to the
multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# Whatever is thrown at us, we can scale the unit.
# By default, expand singular date limits to a ~4-year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiples of *base* on a given month and day
(default Jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
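# --- Illustrative sketch (not part of the original module) ------------------
# YearLocator.tick_values applied directly to a datetime range; the helper
# name is hypothetical.
def _example_yearlocator_ticks():
    import datetime as dt
    loc = YearLocator(5, month=7, day=4, tz=UTC)
    vmin = dt.datetime(1993, 1, 1, tzinfo=UTC)
    vmax = dt.datetime(2011, 1, 1, tzinfo=UTC)
    # Date numbers for July 4th of 1990, 1995, ..., 2015 (the 5-year
    # multiples spanning the requested range).
    return loc.tick_values(vmin, vmax)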
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch, or sequence of epochs (seconds since 1970-01-01),
to the matplotlib date format, that is, days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
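# --- Illustrative sketch (not part of the original module) ------------------
# The Unix epoch (0 seconds) maps to 1970-01-01, i.e. EPOCH_OFFSET, and
# num2epoch inverts the mapping. The helper name is hypothetical.
def _example_epoch_roundtrip():
    n = epoch2num(0.0)               # 719163.0 == EPOCH_OFFSET
    return n, num2epoch(n)           # (719163.0, 0.0)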
def mx2num(mxdates):
"""
Convert an mx :class:`datetime` instance (or sequence of mx
instances) to the matplotlib date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
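# --- Illustrative sketch (not part of the original module) ------------------
# The helpers above return offsets in matplotlib date units (days), so they
# can be added directly to date numbers. The helper name is hypothetical.
def _example_offset_in_days():
    import datetime as dt
    n = date2num(dt.datetime(2006, 4, 1))
    return n + hours(6) + minutes(30)   # 2006-04-01 06:30 as a date number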
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
|
axisinfo
|
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kind of date. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for July
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
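# --- Illustrative sketch (not part of the original module) ------------------
# The ordinal float is the proleptic Gregorian ordinal of the date plus the
# fractional day; the helper name is hypothetical.
def _example_to_ordinalf():
    import datetime as dt
    d = dt.datetime(2006, 4, 1, 12, 0, tzinfo=UTC)
    return _to_ordinalf(d)           # 732402.5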
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object; the return value is a
float giving the total number of seconds in the `tdelta` duration. For
large durations (> 270 years on most platforms), this loses microsecond
accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
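# --- Illustrative sketch (not part of the original module) ------------------
# Parsing a date string with a known format into a matplotlib date number;
# the helper name is hypothetical.
def _example_strpdate2num():
    conv = strpdate2num('%Y-%m-%d')
    return conv('2006-04-01')        # 732402.0, same as date2num(...)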
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
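# --- Illustrative sketch (not part of the original module) ------------------
# datestr2num with a *default*: fields missing from the string are taken
# from the default datetime. The helper name is hypothetical.
def _example_datestr2num_default():
    import datetime as dt
    default = dt.datetime(2006, 4, 1)
    return datestr2num('12:30', default=default)   # 2006-04-01 12:30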
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
# calculate the difference between dend and dstart in units of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval [dstart, dend) is generated
if dinterval_end >= dend:
# if the endpoint is greater than or equal to dend, subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except across century years that are not leap
years (centuries not divisible by 400). This holds only for the
Gregorian calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between major ticks) to a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and add the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than or equal to the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) to the
multiples allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
        # By default, a singular date range is expanded to a ~4 year span.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
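# --- Editor's note: illustrative sketch, not part of the original module. ---
# A minimal example of wiring an AutoDateLocator (with a customized
# ``intervald``, as described in the class docstring) onto an axis.  The
# ``ax`` argument is assumed to be a matplotlib Axes instance created
# elsewhere; AutoDateFormatter is defined elsewhere in this module.
def _example_autodatelocator_usage(ax):
    locator = AutoDateLocator(minticks=3, maxticks=7)
    locator.intervald[HOURLY] = [3]  # only allow ticks every 3 hours
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(AutoDateFormatter(locator))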
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
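# --- Editor's note: illustrative sketch, not part of the original module. ---
# epoch2num/num2epoch convert between Unix timestamps (seconds since 1970)
# and matplotlib date numbers (days since 0001); a quick round trip:
def _example_epoch_roundtrip():
    d = epoch2num(0.0)                # 1970-01-01 as a matplotlib datenum
    assert abs(num2epoch(d)) < 1e-6   # back to (approximately) epoch zero
    return d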
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
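# --- Editor's note: illustrative sketch, not part of the original module. ---
# date_ticker_factory picks a (locator, formatter) pair appropriate for a
# span given in days; here it is attached to an assumed matplotlib Axes.
def _example_ticker_factory(ax, span_days=45.0):
    locator, formatter = date_ticker_factory(span_days)
    ax.xaxis.set_major_locator(locator)
    ax.xaxis.set_major_formatter(formatter)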
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
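# --- Editor's note: illustrative sketch, not part of the original module. ---
# The helpers above express durations as fractions of a day, the unit used
# by date2num; e.g. six hours is a quarter day and two weeks is 14 days.
def _example_duration_helpers():
    assert abs(hours(6) - 0.25) < 1e-12
    assert abs(weeks(2) - 14.0) < 1e-12
    assert abs(minutes(90) - hours(1.5)) < 1e-12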
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
# MASKED: axisinfo function (lines 1525-1541)
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
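# --- Editor's note: illustrative sketch, not part of the original module. ---
# With DateConverter registered for datetime.date/datetime.datetime, the
# units machinery converts dates transparently; a manual round trip:
def _example_dateconverter_roundtrip():
    value = DateConverter.convert(datetime.datetime(2001, 1, 1), None, None)
    return num2date(value)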
|
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
| 1,525
| 1,541
|
#!/usr/bin/env python
"""
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.org>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.org>`_) which allow almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
  :class:`DateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six.moves import xrange, zip
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
    Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
if hasattr(dt, 'tzinfo') and dt.tzinfo is not None:
delta = dt.tzinfo.utcoffset(dt)
if delta is not None:
dt -= delta
base = float(dt.toordinal())
if isinstance(dt, datetime.datetime):
# Get a datetime object at midnight in the same time zone as dt.
cdate = dt.date()
midnight_time = datetime.time(0, 0, 0, tzinfo=dt.tzinfo)
rdt = datetime.datetime.combine(cdate, midnight_time)
td_remainder = _total_seconds(dt - rdt)
if td_remainder > 0:
base += td_remainder / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
try:
# Available as a native method in Python >= 2.7.
_total_seconds = datetime.timedelta.total_seconds
except AttributeError:
def _total_seconds(tdelta):
"""
Alias providing support for datetime.timedelta.total_seconds() function
calls even in Python < 2.7.
The input `tdelta` is a datetime.timedelta object, and returns a float
containing the total number of seconds representing the `tdelta`
        duration. For large durations (> 270 years on most platforms), this loses
microsecond accuracy.
"""
return (tdelta.microseconds +
(tdelta.seconds + tdelta.days * SEC_PER_DAY) * 1e6) * 1e-6
def _from_ordinalf(x, tz=None):
"""
Convert Gregorian float of the date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/load_demo.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
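# --- Editor's note: illustrative sketch, not part of the original module. ---
# strpdate2num (explicit format) and datestr2num (free-form dateutil parsing)
# should agree on an unambiguous date string:
def _example_string_parsing():
    a = strpdate2num('%Y-%m-%d')('2006-04-01')
    b = datestr2num('2006-04-01')
    assert abs(a - b) < 1e-9
    return a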
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
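# --- Editor's note: illustrative sketch, not part of the original module. ---
# A sanity check for the Julian-date helpers: the J2000 epoch
# (2000-01-01 12:00 UTC) corresponds to Julian date 2451545.0.
def _example_julian_roundtrip():
    n = date2num(datetime.datetime(2000, 1, 1, 12))
    assert abs(num2julian(n) - 2451545.0) < 1e-6
    assert abs(julian2num(2451545.0) - n) < 1e-6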
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = _total_seconds(delta) / SEC_PER_DAY
    # calculate the difference between dend and dstart in multiples of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
    # ensure that a half-open interval [dstart, dend) will be generated
if dinterval_end >= dend:
        # if the endpoint reaches or exceeds dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
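# --- Editor's note: illustrative sketch, not part of the original module. ---
# drange produces a half-open range [dstart, dend) of float ordinals; for a
# one-week span with a one-day step that is exactly seven values.
def _example_drange_week():
    dstart = datetime.datetime(2006, 4, 1)
    dend = datetime.datetime(2006, 4, 8)
    days = drange(dstart, dend, datetime.timedelta(days=1))
    assert len(days) == 7
    return num2date(days)          # back to (timezone-aware) datetimes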
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
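# --- Editor's note: illustrative sketch, not part of the original module. ---
# DateFormatter formats a float ordinal through strftime; dates before 1900
# are routed through strftime_pre_1900 above.  (num2date applies the
# formatter's timezone, here left at the rcParams default.)
def _example_dateformatter():
    fmt = DateFormatter('%Y-%m-%d')
    return fmt(date2num(datetime.datetime(1850, 7, 4)))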
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
365.0 : '%Y',
30. : '%b %Y',
1.0 : '%b %d %Y',
1./24. : '%H:%M:%S',
1. / (24. * 60.): '%H:%M:%S.%f',
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> formatter = AutoDateFormatter()
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: '%Y',
DAYS_PER_MONTH: '%b %Y',
1.0: '%b %d %Y',
1. / HOURS_PER_DAY: '%H:%M:%S',
1. / (MINUTES_PER_DAY): '%H:%M:%S.%f'}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
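# --- Editor's note: illustrative sketch, not part of the original module. ---
# rrulewrapper exposes dateutil's rrule for tick placement; combined with
# RRuleLocator (defined below) it allows nearly arbitrary tick rules, e.g.
# every other Monday.  ``ax`` is assumed to be a matplotlib Axes instance.
def _example_rrule_ticks(ax):
    rule = rrulewrapper(WEEKLY, byweekday=MO, interval=2)
    ax.xaxis.set_major_locator(RRuleLocator(rule))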
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop, count=self.MAXTICKS + 1)
# estimate the number of ticks very approximately so we don't
# have to do a very expensive (and potentially near infinite)
# 'between' calculation, only to find out it will fail.
nmax, nmin = date2num((vmax, vmin))
estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())
# This estimate is only an estimate, so be really conservative
# about bailing...
if estimate > self.MAXTICKS * 2:
raise RuntimeError(
'RRuleLocator estimated to generate %d ticks from %s to %s: '
'exceeds Locator.MAXTICKS * 2 (%d) ' % (estimate, vmin, vmax,
self.MAXTICKS * 2))
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
        # By default, a singular date range is expanded to a ~4 year span.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(_total_seconds(tdelta))
numMicroseconds = np.floor(_total_seconds(tdelta) * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
        # an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
    Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
        example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
            byweekday = [x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
    Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
    Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
    Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
    Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
    Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(_total_seconds(delta) * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
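# Usage sketch (`ax` is a hypothetical Axes): with the converter registered above,
# datetime values can be passed straight to plotting calls and the axis picks up
# an AutoDateLocator/AutoDateFormatter automatically:
#   ax.plot([datetime.date(2020, 1, 1), datetime.date(2020, 6, 1)], [0, 1])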
|
start
|
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
|
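A minimal lifecycle sketch for the masked start() method, assuming the ResourceReflector class defined below, an already-loaded kubernetes client configuration, and a placeholder "jhub" namespace (both assumptions are illustrative, not taken from the code):
import asyncio

async def main():
    # kind="pods" lets __init__ derive list_method_name="list_namespaced_pod";
    # the namespace value is an illustrative assumption.
    reflector = ResourceReflector(kind="pods", namespace="jhub")
    await reflector.start()            # blocking initial list, then background watch task
    print(len(reflector.resources))    # local cache keyed by "namespace/name"
    await reflector.stop()             # cancel the watch task cleanly

asyncio.run(main())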
# specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
import asyncio
import json
import time
from functools import partial
from kubernetes_asyncio import watch
from traitlets import Any
from traitlets import Bool
from traitlets import Dict
from traitlets import Int
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
class ResourceReflector(LoggingConfigurable):
"""Base class for keeping a local up-to-date copy of a set of
kubernetes resources.
Must be subclassed once per kind of resource that needs watching.
Creating a reflector should be done with the create() classmethod,
since that, in addition to creating the instance, also starts the watch task.
Shutting down a reflector should be done by awaiting its stop() method.
KubeSpawner does not do this, because its reflectors are singleton
instances shared among multiple spawners. The watch task therefore runs
until JupyterHub exits.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
""",
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
""",
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
""",
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
""",
)
omit_namespace = Bool(
False,
config=True,
help="""
Set this to true if the reflector is to operate across
multiple namespaces.
""",
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in; leave at 'None' for
multi-namespace reflectors.
""",
)
list_method_name = Unicode(
"",
help="""
Name of function (on the apigroup represented by
`api_group_name`) that is to be called to list resources.
This will be passed a label selector.
If self.omit_namespace is False you want something of the form
list_namespaced_<resource> - for example,
`list_namespaced_pod` will give you a PodReflector. It will
take its namespace from self.namespace (which therefore should
not be None).
If self.omit_namespace is True, you want
list_<resource>_for_all_namespaces.
This must be set by a subclass.
It is not necessary to set it for pod or event reflectors, because
__init__ will figure it out. If you create your own reflector
subclass you probably want to add the logic to choose the method
name to that class's __init__().
""",
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which
`list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
for example, you would have to use ExtensionsV1beta1Api
""",
)
request_timeout = Int(
60,
config=True,
help="""
Network timeout for kubernetes watch.
Trigger watch reconnect when a given request is taking too long,
which can indicate network issues.
""",
)
timeout_seconds = Int(
10,
config=True,
help="""
Timeout for kubernetes watch.
Trigger watch reconnect when no watch event has been received.
This will cause a full reload of the currently existing resources
from the API server.
""",
)
restart_seconds = Int(
30,
config=True,
help="""
Maximum time before restarting a watch.
The watch will be restarted at least this often,
even if events are still arriving.
Avoids trusting kubernetes watch to yield all events,
which seems to not be a safe assumption.
""",
)
on_failure = Any(help="""Function to be called when the reflector gives up.""")
_stopping = Bool(False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Client configuration for kubernetes, as done via the load_config
# function, has already taken place in KubeSpawner or KubeIngressProxy
# initialization steps.
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.labels.items()]
)
self.field_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.fields.items()]
)
self.first_load_future = asyncio.Future()
# Make sure that we know kind, whether we should omit the
# namespace, and what our list_method_name is. For the things
# we already know about, we can derive list_method_name from
# those two things. New reflector types should also update
# their __init__() methods to derive list_method_name, but you
# could just set it directly in the subclass.
if not self.list_method_name:
plural_to_singular = {
"endpoints": "endpoints",
"events": "event",
"ingresses": "ingress",
"pods": "pod",
"services": "service",
}
if self.kind in plural_to_singular:
if self.omit_namespace:
self.list_method_name = (
f"list_{plural_to_singular[self.kind]}_for_all_namespaces"
)
else:
self.list_method_name = (
f"list_namespaced_{plural_to_singular[self.kind]}"
)
# Make sure we have the required values.
if not self.kind:
raise RuntimeError("Reflector kind must be set!")
if not self.list_method_name:
raise RuntimeError("Reflector list_method_name must be set!")
self.watch_task = None
async def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = None
kwargs = dict(
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
if not self.omit_namespace:
kwargs["namespace"] = self.namespace
list_method = getattr(self.api, self.list_method_name)
initial_resources_raw = await list_method(**kwargs)
# This is an atomic operation on the dictionary!
initial_resources = json.loads(await initial_resources_raw.read())
self.resources = {
f'{p["metadata"]["namespace"]}/{p["metadata"]["name"]}': p
for p in initial_resources["items"]
}
if not self.first_load_future.done():
# signal that we've loaded our initial data at least once
self.first_load_future.set_result(None)
# return the resource version so we can hook up a watch
return initial_resources["metadata"]["resourceVersion"]
async def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Since the resources are read-only in the Spawner (where they are
used), this is safe. The Spawner's view of the world might be
out-of-date, but it's not going to corrupt any data.
"""
selectors = []
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
if self.omit_namespace:
ns_str = "all namespaces"
else:
ns_str = "namespace {}".format(self.namespace)
self.log.info(
"watching for %s with %s in %s",
self.kind,
log_selector,
ns_str,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = await self._list_and_update()
watch_args = {
"label_selector": self.label_selector,
"field_selector": self.field_selector,
"resource_version": resource_version,
}
if not self.omit_namespace:
watch_args["namespace"] = self.namespace
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# Calling the method with _preload_content=False is a performance
# optimization making the Kubernetes client do less work. See
# https://github.com/jupyterhub/kubespawner/pull/424.
method = partial(
getattr(self.api, self.list_method_name), _preload_content=False
)
async with w.stream(method, **watch_args) as stream:
async for watch_event in stream:
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
# Remember that these events are k8s api related WatchEvents
# objects, not k8s Event or Pod representations, they will
# reside in the WatchEvent's object field depending on what
# kind of resource is watched.
#
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#watchevent-v1-meta
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
cur_delay = 0.1
resource = watch_event['raw_object']
ref_key = "{}/{}".format(
resource["metadata"]["namespace"],
resource["metadata"]["name"],
)
if watch_event['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(ref_key, None)
else:
# This is an atomic operation on the dictionary!
self.resources[ref_key] = resource
if self._stopping:
self.log.info("%s watcher stopped: inner", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind,
watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except asyncio.CancelledError:
self.log.debug("Cancelled watching %s", self.kind)
raise
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception(
"Error when watching resources, retrying in %ss", cur_delay
)
await asyncio.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stopping:
self.log.info("%s watcher stopped: outer", self.kind)
break
self.log.warning("%s watcher finished", self.kind)
# MASKED: start function (lines 378-392)
async def stop(self):
"""
Cleanly shut down the watch task.
"""
self._stopping = True
if self.watch_task and not self.watch_task.done():
# cancel the task, wait for it to complete
self.watch_task.cancel()
try:
timeout = 5
await asyncio.wait_for(self.watch_task, timeout)
except asyncio.TimeoutError:
# Raising the TimeoutError will cancel the task.
self.log.warning(
f"Watch task did not finish in {timeout}s and was cancelled"
)
self.watch_task = None
class NamespacedResourceReflector(ResourceReflector):
"""
Watches for resources in a particular namespace. The list_methods
want both a method name and a namespace.
"""
omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
"""
Watches for resources across all namespaces. The list_methods
want only a method name. Note that this requires the service account
to be significantly more powerful, since it must be bound to ClusterRoles
rather than just Roles, and therefore this is inherently more
dangerous.
"""
omit_namespace = True
|
async def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if self.watch_task and not self.watch_task.done():
raise RuntimeError('Task watching for resources is already running')
await self._list_and_update()
self.watch_task = asyncio.create_task(self._watch_and_update())
| 378
| 392
|
# specifically use concurrent.futures for threadsafety
# asyncio Futures cannot be used across threads
import asyncio
import json
import time
from functools import partial
from kubernetes_asyncio import watch
from traitlets import Any
from traitlets import Bool
from traitlets import Dict
from traitlets import Int
from traitlets import Unicode
from traitlets.config import LoggingConfigurable
from urllib3.exceptions import ReadTimeoutError
from .clients import shared_client
# This is kubernetes client implementation specific, but we need to know
# whether it was a network or watch timeout.
class ResourceReflector(LoggingConfigurable):
"""Base class for keeping a local up-to-date copy of a set of
kubernetes resources.
Must be subclassed once per kind of resource that needs watching.
Creating a reflector should be done with the create() classmethod,
since that, in addition to creating the instance, also starts the watch task.
Shutting down a reflector should be done by awaiting its stop() method.
KubeSpawner does not do this, because its reflectors are singleton
instances shared among multiple spawners. The watch task therefore runs
until JupyterHub exits.
"""
labels = Dict(
{},
config=True,
help="""
Labels to reflect onto local cache
""",
)
fields = Dict(
{},
config=True,
help="""
Fields to restrict the reflected objects
""",
)
resources = Dict(
{},
help="""
Dictionary of resource names to the appropriate resource objects.
This can be accessed across threads safely.
""",
)
kind = Unicode(
'resource',
help="""
Human readable name for kind of object we're watching for.
Used for diagnostic messages.
""",
)
omit_namespace = Bool(
False,
config=True,
help="""
Set this to true if the reflector is to operate across
multiple namespaces.
""",
)
namespace = Unicode(
None,
allow_none=True,
help="""
Namespace to watch for resources in; leave at 'None' for
multi-namespace reflectors.
""",
)
list_method_name = Unicode(
"",
help="""
Name of function (on the apigroup represented by
`api_group_name`) that is to be called to list resources.
This will be passed a label selector.
If self.omit_namespace is False you want something of the form
list_namespaced_<resource> - for example,
`list_namespaced_pod` will give you a PodReflector. It will
take its namespace from self.namespace (which therefore should
not be None).
If self.omit_namespace is True, you want
list_<resource>_for_all_namespaces.
This must be set by a subclass.
It is not necessary to set it for pod or event reflectors, because
__init__ will figure it out. If you create your own reflector
subclass you probably want to add the logic to choose the method
name to that class's __init__().
""",
)
api_group_name = Unicode(
'CoreV1Api',
help="""
Name of class that represents the apigroup on which
`list_method_name` is to be found.
Defaults to CoreV1Api, which has everything in the 'core' API group. If you want to watch Ingresses,
for example, you would have to use ExtensionsV1beta1Api
""",
)
request_timeout = Int(
60,
config=True,
help="""
Network timeout for kubernetes watch.
Trigger watch reconnect when a given request is taking too long,
which can indicate network issues.
""",
)
timeout_seconds = Int(
10,
config=True,
help="""
Timeout for kubernetes watch.
Trigger watch reconnect when no watch event has been received.
This will cause a full reload of the currently existing resources
from the API server.
""",
)
restart_seconds = Int(
30,
config=True,
help="""
Maximum time before restarting a watch.
The watch will be restarted at least this often,
even if events are still arriving.
Avoids trusting kubernetes watch to yield all events,
which seems to not be a safe assumption.
""",
)
on_failure = Any(help="""Function to be called when the reflector gives up.""")
_stopping = Bool(False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Client configuration for kubernetes, as done via the load_config
# function, has already taken place in KubeSpawner or KubeIngressProxy
# initialization steps.
self.api = shared_client(self.api_group_name)
# FIXME: Protect against malicious labels?
self.label_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.labels.items()]
)
self.field_selector = ','.join(
['{}={}'.format(k, v) for k, v in self.fields.items()]
)
self.first_load_future = asyncio.Future()
# Make sure that we know kind, whether we should omit the
# namespace, and what our list_method_name is. For the things
# we already know about, we can derive list_method_name from
# those two things. New reflector types should also update
# their __init__() methods to derive list_method_name, but you
# could just set it directly in the subclass.
if not self.list_method_name:
plural_to_singular = {
"endpoints": "endpoints",
"events": "event",
"ingresses": "ingress",
"pods": "pod",
"services": "service",
}
if self.kind in plural_to_singular:
if self.omit_namespace:
self.list_method_name = (
f"list_{plural_to_singular[self.kind]}_for_all_namespaces"
)
else:
self.list_method_name = (
f"list_namespaced_{plural_to_singular[self.kind]}"
)
# Make sure we have the required values.
if not self.kind:
raise RuntimeError("Reflector kind must be set!")
if not self.list_method_name:
raise RuntimeError("Reflector list_method_name must be set!")
self.watch_task = None
async def _list_and_update(self):
"""
Update current list of resources by doing a full fetch.
Overwrites all current resource info.
"""
initial_resources = None
kwargs = dict(
label_selector=self.label_selector,
field_selector=self.field_selector,
_request_timeout=self.request_timeout,
_preload_content=False,
)
if not self.omit_namespace:
kwargs["namespace"] = self.namespace
list_method = getattr(self.api, self.list_method_name)
initial_resources_raw = await list_method(**kwargs)
# This is an atomic operation on the dictionary!
initial_resources = json.loads(await initial_resources_raw.read())
self.resources = {
f'{p["metadata"]["namespace"]}/{p["metadata"]["name"]}': p
for p in initial_resources["items"]
}
if not self.first_load_future.done():
# signal that we've loaded our initial data at least once
self.first_load_future.set_result(None)
# return the resource version so we can hook up a watch
return initial_resources["metadata"]["resourceVersion"]
async def _watch_and_update(self):
"""
Keeps the current list of resources up-to-date
We first fetch the list of current resources, and store that. Then we
register to be notified of changes to those resources, and keep our
local store up-to-date based on these notifications.
We also perform exponential backoff, giving up after we hit 32s
wait time. This should protect against network connections dropping
and intermittent unavailability of the api-server. Every time we
recover from an exception we also do a full fetch, to pick up
changes that might've been missed in the time we were not doing
a watch.
Since the resources are read-only in the Spawner (where they are
used), this is safe. The Spawner's view of the world might be
out-of-date, but it's not going to corrupt any data.
"""
selectors = []
if self.label_selector:
selectors.append("label selector=%r" % self.label_selector)
if self.field_selector:
selectors.append("field selector=%r" % self.field_selector)
log_selector = ', '.join(selectors)
cur_delay = 0.1
if self.omit_namespace:
ns_str = "all namespaces"
else:
ns_str = "namespace {}".format(self.namespace)
self.log.info(
"watching for %s with %s in %s",
self.kind,
log_selector,
ns_str,
)
while True:
self.log.debug("Connecting %s watcher", self.kind)
start = time.monotonic()
w = watch.Watch()
try:
resource_version = await self._list_and_update()
watch_args = {
"label_selector": self.label_selector,
"field_selector": self.field_selector,
"resource_version": resource_version,
}
if not self.omit_namespace:
watch_args["namespace"] = self.namespace
if self.request_timeout:
# set network receive timeout
watch_args['_request_timeout'] = self.request_timeout
if self.timeout_seconds:
# set watch timeout
watch_args['timeout_seconds'] = self.timeout_seconds
# Calling the method with _preload_content=False is a performance
# optimization making the Kubernetes client do less work. See
# https://github.com/jupyterhub/kubespawner/pull/424.
method = partial(
getattr(self.api, self.list_method_name), _preload_content=False
)
async with w.stream(method, **watch_args) as stream:
async for watch_event in stream:
# in case of timeout_seconds, the w.stream just exits (no exception thrown)
# -> we stop the watcher and start a new one
# Remember that these events are k8s api related WatchEvents
# objects, not k8s Event or Pod representations, they will
# reside in the WatchEvent's object field depending on what
# kind of resource is watched.
#
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#watchevent-v1-meta
# ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.20/#event-v1-core
cur_delay = 0.1
resource = watch_event['raw_object']
ref_key = "{}/{}".format(
resource["metadata"]["namespace"],
resource["metadata"]["name"],
)
if watch_event['type'] == 'DELETED':
# This is an atomic delete operation on the dictionary!
self.resources.pop(ref_key, None)
else:
# This is an atomic operation on the dictionary!
self.resources[ref_key] = resource
if self._stopping:
self.log.info("%s watcher stopped: inner", self.kind)
break
watch_duration = time.monotonic() - start
if watch_duration >= self.restart_seconds:
self.log.debug(
"Restarting %s watcher after %i seconds",
self.kind,
watch_duration,
)
break
except ReadTimeoutError:
# network read time out, just continue and restart the watch
# this could be due to a network problem or just low activity
self.log.warning("Read timeout watching %s, reconnecting", self.kind)
continue
except asyncio.CancelledError:
self.log.debug("Cancelled watching %s", self.kind)
raise
except Exception:
cur_delay = cur_delay * 2
if cur_delay > 30:
self.log.exception("Watching resources never recovered, giving up")
if self.on_failure:
self.on_failure()
return
self.log.exception(
"Error when watching resources, retrying in %ss", cur_delay
)
await asyncio.sleep(cur_delay)
continue
else:
# no events on watch, reconnect
self.log.debug("%s watcher timeout", self.kind)
finally:
w.stop()
if self._stopping:
self.log.info("%s watcher stopped: outer", self.kind)
break
self.log.warning("%s watcher finished", self.kind)
async def start(self):
"""
Start the reflection process!
We'll do a blocking read of all resources first, so that we don't
race with any operations that are checking the state of the pod
store - such as polls. This should be called only once at the
start of program initialization (when the singleton is being created),
and not afterwards!
"""
if self.watch_task and not self.watch_task.done():
raise RuntimeError('Task watching for resources is already running')
await self._list_and_update()
self.watch_task = asyncio.create_task(self._watch_and_update())
async def stop(self):
"""
Cleanly shut down the watch task.
"""
self._stopping = True
if self.watch_task and not self.watch_task.done():
# cancel the task, wait for it to complete
self.watch_task.cancel()
try:
timeout = 5
await asyncio.wait_for(self.watch_task, timeout)
except asyncio.TimeoutError:
# Raising the TimeoutError will cancel the task.
self.log.warning(
f"Watch task did not finish in {timeout}s and was cancelled"
)
self.watch_task = None
class NamespacedResourceReflector(ResourceReflector):
"""
Watches for resources in a particular namespace. The list_methods
want both a method name and a namespace.
"""
omit_namespace = False
class MultiNamespaceResourceReflector(ResourceReflector):
"""
Watches for resources across all namespaces. The list_methods
want only a method name. Note that this requires the service account
to be significantly more powerful, since it must be bound to ClusterRoles
rather than just Roles, and therefore this is inherently more
dangerous.
"""
omit_namespace = True
|
labels_to_one_hot
|
Convert 1D array of labels to one hot representation
Args:
labels: 1D numpy array
|
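A quick sanity check, assuming the labels_to_one_hot implementation shown below (class id 2 and the default n_classes=44 are just an example):
vec = labels_to_one_hot(2)             # default n_classes = 43 + 1 = 44
assert vec.shape == (44,)
assert vec[2] == 1 and vec.sum() == 1  # exactly one hot entry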
# The following is the DenseNets module; the training actually took place in the `run_dense_net.py` file.
# Sorry, I really like PyCharm (and to be fair, PyTorch is much easier to debug).
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
from models import DenseNet
from data_providers.utils import get_data_provider_by_name
import tensorflow as tf
import numpy as np
import json
import pandas as pd
from tqdm import tqdm
import random
import time
from matplotlib import pyplot as plt
# Visualizations will be shown in the notebook.
# % matplotlib inline
from matplotlib import gridspec
# Load pickled data
import pickle
training_file = './data/train.p'
validation_file = './data/valid.p'
testing_file = './data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test_origin = test['features'], test['labels']
train_params_cifar = {
'batch_size': 64,
'n_epochs': 500,
'initial_learning_rate': 0.05,
'reduce_lr_epoch_1': 50, # epochs * 0.5
'reduce_lr_epoch_2': 75, # epochs * 0.75
'validation_set': True,
'validation_split': None, # None or float
'shuffle': 'every_epoch', # None, once_prior_train, every_epoch
'normalization': 'by_chanels', # None, divide_256, divide_255, by_chanels
'use_YUV': True,
'use_Y': False, # use only Y channel
'data_augmentation': 0, # [0, 1]
}
# Load model_params.json, which was saved from the trained model
with open('model_params.json', 'r') as fp:
model_params = json.load(fp)
# some default params dataset/architecture related
train_params = train_params_cifar
print("Params:")
for k, v in model_params.items():
print("\t%s: %s" % (k, v))
print("Train params:")
for k, v in train_params.items():
print("\t%s: %s" % (k, v))
model_params['use_Y'] = False
print("Prepare training data...")
data_provider = get_data_provider_by_name(model_params['dataset'], train_params)
print("Initialize the model..")
tf.reset_default_graph()
model = DenseNet(data_provider=data_provider, **model_params)
print("Loading trained model")
model.load_model()
print("Data provider test images: ", data_provider.test.num_examples)
print("Testing...")
loss, accuracy = model.test(data_provider.test, batch_size=30)
import cv2
# MASKED: labels_to_one_hot function (lines 81-89)
newimages = []
newlabels = []
new_onehot = []
newlabelsdata = []
directories = "./newimages"
subdirs = os.listdir(directories)
for subdir in subdirs:
classId = int(subdir.split("-")[0])
classinfo = {'label':classId,'count':0, 'samples':[]}
filepath = directories+"/"+subdir
for filename in os.listdir(filepath):
image_filepath = filepath+"/"+filename
image = cv2.imread(image_filepath)
image_rgb = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
image = image_rgb.copy()
image[:, :, 0] = image_rgb[:, :, 2]
image[:, :, 2] = image_rgb[:, :, 0]
newimages.append(image)
newlabels.append(classId)
new_onehot.append(labels_to_one_hot(classId))
classinfo['count'] += 1
classinfo['samples'].append(len(newimages)-1)
if classinfo['count'] > 0:
print("appending: ", classinfo)
newlabelsdata.append(classinfo)
newimages = np.array(newimages)
newlabels = np.array(newlabels)
new_onehot = np.array(new_onehot)
from data_providers.GermanTrafficSign import RGB2YUV
X_test_new = RGB2YUV(newimages)
new_image = np.zeros(X_test_new.shape)
for i in range(X_test_new.shape[-1]):
new_image[:, :, :, i] = ((X_test_new[:, :, :, i] - data_provider._means[i]) /data_provider._stds[i])
y_new_onehot = model.predictions_one_image(new_image)[0]
predict_classId = np.argmax(y_new_onehot, axis=1)
incorrectlist = []
for i in range(len(y_new_onehot)):
correct_classId = np.argmax(new_onehot[i],0)
predict_classId = np.argmax(y_new_onehot[i],0)
incorrectlist.append({'index':i, 'correct':correct_classId, 'predicted':predict_classId})
incorrectmatrix = {}
modeCount = 0
for i in range(len(incorrectlist)):
predicted = incorrectlist[i]['predicted']
correct = incorrectlist[i]['correct']
index = incorrectlist[i]['index']
bucket = str(correct) + "+" + str(predicted)
incorrectinstance = incorrectmatrix.get(bucket, {'count': 0, 'samples': []})
# add to the count
count = incorrectinstance['count'] + 1
# add to samples of this correct to predicted condition
samples = incorrectinstance['samples']
samples.append(index)
# put back in the list
incorrectmatrix[bucket] = {'count': count, 'correct': correct, 'predicted': predicted, 'samples': samples}
# update most common error
if count > modeCount:
modeCount = count
modeBucket = bucket
# get the list of buckets and sort them
def compare_bucket_count(bucket):
return modeCount - incorrectmatrix[bucket]['count']
sortedBuckets = list(incorrectmatrix.keys())
sortedBuckets.sort(key=compare_bucket_count)
# get the number of unique (correct, predicted) buckets
n_buckets = len(sortedBuckets)
# print the stats
print("\nNumber of unique buckets in incorrect set: ", n_buckets, "\n")
print("Mode Bucket: ", modeBucket, "with count: ", modeCount)
classLabelList = pd.read_csv('signnames.csv')
print("\nDistribution of buckets with predicted test dataset labels:")
for n in range(len(sortedBuckets)):
bucket = sortedBuckets[n]
cclassId = incorrectmatrix[bucket]['correct']
pclassId = incorrectmatrix[bucket]['predicted']
count = incorrectmatrix[bucket]['count']
cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
print(
"incorrect set count: {0:4d} CClassId: {1:02d} Description: {2}\n PClassId: {3:02d} Description: {4}".format(
count, cclassId, cdescription, pclassId, pdescription))
def draw_sample_correctmatrix(datasettxt, sortedBuckets, incorrectmatix, dataset, cmap=None):
n_maxsamples = 8
n_labels = len(sortedBuckets)
# size of each sample
fig = plt.figure(figsize=(n_maxsamples * 1.8, n_labels))
w_ratios = [1 for n in range(n_maxsamples)]
w_ratios[:0] = [int(n_maxsamples * 0.8)]
h_ratios = [1 for n in range(n_labels)]
# gridspec
time.sleep(1) # wait for 1 second for the previous print to appear!
grid = gridspec.GridSpec(n_labels, n_maxsamples + 1, wspace=0.0, hspace=0.0, width_ratios=w_ratios,
height_ratios=h_ratios)
labelset_pbar = tqdm(range(n_labels), desc=datasettxt, unit='labels')
for a in labelset_pbar:
cclassId = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['correct']
pclassId = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['predicted']
cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
count = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['count']
for b in range(n_maxsamples + 1):
i = a * (n_maxsamples + 1) + b
ax = plt.Subplot(fig, grid[i])
if b == 0:
ax.annotate(
'CClassId %d (%d): %s\nPClassId %d: %s' % (cclassId, count, cdescription, pclassId, pdescription),
xy=(0, 0), xytext=(0.0, 0.3))
ax.set_xticks([])
ax.set_yticks([])
fig.add_subplot(ax)
else:
if (b - 1) < count:
image = dataset[incorrectmatrix[sortedBuckets[n_labels - a - 1]]['samples'][b - 1]]
if cmap is None:
ax.imshow(image)
else:
# yuv = cv2.split(image)
# ax.imshow(yuv[0], cmap=cmap)
ax.imshow(image, cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
fig.add_subplot(ax)
# hide the borders
if a == (n_labels - 1):
all_axes = fig.get_axes()
for ax in all_axes:
for sp in ax.spines.values():
sp.set_visible(False)
plt.show()
|
def labels_to_one_hot(labels, n_classes=43+1):
"""Convert 1D array of labels to one hot representation
Args:
labels: 1D numpy array
"""
new_labels = np.zeros((n_classes,))
new_labels[labels] = 1
return new_labels
| 81
| 89
|
# The following is the DenseNets module; the training actually took place in the `run_dense_net.py` file.
# Sorry, I really like PyCharm (and to be fair, PyTorch is much easier to debug).
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
from models import DenseNet
from data_providers.utils import get_data_provider_by_name
import tensorflow as tf
import numpy as np
import json
import pandas as pd
from tqdm import tqdm
import random
import time
from matplotlib import pyplot as plt
# Visualizations will be shown in the notebook.
# % matplotlib inline
from matplotlib import gridspec
# Load pickled data
import pickle
training_file = './data/train.p'
validation_file = './data/valid.p'
testing_file = './data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test_origin = test['features'], test['labels']
train_params_cifar = {
'batch_size': 64,
'n_epochs': 500,
'initial_learning_rate': 0.05,
'reduce_lr_epoch_1': 50, # epochs * 0.5
'reduce_lr_epoch_2': 75, # epochs * 0.75
'validation_set': True,
'validation_split': None, # None or float
'shuffle': 'every_epoch', # None, once_prior_train, every_epoch
'normalization': 'by_chanels', # None, divide_256, divide_255, by_chanels
'use_YUV': True,
'use_Y': False, # use only Y channel
'data_augmentation': 0, # [0, 1]
}
# Load model_params.json, which was saved from the trained model
with open('model_params.json', 'r') as fp:
model_params = json.load(fp)
# some default params dataset/architecture related
train_params = train_params_cifar
print("Params:")
for k, v in model_params.items():
print("\t%s: %s" % (k, v))
print("Train params:")
for k, v in train_params.items():
print("\t%s: %s" % (k, v))
model_params['use_Y'] = False
print("Prepare training data...")
data_provider = get_data_provider_by_name(model_params['dataset'], train_params)
print("Initialize the model..")
tf.reset_default_graph()
model = DenseNet(data_provider=data_provider, **model_params)
print("Loading trained model")
model.load_model()
print("Data provider test images: ", data_provider.test.num_examples)
print("Testing...")
loss, accuracy = model.test(data_provider.test, batch_size=30)
import cv2
def labels_to_one_hot(labels, n_classes=43+1):
"""Convert 1D array of labels to one hot representation
Args:
labels: 1D numpy array
"""
new_labels = np.zeros((n_classes,))
new_labels[labels] = 1
return new_labels
newimages = []
newlabels = []
new_onehot = []
newlabelsdata = []
directories = "./newimages"
subdirs = os.listdir(directories)
for subdir in subdirs:
classId = int(subdir.split("-")[0])
classinfo = {'label':classId,'count':0, 'samples':[]}
filepath = directories+"/"+subdir
for filename in os.listdir(filepath):
image_filepath = filepath+"/"+filename
image = cv2.imread(image_filepath)
image_rgb = cv2.resize(image, (32, 32), interpolation=cv2.INTER_AREA)
image = image_rgb.copy()
image[:, :, 0] = image_rgb[:, :, 2]
image[:, :, 2] = image_rgb[:, :, 0]
newimages.append(image)
newlabels.append(classId)
new_onehot.append(labels_to_one_hot(classId))
classinfo['count'] += 1
classinfo['samples'].append(len(newimages)-1)
if classinfo['count'] > 0:
print("appending: ", classinfo)
newlabelsdata.append(classinfo)
newimages = np.array(newimages)
newlabels = np.array(newlabels)
new_onehot = np.array(new_onehot)
from data_providers.GermanTrafficSign import RGB2YUV
X_test_new = RGB2YUV(newimages)
new_image = np.zeros(X_test_new.shape)
for i in range(X_test_new.shape[-1]):
new_image[:, :, :, i] = ((X_test_new[:, :, :, i] - data_provider._means[i]) /data_provider._stds[i])
y_new_onehot = model.predictions_one_image(new_image)[0]
predict_classId = np.argmax(y_new_onehot, axis=1)
incorrectlist = []
for i in range(len(y_new_onehot)):
correct_classId = np.argmax(new_onehot[i],0)
predict_classId = np.argmax(y_new_onehot[i],0)
incorrectlist.append({'index':i, 'correct':correct_classId, 'predicted':predict_classId})
incorrectmatrix = {}
modeCount = 0
for i in range(len(incorrectlist)):
predicted = incorrectlist[i]['predicted']
correct = incorrectlist[i]['correct']
index = incorrectlist[i]['index']
bucket = str(correct) + "+" + str(predicted)
incorrectinstance = incorrectmatrix.get(bucket, {'count': 0, 'samples': []})
# add to the count
count = incorrectinstance['count'] + 1
# add to samples of this correct to predicted condition
samples = incorrectinstance['samples']
samples.append(index)
# put back in the list
incorrectmatrix[bucket] = {'count': count, 'correct': correct, 'predicted': predicted, 'samples': samples}
# update most common error
if count > modeCount:
modeCount = count
modeBucket = bucket
# get the list of buckets and sort them
def compare_bucket_count(bucket):
return modeCount - incorrectmatrix[bucket]['count']
sortedBuckets = list(incorrectmatrix.keys())
sortedBuckets.sort(key=compare_bucket_count)
# get the number of unique (correct, predicted) buckets
n_buckets = len(sortedBuckets)
# print the stats
print("\nNumber of unique buckets in incorrect set: ", n_buckets, "\n")
print("Mode Bucket: ", modeBucket, "with count: ", modeCount)
classLabelList = pd.read_csv('signnames.csv')
print("\nDistribution of buckets with predicted test dataset labels:")
for n in range(len(sortedBuckets)):
bucket = sortedBuckets[n]
cclassId = incorrectmatrix[bucket]['correct']
pclassId = incorrectmatrix[bucket]['predicted']
count = incorrectmatrix[bucket]['count']
cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
print(
"incorrect set count: {0:4d} CClassId: {1:02d} Description: {2}\n PClassId: {3:02d} Description: {4}".format(
count, cclassId, cdescription, pclassId, pdescription))
def draw_sample_correctmatrix(datasettxt, sortedBuckets, incorrectmatix, dataset, cmap=None):
n_maxsamples = 8
n_labels = len(sortedBuckets)
# size of each sample
fig = plt.figure(figsize=(n_maxsamples * 1.8, n_labels))
w_ratios = [1 for n in range(n_maxsamples)]
w_ratios[:0] = [int(n_maxsamples * 0.8)]
h_ratios = [1 for n in range(n_labels)]
# gridspec
time.sleep(1) # wait for 1 second for the previous print to appear!
grid = gridspec.GridSpec(n_labels, n_maxsamples + 1, wspace=0.0, hspace=0.0, width_ratios=w_ratios,
height_ratios=h_ratios)
labelset_pbar = tqdm(range(n_labels), desc=datasettxt, unit='labels')
for a in labelset_pbar:
cclassId = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['correct']
pclassId = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['predicted']
cdescription = classLabelList[classLabelList.ClassId == cclassId].SignName.to_string(header=False, index=False)
pdescription = classLabelList[classLabelList.ClassId == pclassId].SignName.to_string(header=False, index=False)
count = incorrectmatrix[sortedBuckets[n_labels - a - 1]]['count']
for b in range(n_maxsamples + 1):
i = a * (n_maxsamples + 1) + b
ax = plt.Subplot(fig, grid[i])
if b == 0:
ax.annotate(
'CClassId %d (%d): %s\nPClassId %d: %s' % (cclassId, count, cdescription, pclassId, pdescription),
xy=(0, 0), xytext=(0.0, 0.3))
ax.set_xticks([])
ax.set_yticks([])
fig.add_subplot(ax)
else:
if (b - 1) < count:
image = dataset[incorrectmatrix[sortedBuckets[n_labels - a - 1]]['samples'][b - 1]]
if cmap is None:
ax.imshow(image)
else:
# yuv = cv2.split(image)
# ax.imshow(yuv[0], cmap=cmap)
ax.imshow(image, cmap=cmap)
ax.set_xticks([])
ax.set_yticks([])
fig.add_subplot(ax)
# hide the borders
if a == (n_labels - 1):
all_axes = fig.get_axes()
for ax in all_axes:
for sp in ax.spines.values():
sp.set_visible(False)
plt.show()
|
trajectory_4way
|
Generate trajectory of agent diffusing through 4-way connected graph
At each point we sample the one-hot observation and take an action
0 = up
1 = right
2 = down
3 = left
Params:
steps (int): Number of steps to take
env (3d np array): environment in which to wander (NxNx(num_categories))
Returns
Observations (steps, num_categories), Actions (steps, 4)
|
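A short usage sketch, assuming the Graph4D class from the file below; the grid size and step count are illustrative:
graph = Graph4D(num_envs=1, env_size=(8, 8), steps=256)
env = graph.square_env()                     # 8x8 grid, 64 one-hot categories
obs, acts, pos = graph.trajectory_4way(env)
assert obs.shape == (256, 64)                # one one-hot observation per step
assert acts.shape == (256, 4)                # one one-hot action per step
assert pos.shape == (256, 2)                 # (row, col) visited at each step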
import os
import sys
import numpy as np
import torch
import pickle
import logging
log = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
class Graph4D():
def __init__(self, num_envs=4096, env_size=(4,4), steps=128, save=False, data_root='./data/', num_categories=None, verbose=False):
self.num_envs = num_envs
self.steps = steps
self.env_size = env_size
self.save = save
self.data_root = data_root
self.num_categories = num_categories
self.generate_data(verbose=verbose)
log.info('''Generated Data:
\t\t\t {} Environments...
\t\t\t {} env size...
\t\t\t {} steps in each...
\t\t\t {} observable one hot categories... '''.format(
num_envs, env_size, steps, self.num_categories
))
def square_env(self):
"""
Generate map where each vertex has a one hot categorical distribution
Returns:
(N,N,num_categories) matrix with one-hot categorical observations
"""
env_size = self.env_size
env = np.zeros((env_size[0], env_size[1], self.num_categories))
for i in range(env_size[0]):
# Randomly assign categories to each vertex in a row
category = np.random.randint(0, self.num_categories, env_size[1])
# One hot encode them
env[i, np.arange(category.size), category] = 1
return env
def update_location_4way(self, env, loc):
"""
Samples a valid four-way action and updates location
"""
length = env.shape[0]
valid = False
# print(loc, end=' --> ')
while not valid:
# Sample action
action = np.random.randint(0, 4)
# Move up
if action == 0:
if loc[0] - 1 >= 0:
# print('Moving up', end=' --> ')
loc[0] -= 1
valid = True
# Move right
elif action == 1:
if loc[1] + 1 < length:
# print('Moving Right', end=' --> ')
loc[1] += 1
valid = True
# Move down
elif action == 2:
if loc[0] + 1 < length:
# print('Moving Down', end=' --> ')
loc[0] += 1
valid = True
# Move left
elif action == 3:
if loc[1] - 1 >= 0:
# print('Moving Left', end=' --> ')
loc[1] -= 1
valid = True
# One hot encode action
act = np.zeros(4)
act[action] = 1
return act, loc
# MASKED: trajectory_4way function (lines 93-121)
def generate_data(self, verbose=False):
"""
Generates N square environments and trajectories ((observation, action) pairs)
for each environment
Params:
envs (int): number of environments to generate
steps (int): how many steps an agent initially takes in each environment
env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)
save (bool): whether or not to save the dataset
Returns:
Dict of "environments, observations, actions", each corresponding to:
environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories),
observations: Array shape: (num_envs, steps, num_categories),
actions: Array shape: (num_envs, steps, 4)
"""
env_size = self.env_size
if self.num_categories is None:
self.num_categories = env_size[0] * env_size[1]
self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories))
self.observations = np.zeros((self.num_envs, self.steps, self.num_categories))
self.actions = np.zeros((self.num_envs, self.steps, 4))
self.positions = np.zeros((self.num_envs, self.steps, 2))
for i in range(self.num_envs):
env = self.square_env() # Generate new environment
obs, acts, pos = self.trajectory_4way(env) # Generate random walk for that environment
self.environments[i] = env
self.observations[i] = obs
self.actions[i] = acts
self.positions[i] = pos
self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions}
if self.save:
name = os.path.join(self.data_root, 'four_way_graph.pickle')
with open(name, 'wb') as handle:
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
print('Generating 20 (8,8) environments with 256 random steps in each.')
graph = Graph4D(num_envs=20, env_size=(8,8), steps=256)
data = graph.data
envs = graph.environments
observations = graph.observations
actions = graph.actions
positions = graph.positions
print('Envs,', envs.shape)
print('Obs', observations.shape)
print('Acts', actions.shape)
print('Pos', positions.shape)
|
def trajectory_4way(self, env):
"""
Generate trajectory of agent diffusing through 4-way connected graph
At each point we sample the one-hot observation and take an action
0 = up
1 = right
2 = down
3 = left
Params:
steps (int): Number of steps to take
env (3d np array): environment in which to wander (NxNx(num_categories))
Returns
Observations (steps, num_categories), Actions (steps, 4)
"""
observations = np.zeros((self.steps, self.num_categories))
actions = np.zeros((self.steps, 4))
positions = np.zeros((self.steps, 2))
loc = np.random.randint(0, env.shape[0], 2) # Initial Location
for step in range(self.steps):
positions[step] = loc
obs = env[loc[0], loc[1]] # Observe scene
action, loc = self.update_location_4way(env, loc) # Sample action and new location
observations[step] = obs
actions[step] = action
return observations, actions, positions
| 93
| 121
|
import os
import sys
import numpy as np
import torch
import pickle
import logging
log = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
class Graph4D():
def __init__(self, num_envs=4096, env_size=(4,4), steps=128, save=False, data_root='./data/', num_categories=None, verbose=False):
self.num_envs = num_envs
self.steps = steps
self.env_size = env_size
self.save = save
self.data_root = data_root
self.num_categories = num_categories
self.generate_data(verbose=verbose)
log.info('''Generated Data:
\t\t\t {} Environments...
\t\t\t {} env size...
\t\t\t {} steps in each...
\t\t\t {} observable one hot categories... '''.format(
num_envs, env_size, steps, self.num_categories
))
def square_env(self):
"""
Generate map where each vertex has a one hot categorical distribution
Returns:
(N,N,num_categories) matrix with one-hot categorical observations
"""
env_size = self.env_size
env = np.zeros((env_size[0], env_size[1], self.num_categories))
for i in range(env_size[0]):
# Randomly assign categories to each vertex in a row
category = np.random.randint(0, self.num_categories, env_size[1])
# One hot encode them
env[i, np.arange(category.size), category] = 1
return env
def update_location_4way(self, env, loc):
"""
Samples a valid four-way action and updates location
"""
length = env.shape[0]
valid = False
# print(loc, end=' --> ')
while not valid:
# Sample action
action = np.random.randint(0, 4)
# Move up
if action == 0:
if loc[0] - 1 >= 0:
# print('Moving up', end=' --> ')
loc[0] -= 1
valid = True
# Move right
elif action == 1:
if loc[1] + 1 < length:
# print('Moving Right', end=' --> ')
loc[1] += 1
valid = True
# Move down
elif action == 2:
if loc[0] + 1 < length:
# print('Moving Down', end=' --> ')
loc[0] += 1
valid = True
# Move left
elif action == 3:
if loc[1] - 1 >= 0:
# print('Moving Left', end=' --> ')
loc[1] -= 1
valid = True
# One hot encode action
act = np.zeros(4)
act[action] = 1
return act, loc
def trajectory_4way(self, env):
"""
Generate trajectory of agent diffusing through 4-way connected graph
At each point we sample the one-hot observation and take an action
0 = up
1 = right
2 = down
3 = left
Params:
steps (int): Number of steps to take
env (3d np array): environment in which to wander (NxNx(num_categories))
Returns
Observations (steps, num_categories), Actions (steps, 4)
"""
observations = np.zeros((self.steps, self.num_categories))
actions = np.zeros((self.steps, 4))
positions = np.zeros((self.steps, 2))
loc = np.random.randint(0, env.shape[0], 2) # Initial Location
for step in range(self.steps):
positions[step] = loc
obs = env[loc[0], loc[1]] # Observe scene
action, loc = self.update_location_4way(env, loc) # Sample action and new location
observations[step] = obs
actions[step] = action
return observations, actions, positions
def generate_data(self, verbose=False):
"""
Generates N square environments and trajectories ((observation, action) pairs)
for each environment
Params:
envs (int): number of environments to generate
steps (int): how many steps an agent initially takes in each environment
env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)
save (bool): whether or not to save the dataset
Returns:
Dict of "environments, observations, actions", each corresponding to:
environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories),
observations: Array shape: (num_envs, steps, num_categories),
actions: Array shape: (num_envs, steps, 4)
"""
env_size = self.env_size
if self.num_categories is None:
self.num_categories = env_size[0] * env_size[1]
self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories))
self.observations = np.zeros((self.num_envs, self.steps, self.num_categories))
self.actions = np.zeros((self.num_envs, self.steps, 4))
self.positions = np.zeros((self.num_envs, self.steps, 2))
for i in range(self.num_envs):
env = self.square_env() # Generate new environment
obs, acts, pos = self.trajectory_4way(env) # Generate random walk for that environment
self.environments[i] = env
self.observations[i] = obs
self.actions[i] = acts
self.positions[i] = pos
self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions}
if self.save:
name = os.path.join(self.data_root, 'four_way_graph.pickle')
with open(name, 'wb') as handle:
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
print('Generating 20 (8,8) environments with 256 random steps in each.')
graph = Graph4D(num_envs=20, env_size=(8,8), steps=256)
data = graph.data
envs = graph.environments
observations = graph.observations
actions = graph.actions
positions = graph.positions
print('Envs,', envs.shape)
print('Obs', observations.shape)
print('Acts', actions.shape)
print('Pos', positions.shape)
|
generate_data
|
Generates N square environments and trajectories ((observation, action) pairs)
for each environment
Params:
envs (int): number of environments to generate
steps (int): how many steps an agent initially takes in each environment
env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)
save (bool): whether or not to save the dataset
Returns:
Dict of "environments, observations, actions", each corresponding to:
environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories),
observations: Array shape: (num_envs, steps, num_categories),
actions: Array shape: (num_envs, steps, 4)
|
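A shape check for the generated dataset, assuming the Graph4D class from the file below and small illustrative sizes (4 environments, a (4,4) grid, the default 128 steps, so num_categories defaults to 16):
graph = Graph4D(num_envs=4, env_size=(4, 4))
data = graph.data
assert data['environments'].shape == (4, 4, 4, 16)
assert data['observations'].shape == (4, 128, 16)
assert data['actions'].shape == (4, 128, 4)
assert data['positions'].shape == (4, 128, 2)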
import os
import sys
import numpy as np
import torch
import pickle
import logging
log = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
class Graph4D():
def __init__(self, num_envs=4096, env_size=(4,4), steps=128, save=False, data_root='./data/', num_categories=None, verbose=False):
self.num_envs = num_envs
self.steps = steps
self.env_size = env_size
self.save = save
self.data_root = data_root
self.num_categories = num_categories
self.generate_data(verbose=verbose)
log.info('''Generated Data:
\t\t\t {} Environments...
\t\t\t {} env size...
\t\t\t {} steps in each...
\t\t\t {} observable one hot categories... '''.format(
num_envs, env_size, steps, self.num_categories
))
def square_env(self):
"""
Generate map where each vertex has a one hot categorical distribution
Returns:
(N,N,num_categories) matrix with one-hot categorical observations
"""
env_size = self.env_size
env = np.zeros((env_size[0], env_size[1], self.num_categories))
for i in range(env_size[0]):
# Randomly assign categories to each vertex in a row
category = np.random.randint(0, self.num_categories, env_size[1])
# One hot encode them
env[i, np.arange(category.size), category] = 1
return env
def update_location_4way(self, env, loc):
"""
Samples a valid four-way action and updates location
"""
length = env.shape[0]
valid = False
# print(loc, end=' --> ')
while not valid:
# Sample action
action = np.random.randint(0, 4)
# Move up
if action == 0:
if loc[0] - 1 >= 0:
# print('Moving up', end=' --> ')
loc[0] -= 1
valid = True
# Move right
elif action == 1:
if loc[1] + 1 < length:
# print('Moving Right', end=' --> ')
loc[1] += 1
valid = True
# Move down
elif action == 2:
if loc[0] + 1 < length:
# print('Moving Down', end=' --> ')
loc[0] += 1
valid = True
# Move left
elif action == 3:
if loc[1] - 1 >= 0:
# print('Moving Left', end=' --> ')
loc[1] -= 1
valid = True
# One hot encode action
act = np.zeros(4)
act[action] = 1
return act, loc
def trajectory_4way(self, env):
"""
Generate trajectory of agent diffusing through 4-way connected graph
At each point we sample the one-hot observation and take an action
0 = up
1 = right
2 = down
3 = left
Params:
steps (int): Number of steps to take
env (3d np array): environment in which to wander (NxNx(num_categories))
Returns
Observations (steps, num_categories), Actions (steps, 4)
"""
observations = np.zeros((self.steps, self.num_categories))
actions = np.zeros((self.steps, 4))
positions = np.zeros((self.steps, 2))
loc = np.random.randint(0, env.shape[0], 2) # Initial Location
for step in range(self.steps):
positions[step] = loc
obs = env[loc[0], loc[1]] # Observe scene
action, loc = self.update_location_4way(env, loc) # Sample action and new location
observations[step] = obs
actions[step] = action
return observations, actions, positions
# MASKED: generate_data function (lines 123-162)
if __name__=='__main__':
print('Generating 20 (8,8) environments with 256 random steps in each.')
graph = Graph4D(num_envs=20, env_size=(8,8), steps=256)
data = graph.data
envs = graph.environments
observations = graph.observations
actions = graph.actions
positions = graph.positions
print('Envs,', envs.shape)
print('Obs', observations.shape)
print('Acts', actions.shape)
print('Pos', positions.shape)
|
def generate_data(self, verbose=False):
"""
Generates N square environments and trajectories ((observation, action) pairs)
for each environment
Params:
envs (int): number of environments to generate
steps (int): how many steps an agent initially takes in each environment
env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)
save (bool): whether or not to save the dataset
Returns:
Dict of "environments, observations, actions", each corresponding to:
environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories),
observations: Array shape: (num_envs, steps, num_categories),
actions: Array shape: (num_envs, steps, 4)
"""
env_size = self.env_size
if self.num_categories is None:
self.num_categories = env_size[0] * env_size[1]
self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories))
self.observations = np.zeros((self.num_envs, self.steps, self.num_categories))
self.actions = np.zeros((self.num_envs, self.steps, 4))
self.positions = np.zeros((self.num_envs, self.steps, 2))
for i in range(self.num_envs):
env = self.square_env() # Generate new environment
obs, acts, pos = self.trajectory_4way(env) # Generate random walk for that environment
self.environments[i] = env
self.observations[i] = obs
self.actions[i] = acts
self.positions[i] = pos
self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions}
if self.save:
name = os.path.join(self.data_root, 'four_way_graph.pickle')
with open(name, 'wb') as handle:
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 123
| 162
|
import os
import sys
import numpy as np
import torch
import pickle
import logging
log = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO
)
class Graph4D():
def __init__(self, num_envs=4096, env_size=(4,4), steps=128, save=False, data_root='./data/', num_categories=None, verbose=False):
self.num_envs = num_envs
self.steps = steps
self.env_size = env_size
self.save = save
self.data_root = data_root
self.num_categories = num_categories
self.generate_data(verbose=verbose)
log.info('''Generated Data:
\t\t\t {} Environments...
\t\t\t {} env size...
\t\t\t {} steps in each...
\t\t\t {} observable one hot categories... '''.format(
num_envs, env_size, steps, self.num_categories
))
def square_env(self):
"""
Generate map where each vertex has a one hot categorical distribution
Returns:
(N,N,num_categories) matrix with one-hot categorical observations
"""
env_size = self.env_size
env = np.zeros((env_size[0], env_size[1], self.num_categories))
for i in range(env_size[0]):
# Randomly assign categories to each vertex in a row
category = np.random.randint(0, self.num_categories, env_size[1])
# One hot encode them
env[i, np.arange(category.size), category] = 1
return env
def update_location_4way(self, env, loc):
"""
Samples a valid four-way action and updates location
"""
length = env.shape[0]
valid = False
# print(loc, end=' --> ')
while not valid:
# Sample action
action = np.random.randint(0, 4)
# Move up
if action == 0:
if loc[0] - 1 >= 0:
# print('Moving up', end=' --> ')
loc[0] -= 1
valid = True
# Move right
elif action == 1:
if loc[1] + 1 < length:
# print('Moving Right', end=' --> ')
loc[1] += 1
valid = True
# Move down
elif action == 2:
if loc[0] + 1 < length:
# print('Moving Down', end=' --> ')
loc[0] += 1
valid = True
# Move left
elif action == 3:
if loc[1] - 1 >= 0:
# print('Moving Left', end=' --> ')
loc[1] -= 1
valid = True
# One hot encode action
act = np.zeros(4)
act[action] = 1
return act, loc
def trajectory_4way(self, env):
"""
Generate trajectory of agent diffusing through 4-way connected graph
At each point we sample the one-hot observation and take an action
0 = up
1 = right
2 = down
3 = left
Params:
env (3d np array): environment in which to wander (NxNx(num_categories))
Returns:
Observations (steps, num_categories), Actions (steps, 4), Positions (steps, 2)
"""
observations = np.zeros((self.steps, self.num_categories))
actions = np.zeros((self.steps, 4))
positions = np.zeros((self.steps, 2))
loc = np.random.randint(0, env.shape[0], 2) # Initial Location
for step in range(self.steps):
positions[step] = loc
obs = env[loc[0], loc[1]] # Observe scene
action, loc = self.update_location_4way(env, loc) # Sample action and new location
observations[step] = obs
actions[step] = action
return observations, actions, positions
def generate_data(self, verbose=False):
"""
Generates N square environments and trajectories ((observation, action) pairs)
for each environment
Params:
envs (int): number of environments to generate
steps (int): how many steps an agent initially takes in each environment
env_size (tuple): size of environment (should be something like (4,4), (9,9), etc...)
save (bool): whether or not to save the dataset
Sets self.data to a dict of "environments, observations, actions, positions", where:
environments: Array shape: (num_envs, env_size_x, env_size_y, num_categories),
observations: Array shape: (num_envs, steps, num_categories),
actions: Array shape: (num_envs, steps, 4),
positions: Array shape: (num_envs, steps, 2)
"""
env_size = self.env_size
if self.num_categories is None:
self.num_categories = env_size[0] * env_size[1]
self.environments = np.zeros((self.num_envs, env_size[0], env_size[1], self.num_categories))
self.observations = np.zeros((self.num_envs, self.steps, self.num_categories))
self.actions = np.zeros((self.num_envs, self.steps, 4))
self.positions = np.zeros((self.num_envs, self.steps, 2))
for i in range(self.num_envs):
env = self.square_env() # Generate new environment
obs, acts, pos = self.trajectory_4way(env) # Generate random walk for that environment
self.environments[i] = env
self.observations[i] = obs
self.actions[i] = acts
self.positions[i] = pos
self.data = {'environments': self.environments, 'observations': self.observations, 'actions': self.actions, 'positions': self.positions}
if self.save:
name = os.path.join(self.data_root, 'four_way_graph.pickle')
with open(name, 'wb') as handle:
pickle.dump(self.data, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
print('Generating 20 (8,8) environments with 256 random steps in each.')
graph = Graph4D(num_envs=20, env_size=(8,8), steps=256)
data = graph.data
envs = graph.environments
observations = graph.observations
actions = graph.actions
positions = graph.positions
print('Envs,', envs.shape)
print('Obs', observations.shape)
print('Acts', actions.shape)
print('Pos', positions.shape)
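The Graph4D class above writes its dataset to './data/four_way_graph.pickle' when save=True. A minimal sketch of reading that pickle back and checking the documented array shapes (this assumes the dataset was generated with save=True and that the data_root directory exists; it is not part of the original module):

import pickle

with open('./data/four_way_graph.pickle', 'rb') as handle:
    data = pickle.load(handle)

# Shapes follow the generate_data docstring above.
print(data['environments'].shape)   # (num_envs, N, N, num_categories)
print(data['observations'].shape)   # (num_envs, steps, num_categories)
print(data['actions'].shape)        # (num_envs, steps, 4)
print(data['positions'].shape)      # (num_envs, steps, 2)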
|
control
|
Return a (multi-)controlled-RX gate.
Args:
num_ctrl_qubits (int): number of control qubits.
label (str or None): An optional label for the gate [Default: None]
ctrl_state (int or str or None): control state expressed as integer,
string (e.g. '110'), or None. If None, use all 1s.
Returns:
ControlledGate: controlled version of this gate.
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Rotation around the X axis."""
import math
import numpy
from qiskit.qasm import pi
from qiskit.circuit.controlledgate import ControlledGate
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class RXGate(Gate):
r"""Single-qubit rotation about the X axis.
**Circuit symbol:**
.. parsed-literal::
┌───────┐
q_0: ┤ Rx(ϴ) ├
└───────┘
**Matrix Representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
RX(\theta) = exp(-i \th X) =
\begin{pmatrix}
\cos{\th} & -i\sin{\th} \\
-i\sin{\th} & \cos{\th}
\end{pmatrix}
"""
def __init__(self, theta, label=None):
"""Create new RX gate."""
super().__init__('rx', 1, [theta], label=label)
def _define(self):
"""
gate rx(theta) a {r(theta, 0) a;}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .r import RGate
q = QuantumRegister(1, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(RGate(self.params[0], 0), [q[0]], [])
]
qc._data = rules
self.definition = qc
# MASKED: control function (lines 66-82)
def inverse(self):
r"""Return inverted RX gate.
:math:`RX(\lambda)^{\dagger} = RX(-\lambda)`
"""
return RXGate(-self.params[0])
def to_matrix(self):
"""Return a numpy.array for the RX gate."""
cos = math.cos(self.params[0] / 2)
sin = math.sin(self.params[0] / 2)
return numpy.array([[cos, -1j * sin],
[-1j * sin, cos]], dtype=complex)
class CRXGate(ControlledGate):
r"""Controlled-RX gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ────■────
┌───┴───┐
q_1: ┤ Rx(ϴ) ├
└───────┘
**Matrix representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
CRX(\theta)\ q_0, q_1 =
I \otimes |0\rangle\langle 0| + RX(\theta) \otimes |1\rangle\langle 1| =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & \cos{\th} & 0 & -i\sin{\th} \\
0 & 0 & 1 & 0 \\
0 & -i\sin{\th} & 0 & \cos{\th}
\end{pmatrix}
.. note::
In Qiskit's convention, higher qubit indices are more significant
(little endian convention). In many textbooks, controlled gates are
presented with the assumption of more significant qubits as control,
which in our case would be q_1. Thus a textbook matrix for this
gate will be:
.. parsed-literal::
┌───────┐
q_0: ┤ Rx(ϴ) ├
└───┬───┘
q_1: ────■────
.. math::
\newcommand{\th}{\frac{\theta}{2}}
CRX(\theta)\ q_1, q_0 =
|0\rangle\langle0| \otimes I + |1\rangle\langle1| \otimes RX(\theta) =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & \cos{\th} & -i\sin{\th} \\
0 & 0 & -i\sin{\th} & \cos{\th}
\end{pmatrix}
"""
def __init__(self, theta, label=None, ctrl_state=None):
"""Create new CRX gate."""
super().__init__('crx', 2, [theta], num_ctrl_qubits=1,
label=label, ctrl_state=ctrl_state)
self.base_gate = RXGate(theta)
def _define(self):
"""
gate cu3(theta,phi,lambda) c, t
{ u1(pi/2) t;
cx c,t;
u3(-theta/2,0,0) t;
cx c,t;
u3(theta/2,-pi/2,0) t;
}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .u1 import U1Gate
from .u3 import U3Gate
from .x import CXGate
q = QuantumRegister(2, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(U1Gate(pi / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(-self.params[0] / 2, 0, 0), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(self.params[0] / 2, -pi / 2, 0), [q[1]], [])
]
qc._data = rules
self.definition = qc
def inverse(self):
"""Return inverse RX gate (i.e. with the negative rotation angle)."""
return CRXGate(-self.params[0])
def to_matrix(self):
"""Return a numpy.array for the CRX gate."""
half_theta = float(self.params[0]) / 2
cos = numpy.cos(half_theta)
isin = 1j * numpy.sin(half_theta)
if self.ctrl_state:
return numpy.array([[1, 0, 0, 0],
[0, cos, 0, -isin],
[0, 0, 1, 0],
[0, -isin, 0, cos]],
dtype=complex)
else:
return numpy.array([[cos, 0, -isin, 0],
[0, 1, 0, 0],
[-isin, 0, cos, 0],
[0, 0, 0, 1]],
dtype=complex)
|
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
"""Return a (mutli-)controlled-RX gate.
Args:
num_ctrl_qubits (int): number of control qubits.
label (str or None): An optional label for the gate [Default: None]
ctrl_state (int or str or None): control state expressed as integer,
string (e.g. '110'), or None. If None, use all 1s.
Returns:
ControlledGate: controlled version of this gate.
"""
if num_ctrl_qubits == 1:
gate = CRXGate(self.params[0], label=label, ctrl_state=ctrl_state)
gate.base_gate.label = self.label
return gate
return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)
| 66
| 82
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Rotation around the X axis."""
import math
import numpy
from qiskit.qasm import pi
from qiskit.circuit.controlledgate import ControlledGate
from qiskit.circuit.gate import Gate
from qiskit.circuit.quantumregister import QuantumRegister
class RXGate(Gate):
r"""Single-qubit rotation about the X axis.
**Circuit symbol:**
.. parsed-literal::
┌───────┐
q_0: ┤ Rx(ϴ) ├
└───────┘
**Matrix Representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
RX(\theta) = exp(-i \th X) =
\begin{pmatrix}
\cos{\th} & -i\sin{\th} \\
-i\sin{\th} & \cos{\th}
\end{pmatrix}
"""
def __init__(self, theta, label=None):
"""Create new RX gate."""
super().__init__('rx', 1, [theta], label=label)
def _define(self):
"""
gate rx(theta) a {r(theta, 0) a;}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .r import RGate
q = QuantumRegister(1, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(RGate(self.params[0], 0), [q[0]], [])
]
qc._data = rules
self.definition = qc
def control(self, num_ctrl_qubits=1, label=None, ctrl_state=None):
"""Return a (mutli-)controlled-RX gate.
Args:
num_ctrl_qubits (int): number of control qubits.
label (str or None): An optional label for the gate [Default: None]
ctrl_state (int or str or None): control state expressed as integer,
string (e.g. '110'), or None. If None, use all 1s.
Returns:
ControlledGate: controlled version of this gate.
"""
if num_ctrl_qubits == 1:
gate = CRXGate(self.params[0], label=label, ctrl_state=ctrl_state)
gate.base_gate.label = self.label
return gate
return super().control(num_ctrl_qubits=num_ctrl_qubits, label=label, ctrl_state=ctrl_state)
def inverse(self):
r"""Return inverted RX gate.
:math:`RX(\lambda)^{\dagger} = RX(-\lambda)`
"""
return RXGate(-self.params[0])
def to_matrix(self):
"""Return a numpy.array for the RX gate."""
cos = math.cos(self.params[0] / 2)
sin = math.sin(self.params[0] / 2)
return numpy.array([[cos, -1j * sin],
[-1j * sin, cos]], dtype=complex)
class CRXGate(ControlledGate):
r"""Controlled-RX gate.
**Circuit symbol:**
.. parsed-literal::
q_0: ────■────
┌───┴───┐
q_1: ┤ Rx(ϴ) ├
└───────┘
**Matrix representation:**
.. math::
\newcommand{\th}{\frac{\theta}{2}}
CRX(\theta)\ q_0, q_1 =
I \otimes |0\rangle\langle 0| + RX(\theta) \otimes |1\rangle\langle 1| =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & \cos{\th} & 0 & -i\sin{\th} \\
0 & 0 & 1 & 0 \\
0 & -i\sin{\th} & 0 & \cos{\th}
\end{pmatrix}
.. note::
In Qiskit's convention, higher qubit indices are more significant
(little endian convention). In many textbooks, controlled gates are
presented with the assumption of more significant qubits as control,
which in our case would be q_1. Thus a textbook matrix for this
gate will be:
.. parsed-literal::
┌───────┐
q_0: ┤ Rx(ϴ) ├
└───┬───┘
q_1: ────■────
.. math::
\newcommand{\th}{\frac{\theta}{2}}
CRX(\theta)\ q_1, q_0 =
|0\rangle\langle0| \otimes I + |1\rangle\langle1| \otimes RX(\theta) =
\begin{pmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & \cos{\th} & -i\sin{\th} \\
0 & 0 & -i\sin{\th} & \cos{\th}
\end{pmatrix}
"""
def __init__(self, theta, label=None, ctrl_state=None):
"""Create new CRX gate."""
super().__init__('crx', 2, [theta], num_ctrl_qubits=1,
label=label, ctrl_state=ctrl_state)
self.base_gate = RXGate(theta)
def _define(self):
"""
gate cu3(theta,phi,lambda) c, t
{ u1(pi/2) t;
cx c,t;
u3(-theta/2,0,0) t;
cx c,t;
u3(theta/2,-pi/2,0) t;
}
"""
# pylint: disable=cyclic-import
from qiskit.circuit.quantumcircuit import QuantumCircuit
from .u1 import U1Gate
from .u3 import U3Gate
from .x import CXGate
q = QuantumRegister(2, 'q')
qc = QuantumCircuit(q, name=self.name)
rules = [
(U1Gate(pi / 2), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(-self.params[0] / 2, 0, 0), [q[1]], []),
(CXGate(), [q[0], q[1]], []),
(U3Gate(self.params[0] / 2, -pi / 2, 0), [q[1]], [])
]
qc._data = rules
self.definition = qc
def inverse(self):
"""Return inverse RX gate (i.e. with the negative rotation angle)."""
return CRXGate(-self.params[0])
def to_matrix(self):
"""Return a numpy.array for the CRX gate."""
half_theta = float(self.params[0]) / 2
cos = numpy.cos(half_theta)
isin = 1j * numpy.sin(half_theta)
if self.ctrl_state:
return numpy.array([[1, 0, 0, 0],
[0, cos, 0, -isin],
[0, 0, 1, 0],
[0, -isin, 0, cos]],
dtype=complex)
else:
return numpy.array([[cos, 0, -isin, 0],
[0, 1, 0, 0],
[-isin, 0, cos, 0],
[0, 0, 0, 1]],
dtype=complex)
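As a usage sketch for the RXGate.control method defined above (a hedged example, not part of the original file): in recent Qiskit Terra releases RXGate is exposed via qiskit.circuit.library, and controlling it with a single control qubit yields the CRXGate from the same module. Import paths may differ in older Terra versions.

from math import pi
from qiskit import QuantumCircuit
from qiskit.circuit.library import RXGate

qc = QuantumCircuit(2)
crx = RXGate(pi / 2).control(1)   # num_ctrl_qubits == 1 returns a CRXGate
qc.append(crx, [0, 1])            # qubit 0 controls RX(pi/2) on qubit 1
print(qc.draw())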
|
lookup
|
Looks up a specific information value based on either a string pattern
or a path.
For example, the pattern "stats.roc_auc.labels.true" is the same as
the path ``['stats', 'roc_auc', 'labels', True]``.
:Parameters:
path : `str` | `list`
The location of the information to lookup.
|
from collections import OrderedDict
from . import util
from ..errors import ModelInfoLookupError
class ModelInfo:
def __init__(self, pairs=[], default_fields=None):
"""
Constructs a mapping of information about a model.
:class:`~revscoring.scoring.ModelInfo` objects are usually nested
within each other to provide a convenient tree structure for
:func:`~revscoring.scoring.ModelInfo.lookup` and
:func:`~revscoring.scoring.ModelInfo.format`.
"""
self._data = OrderedDict(pairs)
self._default_fields = set(default_fields) \
if default_fields is not None else None
def __len__(self):
return len(self.keys())
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __contains__(self, key):
try:
return (key in self._data or
key in ('true', 'false') and (key == 'true') in self._data or
int(key) in self._data)
except ValueError:
return False
def keys(self):
return self._data.keys()
def get(self, key, default=None):
return self._data.get(key, default)
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def __iter__(self):
return iter(self._data)
def move_to_end(self, key, last=True):
return self._data.move_to_end(key, last=last)
# MASKED: lookup function (lines 56-83)
def format(self, paths=None, formatting="str", **kwargs):
"""
Format a representation of the model information in a useful way.
:Parameters:
paths : `iterable` ( `str` | [`str`] )
A set of paths to use when selecting which information should
formatted. Everything beneath a provided path in the tree
will be formatted. E.g. `statistics.roc_auc` and `statistics`
will format redundantly because `roc_auc` is already within
`statistics`. Alternatively `statistics.roc_auc` and
`statistics.pr_auc` will format only those two specific
bits of information.
formatting : "json" or "str"
Which output formatting do you want? "str" returns something
nice to show on the command-line. "json" returns something
that will pass through :func:`json.dump` without error.
"""
paths = paths or []
_paths = [
util.parse_pattern(path) if isinstance(path, str) else path
for path in paths]
path_tree = util.treeify(_paths)
if formatting == "str":
return self.format_str(path_tree, **kwargs)
elif formatting == "json":
return self.format_json(path_tree, **kwargs)
else:
raise ValueError("Formatting {0} is not available for {1}."
.format(formatting, self.__class__.__name__))
def format_str(self, path_tree, **kwargs):
formatted = "Model Information:\n"
for key in self.normalize_fields(path_tree):
key_val = try_key(key, self)
if hasattr(key_val, "format_str"):
sub_tree = path_tree.get(key, {})
formatted += util.tab_it_in(
key_val.format_str(sub_tree, **kwargs))
else:
formatted += util.tab_it_in(" - {0}: {1}"
.format(key, key_val))
return formatted
def format_json(self, path_tree, **kwargs):
d = OrderedDict()
for key in self.normalize_fields(path_tree):
key_val = try_key(key, self)
if hasattr(key_val, "format_json"):
sub_tree = path_tree.get(key, {})
d[key] = key_val.format_json(sub_tree, **kwargs)
else:
d[key] = key_val
return d
def normalize_fields(self, path_tree):
if len(path_tree) > 0:
yield from path_tree.keys()
else:
for field in self.keys():
if self._default_fields is None or \
field in self._default_fields:
yield field
def try_key(key, d):
try:
return d[key]
except KeyError:
try:
if key in ("true", "false"):
return d[key == 'true']
else:
try:
return d[int(key)]
except ValueError:
raise ModelInfoLookupError(key)
except KeyError:
raise ModelInfoLookupError(key)
|
def lookup(self, path=None):
"""
Looks up a specific information value based on either a string pattern
or a path.
For example, the pattern "stats.roc_auc.labels.true" is the same as
the path ``['stats', 'roc_auc', 'labels', True]``.
:Parameters:
path : `str` | `list`
The location of the information to lookup.
"""
if isinstance(path, str):
path = util.parse_pattern(path)
elif path is None:
path = []
d = self
remaining_path = list(path) # Make sure we don't overwrite the input
while len(remaining_path) > 0:
key = remaining_path.pop(0)
d = try_key(key, d)
if hasattr(d, "lookup"):
return d.lookup(remaining_path)
else:
continue
return d
| 56
| 83
|
from collections import OrderedDict
from . import util
from ..errors import ModelInfoLookupError
class ModelInfo:
def __init__(self, pairs=[], default_fields=None):
"""
Constructs a mapping of information about a model.
:class:`~revscoring.scoring.ModelInfo` objects are usually nested
within each other to provide a convenient tree structure for
:func:`~revscoring.scoring.ModelInfo.lookup` and
:func:`~revscoring.scoring.ModelInfo.format`.
"""
self._data = OrderedDict(pairs)
self._default_fields = set(default_fields) \
if default_fields is not None else None
def __len__(self):
return len(self.keys())
def __getitem__(self, key):
return self._data[key]
def __setitem__(self, key, value):
self._data[key] = value
def __contains__(self, key):
try:
return (key in self._data or
key in ('true', 'false') and (key == 'true') in self._data or
int(key) in self._data)
except ValueError:
return False
def keys(self):
return self._data.keys()
def get(self, key, default=None):
return self._data.get(key, default)
def values(self):
return self._data.values()
def items(self):
return self._data.items()
def __iter__(self):
return iter(self._data)
def move_to_end(self, key, last=True):
return self._data.move_to_end(key, last=last)
def lookup(self, path=None):
"""
Looks up a specific information value based on either a string pattern
or a path.
For example, the pattern "stats.roc_auc.labels.true" is the same as
the path ``['stats', 'roc_auc', 'labels', True]``.
:Parameters:
path : `str` | `list`
The location of the information to lookup.
"""
if isinstance(path, str):
path = util.parse_pattern(path)
elif path is None:
path = []
d = self
remaining_path = list(path) # Make sure we don't overwrite the input
while len(remaining_path) > 0:
key = remaining_path.pop(0)
d = try_key(key, d)
if hasattr(d, "lookup"):
return d.lookup(remaining_path)
else:
continue
return d
def format(self, paths=None, formatting="str", **kwargs):
"""
Format a representation of the model information in a useful way.
:Parameters:
paths : `iterable` ( `str` | [`str`] )
A set of paths to use when selecting which information should
formatted. Everything beneath a provided path in the tree
will be formatted. E.g. `statistics.roc_auc` and `statistics`
will format redundantly because `roc_auc` is already within
`statistics`. Alternatively `statistics.roc_auc` and
`statistics.pr_auc` will format only those two specific
bits of information.
formatting : "json" or "str"
Which output formatting do you want? "str" returns something
nice to show on the command-line. "json" returns something
that will pass through :func:`json.dump` without error.
"""
paths = paths or []
_paths = [
util.parse_pattern(path) if isinstance(path, str) else path
for path in paths]
path_tree = util.treeify(_paths)
if formatting == "str":
return self.format_str(path_tree, **kwargs)
elif formatting == "json":
return self.format_json(path_tree, **kwargs)
else:
raise ValueError("Formatting {0} is not available for {1}."
.format(formatting, self.__class__.__name__))
def format_str(self, path_tree, **kwargs):
formatted = "Model Information:\n"
for key in self.normalize_fields(path_tree):
key_val = try_key(key, self)
if hasattr(key_val, "format_str"):
sub_tree = path_tree.get(key, {})
formatted += util.tab_it_in(
key_val.format_str(sub_tree, **kwargs))
else:
formatted += util.tab_it_in(" - {0}: {1}"
.format(key, key_val))
return formatted
def format_json(self, path_tree, **kwargs):
d = OrderedDict()
for key in self.normalize_fields(path_tree):
key_val = try_key(key, self)
if hasattr(key_val, "format_json"):
sub_tree = path_tree.get(key, {})
d[key] = key_val.format_json(sub_tree, **kwargs)
else:
d[key] = key_val
return d
def normalize_fields(self, path_tree):
if len(path_tree) > 0:
yield from path_tree.keys()
else:
for field in self.keys():
if self._default_fields is None or \
field in self._default_fields:
yield field
def try_key(key, d):
try:
return d[key]
except KeyError:
try:
if key in ("true", "false"):
return d[key == 'true']
else:
try:
return d[int(key)]
except ValueError:
raise ModelInfoLookupError(key)
except KeyError:
raise ModelInfoLookupError(key)
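A small usage sketch for ModelInfo.lookup above (assuming the revscoring package is installed; the import path follows the class docstring, so adjust it if your version differs). Nested ModelInfo objects resolve a list path one key at a time:

from revscoring.scoring import ModelInfo

info = ModelInfo([
    ('statistics', ModelInfo([('roc_auc', 0.95), ('pr_auc', 0.87)]))
])
print(info.lookup(['statistics', 'roc_auc']))   # -> 0.95
print('statistics' in info)                     # -> True, via __contains__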
|
_quote_str
|
Escape all unicode characters to their unicode code points in the form \uxxxx.
The returned string is a pure ascii string.
Normal ascii characters like \n or \t won't be escaped.
note: wxGlade doesn't handle file encodings well currently. Therefore
we escape all unicode characters.
note: The string 's' is encoded with self.app_encoding already.
see: BaseLangCodeWriter._quote_str for additional details
see: _recode_x80_xff()
|
"""\
Perl code generator
@copyright: 2002-2004 D.H. aka crazyinsomniac on sourceforge.net
@copyright: 2012-2016 Carsten Grohmann
@copyright: 2017-2020 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import os, os.path, re
from codegen import BaseLangCodeWriter, BaseSourceFileContent
import wcodegen, compat
import logging
class SourceFileContent(BaseSourceFileContent):
rec_block_start = re.compile(
r'^(?P<spaces>\s*)' # leading spaces
r'#\s*' # comment sign
r'begin\s+wxGlade:\s*' # "begin wxGlade:" statement and tailing spaces
r'(?P<classname>[a-zA-Z_]+[\w:]*?)??' # class or function name (non-greedy)
r'(?::{2}|\s*)' # separator between class and function / block (non-greedy)
r'(?P<block>\w+)' # function / block name
r'\s*$' # tailing spaces
)
rec_block_end = re.compile(
r'^\s*' # leading spaces
r'#\s*' # comment sign
r'end\s+wxGlade' # "end wxGlade" statement
r'\s*$' # tailing spaces
)
# Less precise regex, but working :-P
# Should match: package Foo; or package Foo::bar::baz ;
rec_class_decl = re.compile(
r'^\s*' # leading spaces
r'package\s+([a-zA-Z_][\w:]*)\s*;' # "package <name>" statement
r'.*$' # any character till eol
)
rec_event_handler = re.compile(
r'^\s*' # leading spaces
r'#\s*wxGlade:\s*(?P<class>[\w:]+)::(?P<handler>\w+) <event_handler>' # wxGlade event handler
# statement with class and
# event handler name
r'\s*$' # tailing spaces
)
# Regexp to match Perl's Plain Old Documentation format; see: manpage perlpod
rec_pod = re.compile(
r'^\s*' # leading spaces
r'=[A-Za-z_]+\w*' # match POD statement
r'.*$' # any character till eol
)
def build_untouched_content(self):
"""\
Builds a string with the contents of the file that must be left as is,
and replaces the wxGlade blocks with tags that in turn will be replaced
by the new wxGlade blocks
WARNING: NOT YET COMPLETE -- crazyinsomniac
alb - almost done :)
WARNING: There is *NO* support for here documents: if you put wxGlade
blocks inside a here document, you're likely to run into trouble...
"""
BaseSourceFileContent.build_untouched_content(self)
inside_block = False
inside_pod = False
tmp_in = self._load_file(self.name)
out_lines = []
check_old_methods = [] # list of indices with set_properties or do_layout
for line in tmp_in:
result = self.rec_pod.match(line)
if result:
inside_pod = True
if inside_pod:
out_lines.append(line)
if line.startswith('=cut'):
inside_pod = False
continue
result = self.rec_class_decl.match(line)
if result:
if not self.class_name:
# this is the first class declared in the file: insert the new ones before this
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
self.new_classes_inserted = True
self.class_name = result.group(1)
self.class_name = self.format_classname(self.class_name)
self.classes.add( self.class_name ) # add the found class to the list of classes of this module
out_lines.append(line)
elif not inside_block:
result = self.rec_block_start.match(line)
if result:
# replace the lines inside a wxGlade block with a tag that will be used later by add_class
spaces = result.group('spaces')
which_class = result.group('classname')
which_block = result.group('block')
if not which_class:
which_class = self.class_name
else:
which_class = self.format_classname(which_class)
self.spaces[which_class] = spaces
inside_block = True
if not self.class_name:
out_lines.append( '<%swxGlade replace %s>' % (self.nonce, which_block) )
else:
if which_block in ("__do_layout","__set_properties"):
# probably to be removed
check_old_methods.append( len(out_lines) )
out_lines.append( '<%swxGlade replace %s %s>' % (self.nonce, which_class, which_block) )
else:
result = self.rec_event_handler.match(line)
if result:
which_handler = result.group('handler')
which_class = self.format_classname(result.group('class'))
self.event_handlers.setdefault( which_class, set() ).add( which_handler )
if self.class_name and self.is_end_of_class(line):
# add extra event handlers here...
out_lines.append( '<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
out_lines.append(line)
else:
# ignore all the lines inside a wxGlade block
if self.rec_block_end.match(line):
inside_block = False
if not self.new_classes_inserted:
# if we are here, the previous ``version'' of the file did not contain any class, so we must add the
# new_classes tag at the end of the file
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
# when moving from 0.9 to 1.0: remove empty methods "do_layout" and "set_properties"
while check_old_methods:
i = check_old_methods.pop(-1)
if out_lines[i+1].strip()=='}': # just end of block -> remove incl. trailing empty lines
self._remove_method(out_lines, i-2, i+1)
# set the ``persistent'' content of the file
self.content = out_lines
class PerlCodeWriter(BaseLangCodeWriter, wcodegen.PerlMixin):
"Code writer class for writing Perl code out of the designed GUI elements; see: BaseLangCodeWriter"
_code_statements = {
'backgroundcolour': "%(objname)s->SetBackgroundColour(%(value)s);\n",
'disabled': "%(objname)s->Enable(0);\n",
'extraproperties': "%(objname)s->Set%(propname_cap)s(%(value)s);\n",
'focused': "%(objname)s->SetFocus();\n",
'foregroundcolour': "%(objname)s->SetForegroundColour(%(value)s);\n",
'hidden': "%(objname)s->Show(0);\n",
'setfont': "%(objname)s->SetFont(Wx::Font->new(%(size)s, %(family)s, "
"%(style)s, %(weight)s, %(underlined)s, %(face)s));\n",
'tooltip': "%(objname)s->SetToolTipString(%(tooltip)s);\n",
'tooltip_3': "%(objname)s->SetToolTip(%(tooltip)s);\n",
'wxcolour': "Wx::Colour->new(%(value)s)",
'wxnullcolour': "Wx::NullColour",
'wxsystemcolour': "Wx::SystemSettings::GetColour(%(value)s)",
}
class_separator = '::'
classattr_always = ['wxBoxSizer', 'wxStaticBoxSizer', 'wxGridSizer', 'wxFlexGridSizer']
indent_amount = 1
indent_symbol = '\t'
indent_level_func_body = 1
language_note = '# To get wxPerl visit http://www.wxperl.it\n' \
'#\n'
name_ctor = 'new'
new_defaults = [] # Default class members, will be initialised during new_project()
shebang = '#!/usr/bin/perl -w -- \n#\n'
SourceFileContent = SourceFileContent
tmpl_cfunc_end = '%(tab)sreturn $self;\n' \
'\n' \
'}\n' \
'\n'
tmpl_class_end = '\n%(comment)s end of class %(klass)s\n\n1;\n\n'
tmpl_class_end_nomarker = '\n\n1;\n\n'
tmpl_func_event_stub = """\
sub %(handler)s {
%(tab)smy ($self, $event) = @_;
%(tab)s# wxGlade: %(klass)s::%(handler)s <event_handler>
%(tab)swarn "Event handler (%(handler)s) not implemented";
%(tab)s$event->Skip;
%(tab)s# end wxGlade
}
"""
tmpl_func_empty = '%(tab)sreturn;\n'
tmpl_sizeritem = '%s->Add(%s, %s, %s, %s);\n'
tmpl_gridbagsizeritem = '%s->Add(%s, %s, %s, %s, %s);\n'
tmpl_gridbagsizerspacer = '%s->Add(%s, %s, %s, %s, %s, %s);\n'
tmpl_spacersize = '%s, %s'
tmpl_style = \
'%(tab)s$style = %(style)s\n' \
'%(tab)s%(tab)sunless defined $style;\n' \
'\n'
tmpl_toplevel_style = tmpl_style
tmpl_appfile = """%(overwrite)s%(header_lines)s"""
def _get_app_template(self, app, top_win):
'build template string for application'
if not self.app_name: return None
# XXX use Show() for frames/panels and ShowModal()/Destroy for dialogs
klass = app.klass
if self._use_gettext:
gettext1 = ['%(tab)smy $local = Wx::Locale->new("English", "en", "en"); # replace with ??',
'%(tab)s$local->AddCatalog("%(textdomain)s"); # replace with the appropriate catalog name\n']
else:
gettext1 = []
if klass:
ret = [ 'package %(klass)s;',
'',
'use base qw(Wx::App);',
'use strict;',
'%(pl_import)s',
'sub OnInit {',
'%(tab)smy( $self ) = shift;',
'',
'%(tab)sWx::InitAllImageHandlers();',
'',
'%(tab)smy $%(top_win)s = %(top_win_class)s->new();',
'',
'%(tab)s$self->SetTopWindow($%(top_win)s);',
'%(tab)s$%(top_win)s->Show(1);',
'',
'%(tab)sreturn 1;',
'}']
if self._mark_blocks:
ret.append('# end of class %(klass)s')
ret += ['',
'package main;',
'',
'unless(caller){'] + gettext1 + [
'%(tab)smy $%(name)s = %(klass)s->new();',
'%(tab)s$%(name)s->MainLoop();',
'}', '']
else:
ret = ['1;',
'',
'package main;',
'%(pl_import)s',
'unless(caller){'] + gettext1 + [
'%(tab)slocal *Wx::App::OnInit = sub{1};',
'%(tab)smy $%(name)s = Wx::App->new();',
'%(tab)sWx::InitAllImageHandlers();',
'',
'%(tab)smy $%(top_win)s = %(top_win_class)s->new();',
'',
'%(tab)s$%(name)s->SetTopWindow($%(top_win)s);',
'%(tab)s$%(top_win)s->Show(1);',
'%(tab)s$%(name)s->MainLoop();',
'}', '']
return '\n'.join(ret)
def init_lang(self, app_attrs):
# initialise new defaults late to use the proper indent characters
tab = self.tabs(1)
self.new_defaults = {
'$parent' : '%s$parent = undef unless defined $parent;\n' % tab,
'$id' : '%s$id = -1 unless defined $id;\n' % tab,
'$title' : '%s$title = "" unless defined $title;\n' % tab,
'$pos' : '%s$pos = wxDefaultPosition unless defined $pos;\n' % tab,
'$size' : '%s$size = wxDefaultSize unless defined $size;\n' % tab,
'$name' : '%s$name = "" unless defined $name;\n\n' % tab,
#'$style' is a special case
}
self.header_lines = [
'use Wx qw[:allclasses];\n',
'use strict;\n'
]
def add_app(self, app_attrs, top_win):
# add language specific mappings
if self.multiple_files:
self.lang_mapping['pl_import'] = "\nuse %s;\n" % top_win.klass
else:
self.lang_mapping['pl_import'] = ''
BaseLangCodeWriter.add_app(self, app_attrs, top_win)
def generate_code_ctor(self, code_obj, is_new, tab):
code_lines = []
write = code_lines.append
builder = self.obj_builders[code_obj.WX_CLASS]
mycn = getattr(builder, 'cn', self.cn)
mycn_f = getattr(builder, 'cn_f', self.cn_f)
# custom base classes support
custom_base = code_obj.check_prop_nodefault('custom_base') and code_obj.custom_base.strip() or None
new_signature = getattr(builder, 'new_signature', [])
# generate constructor code
if is_new:
write('package %s;\n\n' % code_obj.klass)
write('use Wx qw[:everything];\nuse base qw(%s);\nuse strict;\n\n' % code_obj.WX_CLASS.replace('wx', 'Wx::', 1))
if self._use_gettext:
if self.multiple_files:
self.classes[code_obj].dependencies.add( "use Wx::Locale gettext => '_T';\n" )
else:
write("use Wx::Locale gettext => '_T';\n")
# The dependencies have to be added to the package block too because global imports are not visible inside the
# package block
# TODO: Don't add dependencies twice with Perl
# write the module dependencies for this class (package)
dep_list = sorted( self.classes[code_obj].dependencies )
if dep_list:
code = self._tagcontent('dependencies', dep_list, True)
write(code)
write('sub new {\n')
write(tab + "my( $self, %s ) = @_;\n" % ", ".join(new_signature))
if new_signature:
for k in new_signature:
if k in self.new_defaults:
write(self.new_defaults[k])
else:
new_signature = ['@_[1 .. $#_]'] # shift(@_)->SUPER::new(@_);
logging.info( "%s did not declare self.new_defaults ", code_obj.klass )
elif custom_base:
# custom base classes set, but "overwrite existing sources" not set. Issue a warning about this
self.warning( '%s has custom base classes, but you are not overwriting existing sources: '
'please check that the resulting code is correct!' % code_obj.name )
if self._mark_blocks:
# __init__ begin tag
write(self.tmpl_block_begin % {'class_separator':self.class_separator, 'comment_sign':self.comment_sign,
'function':self.name_ctor, 'klass':self.cn_class(code_obj.klass),
'tab':tab} )
# the optional initial code from the code properties
if not self.preview and code_obj.check_prop("extracode_pre"):
for l in code_obj.properties["extracode_pre"].get_lines():
write(tab + l)
style_p = code_obj.properties.get("style")
if style_p and style_p.value_set != style_p.default_value:
style = style_p.get_string_value()
m_style = mycn_f( style )
if m_style:
stmt_style = self._format_style(style, code_obj)
write( stmt_style % {'style':m_style, 'tab':tab} )
# class parent constructor
write(tab + '$self = $self->SUPER::new( %s );\n' % ", ".join(new_signature))
# set size here to avoid problems with splitter windows
if code_obj.check_prop('size'):
write( tab + self.generate_code_size(code_obj) )
for l in builder.get_properties_code(code_obj):
write(tab + l)
if code_obj.check_prop_truth('extraproperties'):
for l in builder.generate_code_extraproperties(code_obj):
write(tab + l)
# the initial and final code for the contained elements
for l in self.classes[code_obj].init:
write(tab + l)
if self.classes[code_obj].final:
write(tab + "\n")
for l in self.classes[code_obj].final:
write(tab + l)
# now check if there is initial and final code for the element itself
for l in builder.get_init_code(code_obj):
write(tab+l)
for l in builder.get_layout_code(code_obj):
write(tab + l)
# the optional final code from the code properties
if not self.preview and code_obj.check_prop("extracode_post"):
for l in code_obj.properties["extracode_post"].get_lines():
write(tab + l)
return code_lines
def generate_code_event_bind(self, code_obj, tab, event_handlers):
code_lines = []
for obj, event, handler, unused in event_handlers:
if obj.name:
obj_id = '%s->GetId'%self.format_generic_access(obj) # e.g. '$self->{button_1}->GetId' or '$self->GetId'
else:
obj_id = self.generate_code_id(None, obj.id)[1] or '-1' # but this is wrong anyway...
if 'EVT_NAVIGATION_KEY' in event:
tmpl = '''%(tab)s%(event)s($self, $self->can('%(handler)s'));\n'''
else:
tmpl = '''%(tab)s%(event)s($self, %(obj_id)s, $self->can('%(handler)s'));\n'''
code_lines.append( tmpl % {'tab': tab, 'event': self.cn(event), 'handler': handler, 'obj_id': obj_id} )
if event_handlers:
code_lines.append('\n')
return code_lines
def generate_code_id(self, obj, id=None):
if id is None:
id = obj.window_id
if not id:
if obj is not None and obj.check_prop_truth("stockitem"):
return '', self.cn("wxID_" + obj.stockitem)
return '', self.cn('wxID_ANY')
id = str(id)
tokens = id.split('=', 1)
if len(tokens) != 2:
return '', self.cn(tokens[0]) # we assume name is declared elsewhere
name, val = tokens
if not name:
return '', self.cn(val)
name = name.strip()
val = val.strip()
if val == '?':
val = self.cn('wxNewId()')
else:
val = self.cn(val)
# check to see if we have to make the var global or not...
return 'use constant %s => %s;\n' % (name, val), name
def generate_code_size(self, obj):
objname = self.format_generic_access(obj)
size = obj.properties["size"].get_string_value()
use_dialog_units = (size[-1] == 'd')
method = 'SetMinSize' if obj.parent_window else 'SetSize'
if use_dialog_units:
return '%s->%s(%s->ConvertDialogSizeToPixels(Wx::Size->new(%s)));\n' % (objname, method, objname, size[:-1])
return '%s->%s(Wx::Size->new(%s));\n' % (objname, method, size)
# MASKED: _quote_str function (lines 462-504)
def add_object_format_name(self, name):
return '#$self->%s' % name
def _format_classattr(self, obj):
res = BaseLangCodeWriter._format_classattr(self, obj)
if not res:
return res
elif obj.name.startswith('$self->'):
return obj.name
elif obj.name.startswith('$'):
return obj.name
# spacer.name is "<width>, <height>" already
elif obj.WX_CLASS == 'spacer':
return obj.name
# Perl stores sizers always in class attributes
elif self.store_as_attr(obj) or obj.IS_SIZER:
return '$self->{%s}' % obj.name
return '$%s' % obj.name
def _format_import(self, klass):
return 'use %s;\n' % klass
def _get_class_filename(self, klass):
"Returns the name for a Perl module (.pm) to store a single class in multi file projects"
return os.path.join( self.out_dir, klass.replace('::', os.sep) + '.pm' )
def format_generic_access(self, obj):
if obj.IS_CLASS:
return '$self'
return self._format_classattr(obj)
writer = PerlCodeWriter() # the code writer instance
language = writer.language # Language generated by this code generator
|
def _quote_str(self, s):
"""Escape all unicode characters to there unicode code points in form of \\uxxxx.
The returned string is a pure ascii string.
Normal ascii characters like \\n or \\t won't be escaped.
note: wxGlade don't handles file encoding well currently. Thereby
we escape all unicode characters.
note: The string 's' is encoded with self.app_encoding already.
see: BaseLangCodeWriter._quote_str for additional details
see: _recode_x80_xff()"""
s = s.replace('$', r'\$')
s = s.replace('@', r'\@')
# convert all strings to unicode first
if not isinstance(s, compat.unicode):
s = s.decode(self.app_encoding)
# check if it's pure ascii
try:
dummy = s.encode('ascii')
if self._use_gettext:
return '_T("%s")' % s
else:
return '"%s"' % s
except UnicodeError:
pass
# convert unicode strings to pure ascii
# use "raw-unicode-escape" just escaped unicode characters and not default escape sequences
s = s.encode('raw-unicode-escape')
s = self._recode_x80_xff(s)
if compat.PYTHON3:
# convert back to str (unicode)
s = s.decode("ASCII")
# convert Python style to Perl style
s = re.sub(r'\\u([0-9a-f]{4})', r'\\N{U+\1}', s)
if self._use_gettext:
return '_T("%s")' % s
else:
return '"%s"' % s
| 462
| 504
|
"""\
Perl code generator
@copyright: 2002-2004 D.H. aka crazyinsomniac on sourceforge.net
@copyright: 2012-2016 Carsten Grohmann
@copyright: 2017-2020 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import os, os.path, re
from codegen import BaseLangCodeWriter, BaseSourceFileContent
import wcodegen, compat
import logging
class SourceFileContent(BaseSourceFileContent):
rec_block_start = re.compile(
r'^(?P<spaces>\s*)' # leading spaces
r'#\s*' # comment sign
r'begin\s+wxGlade:\s*' # "begin wxGlade:" statement and tailing spaces
r'(?P<classname>[a-zA-Z_]+[\w:]*?)??' # class or function name (non-greedy)
r'(?::{2}|\s*)' # separator between class and function / block (non-greedy)
r'(?P<block>\w+)' # function / block name
r'\s*$' # tailing spaces
)
rec_block_end = re.compile(
r'^\s*' # leading spaces
r'#\s*' # comment sign
r'end\s+wxGlade' # "end wxGlade" statement
r'\s*$' # tailing spaces
)
# Less precise regex, but working :-P
# Should match: package Foo; or package Foo::bar::baz ;
rec_class_decl = re.compile(
r'^\s*' # leading spaces
r'package\s+([a-zA-Z_][\w:]*)\s*;' # "package <name>" statement
r'.*$' # any character till eol
)
rec_event_handler = re.compile(
r'^\s*' # leading spaces
r'#\s*wxGlade:\s*(?P<class>[\w:]+)::(?P<handler>\w+) <event_handler>' # wxGlade event handler
# statement with class and
# event handler name
r'\s*$' # tailing spaces
)
# Regexp to match Perl's Plain Old Documentation format; see: manpage perlpod
rec_pod = re.compile(
r'^\s*' # leading spaces
r'=[A-Za-z_]+\w*' # match POD statement
r'.*$' # any character till eol
)
def build_untouched_content(self):
"""\
Builds a string with the contents of the file that must be left as is,
and replaces the wxGlade blocks with tags that in turn will be replaced
by the new wxGlade blocks
WARNING: NOT YET COMPLETE -- crazyinsomniac
alb - almost done :)
WARNING: There is *NO* support for here documents: if you put wxGlade
blocks inside a here document, you're likely to run into trouble...
"""
BaseSourceFileContent.build_untouched_content(self)
inside_block = False
inside_pod = False
tmp_in = self._load_file(self.name)
out_lines = []
check_old_methods = [] # list of indices with set_properties or do_layout
for line in tmp_in:
result = self.rec_pod.match(line)
if result:
inside_pod = True
if inside_pod:
out_lines.append(line)
if line.startswith('=cut'):
inside_pod = False
continue
result = self.rec_class_decl.match(line)
if result:
if not self.class_name:
# this is the first class declared in the file: insert the new ones before this
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
self.new_classes_inserted = True
self.class_name = result.group(1)
self.class_name = self.format_classname(self.class_name)
self.classes.add( self.class_name ) # add the found class to the list of classes of this module
out_lines.append(line)
elif not inside_block:
result = self.rec_block_start.match(line)
if result:
# replace the lines inside a wxGlade block with a tag that will be used later by add_class
spaces = result.group('spaces')
which_class = result.group('classname')
which_block = result.group('block')
if not which_class:
which_class = self.class_name
else:
which_class = self.format_classname(which_class)
self.spaces[which_class] = spaces
inside_block = True
if not self.class_name:
out_lines.append( '<%swxGlade replace %s>' % (self.nonce, which_block) )
else:
if which_block in ("__do_layout","__set_properties"):
# probably to be removed
check_old_methods.append( len(out_lines) )
out_lines.append( '<%swxGlade replace %s %s>' % (self.nonce, which_class, which_block) )
else:
result = self.rec_event_handler.match(line)
if result:
which_handler = result.group('handler')
which_class = self.format_classname(result.group('class'))
self.event_handlers.setdefault( which_class, set() ).add( which_handler )
if self.class_name and self.is_end_of_class(line):
# add extra event handlers here...
out_lines.append( '<%swxGlade event_handlers %s>' % (self.nonce, self.class_name) )
out_lines.append(line)
else:
# ignore all the lines inside a wxGlade block
if self.rec_block_end.match(line):
inside_block = False
if not self.new_classes_inserted:
# if we are here, the previous ``version'' of the file did not contain any class, so we must add the
# new_classes tag at the end of the file
out_lines.append( '<%swxGlade insert new_classes>' % self.nonce )
# when moving from 0.9 to 1.0: remove empty methods "do_layout" and "set_properties"
while check_old_methods:
i = check_old_methods.pop(-1)
if out_lines[i+1].strip()=='}': # just end of block -> remove incl. trailing empty lines
self._remove_method(out_lines, i-2, i+1)
# set the ``persistent'' content of the file
self.content = out_lines
class PerlCodeWriter(BaseLangCodeWriter, wcodegen.PerlMixin):
"Code writer class for writing Perl code out of the designed GUI elements; see: BaseLangCodeWriter"
_code_statements = {
'backgroundcolour': "%(objname)s->SetBackgroundColour(%(value)s);\n",
'disabled': "%(objname)s->Enable(0);\n",
'extraproperties': "%(objname)s->Set%(propname_cap)s(%(value)s);\n",
'focused': "%(objname)s->SetFocus();\n",
'foregroundcolour': "%(objname)s->SetForegroundColour(%(value)s);\n",
'hidden': "%(objname)s->Show(0);\n",
'setfont': "%(objname)s->SetFont(Wx::Font->new(%(size)s, %(family)s, "
"%(style)s, %(weight)s, %(underlined)s, %(face)s));\n",
'tooltip': "%(objname)s->SetToolTipString(%(tooltip)s);\n",
'tooltip_3': "%(objname)s->SetToolTip(%(tooltip)s);\n",
'wxcolour': "Wx::Colour->new(%(value)s)",
'wxnullcolour': "Wx::NullColour",
'wxsystemcolour': "Wx::SystemSettings::GetColour(%(value)s)",
}
class_separator = '::'
classattr_always = ['wxBoxSizer', 'wxStaticBoxSizer', 'wxGridSizer', 'wxFlexGridSizer']
indent_amount = 1
indent_symbol = '\t'
indent_level_func_body = 1
language_note = '# To get wxPerl visit http://www.wxperl.it\n' \
'#\n'
name_ctor = 'new'
new_defaults = [] # Default class members, will be initialised during new_project()
shebang = '#!/usr/bin/perl -w -- \n#\n'
SourceFileContent = SourceFileContent
tmpl_cfunc_end = '%(tab)sreturn $self;\n' \
'\n' \
'}\n' \
'\n'
tmpl_class_end = '\n%(comment)s end of class %(klass)s\n\n1;\n\n'
tmpl_class_end_nomarker = '\n\n1;\n\n'
tmpl_func_event_stub = """\
sub %(handler)s {
%(tab)smy ($self, $event) = @_;
%(tab)s# wxGlade: %(klass)s::%(handler)s <event_handler>
%(tab)swarn "Event handler (%(handler)s) not implemented";
%(tab)s$event->Skip;
%(tab)s# end wxGlade
}
"""
tmpl_func_empty = '%(tab)sreturn;\n'
tmpl_sizeritem = '%s->Add(%s, %s, %s, %s);\n'
tmpl_gridbagsizeritem = '%s->Add(%s, %s, %s, %s, %s);\n'
tmpl_gridbagsizerspacer = '%s->Add(%s, %s, %s, %s, %s, %s);\n'
tmpl_spacersize = '%s, %s'
tmpl_style = \
'%(tab)s$style = %(style)s\n' \
'%(tab)s%(tab)sunless defined $style;\n' \
'\n'
tmpl_toplevel_style = tmpl_style
tmpl_appfile = """%(overwrite)s%(header_lines)s"""
def _get_app_template(self, app, top_win):
'build template string for application'
if not self.app_name: return None
# XXX use Show() for frames/panels and ShowModal()/Destroy for dialogs
klass = app.klass
if self._use_gettext:
gettext1 = ['%(tab)smy $local = Wx::Locale->new("English", "en", "en"); # replace with ??',
'%(tab)s$local->AddCatalog("%(textdomain)s"); # replace with the appropriate catalog name\n']
else:
gettext1 = []
if klass:
ret = [ 'package %(klass)s;',
'',
'use base qw(Wx::App);',
'use strict;',
'%(pl_import)s',
'sub OnInit {',
'%(tab)smy( $self ) = shift;',
'',
'%(tab)sWx::InitAllImageHandlers();',
'',
'%(tab)smy $%(top_win)s = %(top_win_class)s->new();',
'',
'%(tab)s$self->SetTopWindow($%(top_win)s);',
'%(tab)s$%(top_win)s->Show(1);',
'',
'%(tab)sreturn 1;',
'}']
if self._mark_blocks:
ret.append('# end of class %(klass)s')
ret += ['',
'package main;',
'',
'unless(caller){'] + gettext1 + [
'%(tab)smy $%(name)s = %(klass)s->new();',
'%(tab)s$%(name)s->MainLoop();',
'}', '']
else:
ret = ['1;',
'',
'package main;',
'%(pl_import)s',
'unless(caller){'] + gettext1 + [
'%(tab)slocal *Wx::App::OnInit = sub{1};',
'%(tab)smy $%(name)s = Wx::App->new();',
'%(tab)sWx::InitAllImageHandlers();',
'',
'%(tab)smy $%(top_win)s = %(top_win_class)s->new();',
'',
'%(tab)s$%(name)s->SetTopWindow($%(top_win)s);',
'%(tab)s$%(top_win)s->Show(1);',
'%(tab)s$%(name)s->MainLoop();',
'}', '']
return '\n'.join(ret)
def init_lang(self, app_attrs):
# initialise new defaults late to use the proper indent characters
tab = self.tabs(1)
self.new_defaults = {
'$parent' : '%s$parent = undef unless defined $parent;\n' % tab,
'$id' : '%s$id = -1 unless defined $id;\n' % tab,
'$title' : '%s$title = "" unless defined $title;\n' % tab,
'$pos' : '%s$pos = wxDefaultPosition unless defined $pos;\n' % tab,
'$size' : '%s$size = wxDefaultSize unless defined $size;\n' % tab,
'$name' : '%s$name = "" unless defined $name;\n\n' % tab,
#'$style' is a special case
}
self.header_lines = [
'use Wx qw[:allclasses];\n',
'use strict;\n'
]
def add_app(self, app_attrs, top_win):
# add language specific mappings
if self.multiple_files:
self.lang_mapping['pl_import'] = "\nuse %s;\n" % top_win.klass
else:
self.lang_mapping['pl_import'] = ''
BaseLangCodeWriter.add_app(self, app_attrs, top_win)
def generate_code_ctor(self, code_obj, is_new, tab):
code_lines = []
write = code_lines.append
builder = self.obj_builders[code_obj.WX_CLASS]
mycn = getattr(builder, 'cn', self.cn)
mycn_f = getattr(builder, 'cn_f', self.cn_f)
# custom base classes support
custom_base = code_obj.check_prop_nodefault('custom_base') and code_obj.custom_base.strip() or None
new_signature = getattr(builder, 'new_signature', [])
# generate constructor code
if is_new:
write('package %s;\n\n' % code_obj.klass)
write('use Wx qw[:everything];\nuse base qw(%s);\nuse strict;\n\n' % code_obj.WX_CLASS.replace('wx', 'Wx::', 1))
if self._use_gettext:
if self.multiple_files:
self.classes[code_obj].dependencies.add( "use Wx::Locale gettext => '_T';\n" )
else:
write("use Wx::Locale gettext => '_T';\n")
# The dependencies have to be added to the package block too because global imports are not visible inside the
# package block
# TODO: Don't add dependencies twice with Perl
# write the module dependencies for this class (package)
dep_list = sorted( self.classes[code_obj].dependencies )
if dep_list:
code = self._tagcontent('dependencies', dep_list, True)
write(code)
write('sub new {\n')
write(tab + "my( $self, %s ) = @_;\n" % ", ".join(new_signature))
if new_signature:
for k in new_signature:
if k in self.new_defaults:
write(self.new_defaults[k])
else:
new_signature = ['@_[1 .. $#_]'] # shift(@_)->SUPER::new(@_);
logging.info( "%s did not declare self.new_defaults ", code_obj.klass )
elif custom_base:
# custom base classes set, but "overwrite existing sources" not set. Issue a warning about this
self.warning( '%s has custom base classes, but you are not overwriting existing sources: '
'please check that the resulting code is correct!' % code_obj.name )
if self._mark_blocks:
# __init__ begin tag
write(self.tmpl_block_begin % {'class_separator':self.class_separator, 'comment_sign':self.comment_sign,
'function':self.name_ctor, 'klass':self.cn_class(code_obj.klass),
'tab':tab} )
# the optional initial code from the code properties
if not self.preview and code_obj.check_prop("extracode_pre"):
for l in code_obj.properties["extracode_pre"].get_lines():
write(tab + l)
style_p = code_obj.properties.get("style")
if style_p and style_p.value_set != style_p.default_value:
style = style_p.get_string_value()
m_style = mycn_f( style )
if m_style:
stmt_style = self._format_style(style, code_obj)
write( stmt_style % {'style':m_style, 'tab':tab} )
# class parent constructor
write(tab + '$self = $self->SUPER::new( %s );\n' % ", ".join(new_signature))
# set size here to avoid problems with splitter windows
if code_obj.check_prop('size'):
write( tab + self.generate_code_size(code_obj) )
for l in builder.get_properties_code(code_obj):
write(tab + l)
if code_obj.check_prop_truth('extraproperties'):
for l in builder.generate_code_extraproperties(code_obj):
write(tab + l)
# the initial and final code for the contained elements
for l in self.classes[code_obj].init:
write(tab + l)
if self.classes[code_obj].final:
write(tab + "\n")
for l in self.classes[code_obj].final:
write(tab + l)
# now check if there is initial and final code for the element itself
for l in builder.get_init_code(code_obj):
write(tab+l)
for l in builder.get_layout_code(code_obj):
write(tab + l)
# the optional final code from the code properties
if not self.preview and code_obj.check_prop("extracode_post"):
for l in code_obj.properties["extracode_post"].get_lines():
write(tab + l)
return code_lines
def generate_code_event_bind(self, code_obj, tab, event_handlers):
code_lines = []
for obj, event, handler, unused in event_handlers:
if obj.name:
obj_id = '%s->GetId'%self.format_generic_access(obj) # e.g. '$self->{button_1}->GetId' or '$self->GetId'
else:
obj_id = self.generate_code_id(None, obj.id)[1] or '-1' # but this is wrong anyway...
if 'EVT_NAVIGATION_KEY' in event:
tmpl = '''%(tab)s%(event)s($self, $self->can('%(handler)s'));\n'''
else:
tmpl = '''%(tab)s%(event)s($self, %(obj_id)s, $self->can('%(handler)s'));\n'''
code_lines.append( tmpl % {'tab': tab, 'event': self.cn(event), 'handler': handler, 'obj_id': obj_id} )
if event_handlers:
code_lines.append('\n')
return code_lines
def generate_code_id(self, obj, id=None):
if id is None:
id = obj.window_id
if not id:
if obj is not None and obj.check_prop_truth("stockitem"):
return '', self.cn("wxID_" + obj.stockitem)
return '', self.cn('wxID_ANY')
id = str(id)
tokens = id.split('=', 1)
if len(tokens) != 2:
return '', self.cn(tokens[0]) # we assume name is declared elsewhere
name, val = tokens
if not name:
return '', self.cn(val)
name = name.strip()
val = val.strip()
if val == '?':
val = self.cn('wxNewId()')
else:
val = self.cn(val)
# check to see if we have to make the var global or not...
return 'use constant %s => %s;\n' % (name, val), name
def generate_code_size(self, obj):
objname = self.format_generic_access(obj)
size = obj.properties["size"].get_string_value()
use_dialog_units = (size[-1] == 'd')
method = 'SetMinSize' if obj.parent_window else 'SetSize'
if use_dialog_units:
return '%s->%s(%s->ConvertDialogSizeToPixels(Wx::Size->new(%s)));\n' % (objname, method, objname, size[:-1])
return '%s->%s(Wx::Size->new(%s));\n' % (objname, method, size)
def _quote_str(self, s):
"""Escape all unicode characters to there unicode code points in form of \\uxxxx.
The returned string is a pure ascii string.
Normal ascii characters like \\n or \\t won't be escaped.
note: wxGlade don't handles file encoding well currently. Thereby
we escape all unicode characters.
note: The string 's' is encoded with self.app_encoding already.
see: BaseLangCodeWriter._quote_str for additional details
see: _recode_x80_xff()"""
s = s.replace('$', r'\$')
s = s.replace('@', r'\@')
# convert all strings to unicode first
if not isinstance(s, compat.unicode):
s = s.decode(self.app_encoding)
# check if it's pure ascii
try:
dummy = s.encode('ascii')
if self._use_gettext:
return '_T("%s")' % s
else:
return '"%s"' % s
except UnicodeError:
pass
# convert unicode strings to pure ascii
# use "raw-unicode-escape" just escaped unicode characters and not default escape sequences
s = s.encode('raw-unicode-escape')
s = self._recode_x80_xff(s)
if compat.PYTHON3:
# convert back to str (unicode)
s = s.decode("ASCII")
# convert Python style to Perl style
s = re.sub(r'\\u([0-9a-f]{4})', r'\\N{U+\1}', s)
if self._use_gettext:
return '_T("%s")' % s
else:
return '"%s"' % s
def add_object_format_name(self, name):
return '#$self->%s' % name
def _format_classattr(self, obj):
res = BaseLangCodeWriter._format_classattr(self, obj)
if not res:
return res
elif obj.name.startswith('$self->'):
return obj.name
elif obj.name.startswith('$'):
return obj.name
# spacer.name is "<width>, <height>" already
elif obj.WX_CLASS == 'spacer':
return obj.name
# Perl stores sizers always in class attributes
elif self.store_as_attr(obj) or obj.IS_SIZER:
return '$self->{%s}' % obj.name
return '$%s' % obj.name
def _format_import(self, klass):
return 'use %s;\n' % klass
def _get_class_filename(self, klass):
"Returns the name for a Perl module (.pm) to store a single class in multi file projects"
return os.path.join( self.out_dir, klass.replace('::', os.sep) + '.pm' )
def format_generic_access(self, obj):
if obj.IS_CLASS:
return '$self'
return self._format_classattr(obj)
writer = PerlCodeWriter() # the code writer instance
language = writer.language # Language generated by this code generator
|
disk_image_batch_dataset
|
Disk image batch dataset.
This function is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an image path
labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
|
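A minimal usage sketch of the function described above (hedged: it assumes a TensorFlow 1.x environment with tf.contrib available, the image paths are hypothetical placeholders, and the PNGs are assumed to share one size because no resizing map_func is supplied; the function itself appears in the implementation field below):
import tensorflow as tf

img_paths = ['imgs/0001.png', 'imgs/0002.png']  # hypothetical grayscale PNG files
labels = [0, 1]
# build a dataset of (image, label) batches; repeat=1 makes a single pass
ds = disk_image_batch_dataset(img_paths, batch_size=2, labels=labels,
                              shuffle=False, repeat=1)
it = ds.make_one_shot_iterator()
imgs, lbls = it.get_next()
with tf.Session() as sess:
    batch_imgs, batch_lbls = sess.run([imgs, lbls])
    print(batch_imgs.shape, batch_lbls)  # e.g. (2, H, W, 1) [0 1]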
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from functools import partial
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from general.utilTF1.utils import session
from general.kneeOsteoarthritisDataset.KneeOsteoarthritsDataset import KneeOsteoarthritsDataset
_N_CPU = multiprocessing.cpu_count()
def batch_dataset(dataset, batch_size, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
if filter:
dataset = dataset.filter(filter)
if map_func:
dataset = dataset.map(map_func, num_parallel_calls=num_threads)
if shuffle:
dataset = dataset.shuffle(buffer_size)
if drop_remainder:
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(repeat).prefetch(prefetch_batch)
return dataset
class Dataset(object):
def __init__(self):
self._dataset = None
self._iterator = None
self._batch_op = None
self._sess = None
self._is_eager = tf.executing_eagerly()
self._eager_iterator = None
def __del__(self):
if self._sess:
self._sess.close()
def __iter__(self):
return self
def __next__(self):
try:
b = self.get_next()
except:
raise StopIteration
else:
return b
next = __next__
def get_next(self):
if self._is_eager:
return self._eager_iterator.get_next()
else:
return self._sess.run(self._batch_op)
def reset(self, feed_dict={}):
if self._is_eager:
self._eager_iterator = tfe.Iterator(self._dataset)
else:
self._sess.run(self._iterator.initializer, feed_dict=feed_dict)
def _bulid(self, dataset, sess=None):
self._dataset = dataset
if self._is_eager:
self._eager_iterator = tfe.Iterator(dataset)
else:
self._iterator = dataset.make_initializable_iterator()
self._batch_op = self._iterator.get_next()
if sess:
self._sess = sess
else:
self._sess = session()
try:
self.reset()
except:
pass
@property
def dataset(self):
return self._dataset
@property
def iterator(self):
return self._iterator
@property
def batch_op(self):
return self._batch_op
def get_dataset(data_set_path,shuffle=True):
# shape
img_shape = [256,256, 1]
# dataset
def _map_func(img,label):
img = tf.image.resize_images(img, [img_shape[0], img_shape[1]], method=tf.image.ResizeMethod.BICUBIC)
img = tf.clip_by_value(tf.cast(img, tf.float32), 0, 255) / 255 # / 127.5 - 1
return img,label
# get image paths
#
kneeosteo_train = KneeOsteoarthritsDataset(data_path=data_set_path)
labels = list(kneeosteo_train.dict_url_class.values())
paths = list(kneeosteo_train.dict_url_class.keys())
assert (len(paths) == len(labels))
print('The dataset %s has %d elements. ' % (data_set_path, len(labels)))
Dataset = partial(DiskImageData, img_paths=paths,labels=labels, repeat=1, map_func=_map_func,shuffle=shuffle)
# index func
def get_imgs(batch):
return batch
return Dataset, img_shape, get_imgs
# MASKED: disk_image_batch_dataset function (lines 136-169)
class DiskImageData(Dataset):
"""DiskImageData.
This class is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an image path
labels : Label list or tensor, each of which is a corresponding label
"""
def __init__(self, img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):
super(DiskImageData, self).__init__()
dataset = disk_image_batch_dataset(img_paths, batch_size, labels, prefetch_batch, drop_remainder, filter,
map_func, num_threads, shuffle, buffer_size, repeat)
self._bulid(dataset, sess)
self._n_data = len(img_paths)
def __len__(self):
return self._n_data
|
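For the DiskImageData wrapper above, a hedged end-to-end sketch (TF 1.x graph mode; the paths and labels are hypothetical, the PNGs are assumed to share one size, and the project's session() helper is assumed to return a tf.Session):
data = DiskImageData(img_paths=['a.png', 'b.png'], batch_size=2,
                     labels=[0, 1], shuffle=False, repeat=1)
for imgs, lbls in data:          # __next__ turns end-of-data into StopIteration
    print(imgs.shape, lbls)      # e.g. (2, H, W, 1) [0 1]
print(len(data))                 # -> 2, the number of image paths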
def disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
"""Disk image batch dataset.
This function is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an image path
labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
"""
if labels is None:
dataset = tf.data.Dataset.from_tensor_slices(img_paths)
elif isinstance(labels, tuple):
dataset = tf.data.Dataset.from_tensor_slices((img_paths,) + tuple(labels))
else:
dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels))
def parse_func(path, *label):
img = tf.read_file(path)
img = tf.image.decode_png(img, 1)
return (img,) + label
if map_func:
def map_func_(*args):
return map_func(*parse_func(*args))
else:
map_func_ = parse_func
# dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower
dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter,
map_func_, num_threads, shuffle, buffer_size, repeat)
return dataset
| 136
| 169
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import multiprocessing
from functools import partial
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from general.utilTF1.utils import session
from general.kneeOsteoarthritisDataset.KneeOsteoarthritsDataset import KneeOsteoarthritsDataset
_N_CPU = multiprocessing.cpu_count()
def batch_dataset(dataset, batch_size, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
if filter:
dataset = dataset.filter(filter)
if map_func:
dataset = dataset.map(map_func, num_parallel_calls=num_threads)
if shuffle:
dataset = dataset.shuffle(buffer_size)
if drop_remainder:
dataset = dataset.apply(tf.contrib.data.batch_and_drop_remainder(batch_size))
else:
dataset = dataset.batch(batch_size)
dataset = dataset.repeat(repeat).prefetch(prefetch_batch)
return dataset
class Dataset(object):
def __init__(self):
self._dataset = None
self._iterator = None
self._batch_op = None
self._sess = None
self._is_eager = tf.executing_eagerly()
self._eager_iterator = None
def __del__(self):
if self._sess:
self._sess.close()
def __iter__(self):
return self
def __next__(self):
try:
b = self.get_next()
except:
raise StopIteration
else:
return b
next = __next__
def get_next(self):
if self._is_eager:
return self._eager_iterator.get_next()
else:
return self._sess.run(self._batch_op)
def reset(self, feed_dict={}):
if self._is_eager:
self._eager_iterator = tfe.Iterator(self._dataset)
else:
self._sess.run(self._iterator.initializer, feed_dict=feed_dict)
def _bulid(self, dataset, sess=None):
self._dataset = dataset
if self._is_eager:
self._eager_iterator = tfe.Iterator(dataset)
else:
self._iterator = dataset.make_initializable_iterator()
self._batch_op = self._iterator.get_next()
if sess:
self._sess = sess
else:
self._sess = session()
try:
self.reset()
except:
pass
@property
def dataset(self):
return self._dataset
@property
def iterator(self):
return self._iterator
@property
def batch_op(self):
return self._batch_op
def get_dataset(data_set_path,shuffle=True):
# shape
img_shape = [256,256, 1]
# dataset
def _map_func(img,label):
img = tf.image.resize_images(img, [img_shape[0], img_shape[1]], method=tf.image.ResizeMethod.BICUBIC)
img = tf.clip_by_value(tf.cast(img, tf.float32), 0, 255) / 255 # / 127.5 - 1
return img,label
# get image paths
#
kneeosteo_train = KneeOsteoarthritsDataset(data_path=data_set_path)
labels = list(kneeosteo_train.dict_url_class.values())
paths = list(kneeosteo_train.dict_url_class.keys())
assert (len(paths) == len(labels))
print('The dataset %s has %d elements. ' % (data_set_path, len(labels)))
Dataset = partial(DiskImageData, img_paths=paths,labels=labels, repeat=1, map_func=_map_func,shuffle=shuffle)
# index func
def get_imgs(batch):
return batch
return Dataset, img_shape, get_imgs
def disk_image_batch_dataset(img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1):
"""Disk image batch dataset.
This function is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an image path
labels : Label list/tuple_of_list or tensor/tuple_of_tensor, each of which is a corresponding label
"""
if labels is None:
dataset = tf.data.Dataset.from_tensor_slices(img_paths)
elif isinstance(labels, tuple):
dataset = tf.data.Dataset.from_tensor_slices((img_paths,) + tuple(labels))
else:
dataset = tf.data.Dataset.from_tensor_slices((img_paths, labels))
def parse_func(path, *label):
img = tf.read_file(path)
img = tf.image.decode_png(img, 1)
return (img,) + label
if map_func:
def map_func_(*args):
return map_func(*parse_func(*args))
else:
map_func_ = parse_func
# dataset = dataset.map(parse_func, num_parallel_calls=num_threads) is slower
dataset = batch_dataset(dataset, batch_size, prefetch_batch, drop_remainder, filter,
map_func_, num_threads, shuffle, buffer_size, repeat)
return dataset
class DiskImageData(Dataset):
"""DiskImageData.
This class is suitable for jpg and png files
Arguments:
img_paths : String list or 1-D tensor, each of which is an image path
labels : Label list or tensor, each of which is a corresponding label
"""
def __init__(self, img_paths, batch_size, labels=None, prefetch_batch=_N_CPU + 1, drop_remainder=True, filter=None,
map_func=None, num_threads=_N_CPU, shuffle=True, buffer_size=4096, repeat=-1, sess=None):
super(DiskImageData, self).__init__()
dataset = disk_image_batch_dataset(img_paths, batch_size, labels, prefetch_batch, drop_remainder, filter,
map_func, num_threads, shuffle, buffer_size, repeat)
self._bulid(dataset, sess)
self._n_data = len(img_paths)
def __len__(self):
return self._n_data
|
compute_many
|
Compute ROUGE score between guess and *any* answer.
Done with compute_many due to increased efficiency.
:return: (rouge-1, rouge-2, rouge-L)
|
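A minimal usage sketch (hedged: it assumes the optional py-rouge package and the nltk punkt tokenizer are installed; otherwise the method returns (None, None, None); the strings are placeholders):
guess = 'the cat sat on the mat'
answers = ['a cat sat on a mat', 'dogs bark loudly']
r1, r2, rL = RougeMetric.compute_many(guess, answers)
if r1 is not None:
    # each value is the best recall over all answers, wrapped in an AverageMetric
    print(float(r1), float(r2), float(rL))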
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides standard metric evaluations for dialog.
Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics between
processes.
"""
import re
from abc import ABC, abstractmethod
from collections import Counter
import queue
import functools
import datetime
from typing import Union, List, Optional, Tuple, Set, Any, Dict
import torch
from parlai.core.message import Message
from parlai.utils.misc import warn_once
from parlai.utils.typing import TScalar, TVector
try:
import torch.multiprocessing as multiprocessing
except ImportError:
import multiprocessing # type: ignore
DEFAULT_METRICS = {'bleu-4', 'accuracy', 'f1'}
ROUGE_METRICS = {'rouge-1', 'rouge-2', 'rouge-L'}
BLEU_METRICS = {'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4'}
ALL_METRICS = DEFAULT_METRICS | ROUGE_METRICS | BLEU_METRICS
try:
from nltk.translate import bleu_score as nltkbleu
except ImportError:
# User doesn't have nltk installed, so we can't use it for bleu
# We'll just turn off things, but we might want to warn the user
nltkbleu = None
try:
from fairseq import bleu as fairseqbleu
except ImportError:
fairseqbleu = None
try:
import rouge
except ImportError:
# User doesn't have py-rouge installed, so we can't use it.
# We'll just turn off rouge computations
rouge = None
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
@functools.total_ordering
class Metric(ABC):
"""
Base class for storing metrics.
Subclasses should define .value(). Examples are provided for each subclass.
"""
@property
def is_global(self) -> bool:
"""
Indicates whether this metric should be reported globally or per-task.
"""
return False
@property
def macro_average(self) -> bool:
"""
Indicates whether this metric should be macro-averaged when globally reported.
"""
return False
@abstractmethod
def value(self) -> float:
"""
Return the value of the metric as a float.
"""
pass
@abstractmethod
def __add__(self, other: Any) -> 'Metric':
raise NotImplementedError
def __iadd__(self, other):
return self.__radd__(other)
def __radd__(self, other: Any):
if other is None:
return self
return self.__add__(other)
def __str__(self) -> str:
return f'{self.value():.4g}'
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.value():.4g})'
def __float__(self) -> float:
return float(self.value())
def __int__(self) -> int:
return int(self.value())
def __eq__(self, other: Any) -> bool:
if isinstance(other, Metric):
return self.value() == other.value()
else:
return self.value() == other
def __lt__(self, other: Any) -> bool:
if isinstance(other, Metric):
return self.value() < other.value()
else:
return self.value() < other
def __sub__(self, other: Any) -> float:
"""
Used heavily for assertAlmostEqual.
"""
if not isinstance(other, float):
raise TypeError('Metrics.__sub__ is intentionally limited to floats.')
return self.value() - other
def __rsub__(self, other: Any) -> float:
"""
Used heavily for assertAlmostEqual.
NOTE: This is not necessary in python 3.7+.
"""
if not isinstance(other, float):
raise TypeError('Metrics.__rsub__ is intentionally limited to floats.')
return other - self.value()
@classmethod
def as_number(cls, obj: TScalar) -> Union[int, float]:
if isinstance(obj, torch.Tensor):
obj_as_number: Union[int, float] = obj.item()
else:
obj_as_number = obj # type: ignore
assert isinstance(obj_as_number, int) or isinstance(obj_as_number, float)
return obj_as_number
@classmethod
def as_float(cls, obj: TScalar) -> float:
return float(cls.as_number(obj))
@classmethod
def as_int(cls, obj: TScalar) -> int:
return int(cls.as_number(obj))
@classmethod
def many(cls, *objs: List[TVector]) -> List['Metric']:
"""
Construct many of a Metric from the base parts.
Useful if you separately compute numerators and denominators, etc.
"""
lengths = [len(o) for o in objs]
if len(set(lengths)) != 1:
raise IndexError(f'Uneven {cls.__name__} constructions: {lengths}')
return [cls(*items) for items in zip(*objs)]
class FixedMetric(Metric):
"""
Fixed metrics are verified to be the same when combined, or throw an error.
FixedMetric is used for things like total_train_updates, which should not be
combined across different multitasks or different workers.
"""
__slots__ = ('_value',)
def __init__(self, value: TScalar):
self._value = self.as_number(value)
def __add__(self, other: Optional['FixedMetric']) -> 'FixedMetric':
if other is None:
return self
if self != other:
raise ValueError(f"FixedMetrics not the same: {self} and {other}")
return self
def value(self) -> float:
return self._value
class SumMetric(Metric):
"""
Class that keeps a running sum of some metric.
Examples of SumMetric include things like "exs", the number of examples seen since
the last report, which depends exactly on a teacher.
"""
__slots__ = ('_sum',)
def __init__(self, sum_: TScalar = 0):
if isinstance(sum_, torch.Tensor):
self._sum = sum_.item()
else:
assert isinstance(sum_, (int, float))
self._sum = sum_
def __add__(self, other: Optional['SumMetric']) -> 'SumMetric':
# NOTE: hinting can be cleaned up with "from __future__ import annotations" when
# we drop Python 3.6
if other is None:
return self
full_sum = self._sum + other._sum
# always keep the same return type
return type(self)(sum_=full_sum)
def value(self) -> float:
return self._sum
class AverageMetric(Metric):
"""
Class that keeps a running average of some metric.
Examples of AverageMetrics include hits@1, F1, accuracy, etc. These metrics all have
per-example values that can be directly mapped back to a teacher.
"""
__slots__ = ('_numer', '_denom')
@property
def macro_average(self) -> bool:
"""
Indicates whether this metric should be macro-averaged when globally reported.
"""
return True
def __init__(self, numer: TScalar, denom: TScalar = 1):
self._numer = self.as_number(numer)
self._denom = self.as_number(denom)
def __add__(self, other: Optional['AverageMetric']) -> 'AverageMetric':
# NOTE: hinting can be cleaned up with "from __future__ import annotations" when
# we drop Python 3.6
if other is None:
return self
full_numer: TScalar = self._numer + other._numer
full_denom: TScalar = self._denom + other._denom
# always keep the same return type
return type(self)(numer=full_numer, denom=full_denom)
def value(self) -> float:
if self._numer == 0 and self._denom == 0:
# don't nan out if we haven't counted anything
return 0.0
if self._denom == 0:
return float('nan')
return self._numer / self._denom
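# Illustrative note (not part of the original file): AverageMetric adds numerators and
# denominators, so AverageMetric(1, 2) + AverageMetric(3, 4) has value (1 + 3) / (2 + 4) = 2 / 3,
# which is a micro average rather than the mean of 0.5 and 0.75.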
class MacroAverageMetric(Metric):
"""
Class that represents the macro average of several numbers.
Used for aggregating task level metrics. It is only used for things that are
AverageMetrics already.
"""
__slots__ = '_values'
def __init__(self, metrics: Dict[str, Metric]) -> None:
self._values = metrics
def __add__(self, other: Optional['MacroAverageMetric']) -> 'MacroAverageMetric':
if other is None:
return self
output = dict(**self._values)
for k, v in other._values.items():
output[k] = output.get(k, None) + v
return MacroAverageMetric(output)
def value(self) -> float:
sum_ = sum(v.value() for v in self._values.values())
n = len(self._values)
return sum_ / n
class TimerMetric(Metric):
"""
A timer metric keeps track of the first/last times it was used.
"""
__slots__ = ('_value', '_start', '_end')
@classmethod
def _now(cls) -> int:
return datetime.datetime.utcnow().timestamp()
def __init__(
self,
value: TScalar,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
):
self._value = self.as_number(value)
if start_time is None:
start_time = self._now()
if end_time is None:
end_time = self._now()
self._start = start_time
self._end = end_time
def __add__(self, other: Optional['TimerMetric']) -> 'TimerMetric':
# NOTE: hinting can be cleaned up with "from __future__ import annotations" when
# we drop Python 3.6
if other is None:
return self
total: TScalar = self._value + other._value
start: int = min(self._start, other._start)
end: int = max(self._end, other._end)
return type(self)(total, start, end)
def value(self) -> float:
if self._value == 0 or self._end == self._start:
return 0
return self._value / (self._end - self._start)
class GlobalMetric:
"""
A global metric is one that should not be aggregated across different tasks.
Examples of global metric include things like learning rate and updates.
These need to be accumulated or averaged over multiple parleys, but cannot
be correlated with a single task.
Key to it is the notion that any one worker or any one task already has a global
view of the value, and so no combinations should be done. Note this is different
from a FixedMetric, in that a GlobalMetric can still be averaged across multiple
parleys(), but a FixedMetric is always fixed.
"""
@property
def is_global(self) -> bool:
return True
class GlobalFixedMetric(GlobalMetric, FixedMetric):
"""
Global fixed metric.
Used for things like total_train_updates.
"""
pass
class GlobalSumMetric(GlobalMetric, SumMetric):
"""
Global sum metric.
Used for 'exs' and 'updates'.
"""
pass
class GlobalAverageMetric(GlobalMetric, AverageMetric):
"""
Global Average metric.
Used for things like learning rate, and many agent-specific metrics.
"""
pass
class LegacyMetric(GlobalAverageMetric):
"""
Legacy Metrics are reported by agent as float.
"""
pass
class GlobalTimerMetric(GlobalMetric, TimerMetric):
pass
class F1Metric(AverageMetric):
"""
Helper class which computes token-level F1.
"""
@staticmethod
def _prec_recall_f1_score(pred_items, gold_items):
"""
Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
@staticmethod
def compute(guess: str, answers: List[str]) -> 'F1Metric':
if guess is None or answers is None:
return AverageMetric(0, 0)
g_tokens = normalize_answer(guess).split()
scores = [
F1Metric._prec_recall_f1_score(g_tokens, normalize_answer(a).split())
for a in answers
]
return F1Metric(max(f1 for p, r, f1 in scores), 1)
class ExactMatchMetric(AverageMetric):
@staticmethod
def compute(guess: str, answers: List[str]) -> 'ExactMatchMetric':
if guess is None or answers is None:
return None
guess = normalize_answer(guess)
for a in answers:
if guess == normalize_answer(a):
return ExactMatchMetric(1)
return ExactMatchMetric(0)
class BleuMetric(AverageMetric):
@staticmethod
def compute(guess: str, answers: List[str], k: int = 4) -> Optional['BleuMetric']:
"""
Compute approximate BLEU score between guess and a set of answers.
"""
if nltkbleu is None:
# bleu library not installed, just return a default value
return None
# Warning: BLEU calculation *should* include proper tokenization and
# punctuation etc. We're using the normalize_answer for everything though,
# so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
# going to be slower than fairseq's (which is written in C), but fairseq's
# requires that everything be in arrays of ints (i.e. as tensors). NLTK's
# works with strings, which is better suited for this module.
weights = [1 / k for _ in range(k)]
score = nltkbleu.sentence_bleu(
[normalize_answer(a).split(" ") for a in answers],
normalize_answer(guess).split(" "),
smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
weights=weights,
)
return BleuMetric(score)
class FairseqBleuMetric(AverageMetric):
@staticmethod
def compute_many(
guess: torch.Tensor, answers: torch.Tensor, pad_idx, end_idx, unk_idx
):
"""
Return BLEU-1..4 using fairseq and tokens.
"""
if fairseqbleu is None:
return None
scorer = fairseqbleu.Scorer(pad_idx, end_idx, unk_idx)
answers = answers.cpu().int()
guess = guess.cpu().int()
scorer.add(answers, guess)
return [FairseqBleuMetric(scorer.score(i) / 100.0) for i in range(1, 5)]
class RougeMetric(AverageMetric):
_evaluator = None
# MASKED: compute_many function (lines 491-533)
def normalize_answer(s):
"""
Lower text and remove punctuation, articles and extra whitespace.
"""
s = s.lower()
s = re_punc.sub(' ', s)
s = re_art.sub(' ', s)
# TODO: this could almost certainly be faster with a regex \s+ -> ' '
s = ' '.join(s.split())
return s
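# Illustrative example (not part of the original file):
#   normalize_answer("The Cat's mat!") -> 'cat s mat'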
def aggregate_named_reports(
named_reports: Dict[str, Dict[str, Metric]], micro_average: bool = False
) -> Dict[str, Metric]:
"""
Aggregate metrics from multiple reports.
:param reports:
Dict of tasks -> metrics.
:param micro_average:
If true, top level metrics will be the micro average. By default, we
use macro average.
:return:
The aggregated report
"""
if len(named_reports) == 0:
raise ValueError("Cannot aggregate empty reports.")
if len(named_reports) == 1:
# no real aggregation to be done
return next(iter(named_reports.values()))
# reporters is a list of teachers or worlds
m: Dict[str, Metric] = {}
macro_averages: Dict[str, Dict[str, Metric]] = {}
for task_id, task_report in named_reports.items():
for each_metric, value in task_report.items():
if value.is_global:
# just take the first one we saw
if each_metric not in m:
m[each_metric] = value
else:
task_metric = f'{task_id}/{each_metric}'
m[task_metric] = m.get(task_metric) + value
if micro_average or not value.macro_average:
# none + a => a from implementation of Metric.__add__
m[each_metric] = m.get(each_metric) + value
else:
# macro average
if each_metric not in macro_averages:
macro_averages[each_metric] = {}
macro_averages[each_metric][task_id] = value
for key, values in macro_averages.items():
m[key] = MacroAverageMetric(values)
return m
def aggregate_unnamed_reports(reports: List[Dict[str, Metric]]) -> Dict[str, Metric]:
"""
Combines metrics without regard for tracking provenance.
"""
m: Dict[str, Metric] = {}
for task_report in reports:
for each_metric, value in task_report.items():
m[each_metric] = m.get(each_metric) + value
return m
class Metrics(object):
"""
Threadsafe metrics container focused on aggregation.
"""
def __init__(self, threadsafe=False, shared=None):
self._threadsafe = threadsafe
if self._threadsafe and shared is None:
# Threadsafe metrics tracking works by keeping a queue that workers can
# push updates to. the main worker works through the queue at report
# time. We could add some buffering to improve performance, but we
# are deprioritizing hogwild performance at this time.
self._buffer = None
self._queue = multiprocessing.SimpleQueue()
self._worker = False
self._data = {}
elif shared and 'queue' in shared:
# This is a clone, in threadsafe mode
self._buffer = {}
self._queue = shared['queue']
self._worker = True
self._data = None
elif shared and 'data' in shared:
# This is a clone, in non-threadsafe mode
self._buffer = None
self._queue = None
self._worker = False
self._data = shared['data']
else:
# The original in non-threadsafe mode
self._buffer = None
self._queue = None
self._worker = False
self._data = {}
def __str__(self):
return str(self._data)
def __repr__(self):
return f'Metrics({repr(self._data)})'
def add(self, key: str, value: Optional[Metric]) -> None:
"""
Record an accumulation to a metric.
"""
if self._threadsafe and self._worker:
self._buffer[key] = self._buffer.get(key) + value
else:
self._data[key] = self._data.get(key) + value
def flush(self):
"""
Clear the local buffer and push it on.
"""
if self._threadsafe and self._buffer:
self._queue.put(self._buffer)
self._buffer.clear()
def report(self):
"""
Report the metrics over all data seen so far.
"""
self.sync()
return {k: v for k, v in self._data.items()}
def sync(self):
"""
Process all items on the queue to ensure it is up to date.
"""
if self._worker:
self.flush()
elif self._threadsafe and not self._worker:
for buffer_ in self._drain_queue():
for key, value in buffer_.items():
self._data[key] = self._data.get(key) + value
def _drain_queue(self):
"""
Drain the queue, yielding all items in it.
"""
while not self._queue.empty():
try:
yield self._queue.get()
except queue.Empty:
break
def clear(self):
"""
Clear all the metrics.
"""
if self._worker:
self._buffer.clear()
elif self._threadsafe and not self._worker:
for _ in self._drain_queue():
pass
if self._data:
self._data.clear()
def share(self):
if self._threadsafe:
return {'queue': self._queue}
else:
return {'data': self._data}
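# Illustrative usage of the container above (not part of the original file):
#   m = Metrics()
#   m.add('f1', AverageMetric(1, 2)); m.add('f1', AverageMetric(1, 2))
#   m.report()['f1'].value()  # -> 0.5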
class TeacherMetrics(Metrics):
"""
Helper container which encapsulates standard metrics (F1, BLEU, ...).
"""
def __init__(
self,
threadsafe: bool = False,
metrics_list: str = "default",
shared: Dict[str, Any] = None,
) -> None:
super().__init__(threadsafe=threadsafe, shared=shared)
self._metrics_list = self._infer_metrics(metrics_list)
self.eval_pr = [1, 5, 10, 100]
@staticmethod
def _infer_metrics(cli_arg: str) -> Set[str]:
"""
Parse the CLI metric into a list of metrics we wish to compute.
"""
col: Set[str] = set()
names = cli_arg.split(",")
for n in names:
if n == 'default':
col |= DEFAULT_METRICS
elif n == 'rouge':
col |= ROUGE_METRICS
elif n == 'bleu':
col |= BLEU_METRICS
elif n == 'all':
col |= ALL_METRICS
else:
col.add(n)
return col
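# Illustrative example (not part of the original file):
#   _infer_metrics('default,rouge') ->
#   {'accuracy', 'f1', 'bleu-4', 'rouge-1', 'rouge-2', 'rouge-L'}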
def _update_ranking_metrics(self, observation, labels):
text_cands = observation.get('text_candidates', None)
if text_cands is None:
return
# Now loop through text candidates, assuming they are sorted.
# If any of them is a label then score a point.
# maintain hits@1, 5, 10, 50, 100, etc.
label_set = set(normalize_answer(l) for l in labels)
cnts = {k: 0 for k in self.eval_pr}
cnt = 0
for c in text_cands:
cnt += 1
if normalize_answer(c) in label_set:
for k in self.eval_pr:
if cnt <= k:
cnts[k] += 1
# hits metric is 1 if cnts[k] > 0.
# (other metrics such as p@k and r@k take
# the value of cnt into account.)
for k in self.eval_pr:
self.add(f'hits@{k}', AverageMetric(cnts[k] > 0))
def evaluate_response(self, observation: Message, labels: List[str]) -> None:
"""
Compute all required text-based metrics based on an observation and labels.
"""
prediction = observation.get('text', None)
self.add('exs', SumMetric(1))
if prediction is not None:
self.add('accuracy', ExactMatchMetric.compute(prediction, labels))
self.add('f1', F1Metric.compute(prediction, labels))
for k in range(1, 5): # 1..4
if f'bleu-{k}' in self._metrics_list:
self.add(f'bleu-{k}', BleuMetric.compute(prediction, labels, k))
# if any of the rouges are in the list
if self._metrics_list & ROUGE_METRICS:
r1, r2, rL = RougeMetric.compute_many(prediction, labels)
if 'rouge-1' in self._metrics_list:
self.add('rouge_1', r1)
if 'rouge-2' in self._metrics_list:
self.add('rouge_2', r2)
if 'rouge-L' in self._metrics_list:
self.add('rouge_L', rL)
# Ranking metrics.
self._update_ranking_metrics(observation, labels)
# User-reported metrics
if 'metrics' in observation:
for uk, v in observation['metrics'].items():
if uk in ALL_METRICS:
# don't let the user override our metrics
uk = f'USER_{uk}'
assert isinstance(uk, str), type(uk)
if not isinstance(v, Metric):
warn_once(f'Metric {uk} is assumed to be averaged per example.')
v = AverageMetric(v)
assert isinstance(v, Metric)
self.add(uk, v)
# always flush at the end of processing this response
self.flush()
|
@staticmethod
def compute_many(
guess: str, answers: List[str]
) -> Tuple[
Optional['RougeMetric'], Optional['RougeMetric'], Optional['RougeMetric']
]:
"""
Compute ROUGE score between guess and *any* answer.
Done with compute_many due to increased efficiency.
:return: (rouge-1, rouge-2, rouge-L)
"""
# possible global initialization
global rouge
if rouge is None:
return None, None, None
if RougeMetric._evaluator is None:
RougeMetric._evaluator = rouge.Rouge(
metrics=['rouge-n', 'rouge-l'], max_n=2
)
try:
scores = [
RougeMetric._evaluator.get_scores(
normalize_answer(guess), normalize_answer(a)
)
for a in answers
]
except LookupError:
warn_once(
'ROUGE requires nltk punkt tokenizer. Please run '
'`python -c "import nltk; nltk.download(\'punkt\')"`'
)
return None, None, None
scores_rouge1 = max(score['rouge-1']['r'] for score in scores)
scores_rouge2 = max(score['rouge-2']['r'] for score in scores)
scores_rougeL = max(score['rouge-l']['r'] for score in scores)
return (
RougeMetric(scores_rouge1),
RougeMetric(scores_rouge2),
RougeMetric(scores_rougeL),
)
| 491
| 533
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Provides standard metric evaluations for dialog.
Uses locking and shared memory when ``numthreads`` is set to >1 to share metrics between
processes.
"""
import re
from abc import ABC, abstractmethod
from collections import Counter
import queue
import functools
import datetime
from typing import Union, List, Optional, Tuple, Set, Any, Dict
import torch
from parlai.core.message import Message
from parlai.utils.misc import warn_once
from parlai.utils.typing import TScalar, TVector
try:
import torch.multiprocessing as multiprocessing
except ImportError:
import multiprocessing # type: ignore
DEFAULT_METRICS = {'bleu-4', 'accuracy', 'f1'}
ROUGE_METRICS = {'rouge-1', 'rouge-2', 'rouge-L'}
BLEU_METRICS = {'bleu-1', 'bleu-2', 'bleu-3', 'bleu-4'}
ALL_METRICS = DEFAULT_METRICS | ROUGE_METRICS | BLEU_METRICS
try:
from nltk.translate import bleu_score as nltkbleu
except ImportError:
# User doesn't have nltk installed, so we can't use it for bleu
# We'll just turn off things, but we might want to warn the user
nltkbleu = None
try:
from fairseq import bleu as fairseqbleu
except ImportError:
fairseqbleu = None
try:
import rouge
except ImportError:
# User doesn't have py-rouge installed, so we can't use it.
# We'll just turn off rouge computations
rouge = None
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
@functools.total_ordering
class Metric(ABC):
"""
Base class for storing metrics.
Subclasses should define .value(). Examples are provided for each subclass.
"""
@property
def is_global(self) -> bool:
"""
Indicates whether this metric should be reported globally or per-task.
"""
return False
@property
def macro_average(self) -> bool:
"""
Indicates whether this metric should be macro-averaged when globally reported.
"""
return False
@abstractmethod
def value(self) -> float:
"""
Return the value of the metric as a float.
"""
pass
@abstractmethod
def __add__(self, other: Any) -> 'Metric':
raise NotImplementedError
def __iadd__(self, other):
return self.__radd__(other)
def __radd__(self, other: Any):
if other is None:
return self
return self.__add__(other)
def __str__(self) -> str:
return f'{self.value():.4g}'
def __repr__(self) -> str:
return f'{self.__class__.__name__}({self.value():.4g})'
def __float__(self) -> float:
return float(self.value())
def __int__(self) -> int:
return int(self.value())
def __eq__(self, other: Any) -> bool:
if isinstance(other, Metric):
return self.value() == other.value()
else:
return self.value() == other
def __lt__(self, other: Any) -> bool:
if isinstance(other, Metric):
return self.value() < other.value()
else:
return self.value() < other
def __sub__(self, other: Any) -> float:
"""
Used heavily for assertAlmostEqual.
"""
if not isinstance(other, float):
raise TypeError('Metrics.__sub__ is intentionally limited to floats.')
return self.value() - other
def __rsub__(self, other: Any) -> float:
"""
Used heavily for assertAlmostEqual.
NOTE: This is not necessary in python 3.7+.
"""
if not isinstance(other, float):
raise TypeError('Metrics.__rsub__ is intentionally limited to floats.')
return other - self.value()
@classmethod
def as_number(cls, obj: TScalar) -> Union[int, float]:
if isinstance(obj, torch.Tensor):
obj_as_number: Union[int, float] = obj.item()
else:
obj_as_number = obj # type: ignore
assert isinstance(obj_as_number, int) or isinstance(obj_as_number, float)
return obj_as_number
@classmethod
def as_float(cls, obj: TScalar) -> float:
return float(cls.as_number(obj))
@classmethod
def as_int(cls, obj: TScalar) -> int:
return int(cls.as_number(obj))
@classmethod
def many(cls, *objs: List[TVector]) -> List['Metric']:
"""
Construct many of a Metric from the base parts.
Useful if you separately compute numerators and denominators, etc.
"""
lengths = [len(o) for o in objs]
if len(set(lengths)) != 1:
raise IndexError(f'Uneven {cls.__name__} constructions: {lengths}')
return [cls(*items) for items in zip(*objs)]
class FixedMetric(Metric):
"""
Fixed metrics are verified to be the same when combined, or throw an error.
FixedMetric is used for things like total_train_updates, which should not be
combined across different multitasks or different workers.
"""
__slots__ = ('_value',)
def __init__(self, value: TScalar):
self._value = self.as_number(value)
def __add__(self, other: Optional['FixedMetric']) -> 'FixedMetric':
if other is None:
return self
if self != other:
raise ValueError(f"FixedMetrics not the same: {self} and {other}")
return self
def value(self) -> float:
return self._value
class SumMetric(Metric):
"""
Class that keeps a running sum of some metric.
Examples of SumMetric include things like "exs", the number of examples seen since
the last report, which depends exactly on a teacher.
"""
__slots__ = ('_sum',)
def __init__(self, sum_: TScalar = 0):
if isinstance(sum_, torch.Tensor):
self._sum = sum_.item()
else:
assert isinstance(sum_, (int, float))
self._sum = sum_
def __add__(self, other: Optional['SumMetric']) -> 'SumMetric':
# NOTE: hinting can be cleaned up with "from __future__ import annotations" when
# we drop Python 3.6
if other is None:
return self
full_sum = self._sum + other._sum
# always keep the same return type
return type(self)(sum_=full_sum)
def value(self) -> float:
return self._sum
class AverageMetric(Metric):
"""
Class that keeps a running average of some metric.
Examples of AverageMetrics include hits@1, F1, accuracy, etc. These metrics all have
per-example values that can be directly mapped back to a teacher.
"""
__slots__ = ('_numer', '_denom')
@property
def macro_average(self) -> bool:
"""
Indicates whether this metric should be macro-averaged when globally reported.
"""
return True
def __init__(self, numer: TScalar, denom: TScalar = 1):
self._numer = self.as_number(numer)
self._denom = self.as_number(denom)
def __add__(self, other: Optional['AverageMetric']) -> 'AverageMetric':
# NOTE: hinting can be cleaned up with "from __future__ import annotations" when
# we drop Python 3.6
if other is None:
return self
full_numer: TScalar = self._numer + other._numer
full_denom: TScalar = self._denom + other._denom
# always keep the same return type
return type(self)(numer=full_numer, denom=full_denom)
def value(self) -> float:
if self._numer == 0 and self._denom == 0:
# don't nan out if we haven't counted anything
return 0.0
if self._denom == 0:
return float('nan')
return self._numer / self._denom
class MacroAverageMetric(Metric):
"""
Class that represents the macro average of several numbers.
Used for aggregating task level metrics. It is only used for things that are
AverageMetrics already.
"""
__slots__ = '_values'
def __init__(self, metrics: Dict[str, Metric]) -> None:
self._values = metrics
def __add__(self, other: Optional['MacroAverageMetric']) -> 'MacroAverageMetric':
if other is None:
return self
output = dict(**self._values)
for k, v in other._values.items():
output[k] = output.get(k, None) + v
return MacroAverageMetric(output)
def value(self) -> float:
sum_ = sum(v.value() for v in self._values.values())
n = len(self._values)
return sum_ / n
class TimerMetric(Metric):
"""
A timer metric keeps track of the first/last times it was used.
"""
__slots__ = ('_value', '_start', '_end')
@classmethod
def _now(cls) -> int:
return datetime.datetime.utcnow().timestamp()
def __init__(
self,
value: TScalar,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
):
self._value = self.as_number(value)
if start_time is None:
start_time = self._now()
if end_time is None:
end_time = self._now()
self._start = start_time
self._end = end_time
def __add__(self, other: Optional['TimerMetric']) -> 'TimerMetric':
# NOTE: hinting can be cleaned up with "from __future__ import annotations" when
# we drop Python 3.6
if other is None:
return self
total: TScalar = self._value + other._value
start: int = min(self._start, other._start)
end: int = max(self._end, other._end)
return type(self)(total, start, end)
def value(self) -> float:
if self._value == 0 or self._end == self._start:
return 0
return self._value / (self._end - self._start)
class GlobalMetric:
"""
A global metric is one that should not be aggregated across different tasks.
Examples of global metric include things like learning rate and updates.
These need to be accumulated or averaged over multiple parleys, but cannot
be correlated with a single task.
Key to it is the notion that any one worker or any one task already has a global
view of the value, and so no combinations should be done. Note this is different
from a FixedMetric, in that a GlobalMetric can still be averaged across multiple
parleys(), but a FixedMetric is always fixed.
"""
@property
def is_global(self) -> bool:
return True
class GlobalFixedMetric(GlobalMetric, FixedMetric):
"""
Global fixed metric.
Used for things like total_train_updates.
"""
pass
class GlobalSumMetric(GlobalMetric, SumMetric):
"""
Global sum metric.
Used for 'exs' and 'updates'.
"""
pass
class GlobalAverageMetric(GlobalMetric, AverageMetric):
"""
Global Average metric.
Used for things like learning rate, and many agent-specific metrics.
"""
pass
class LegacyMetric(GlobalAverageMetric):
"""
Legacy Metrics are reported by agent as float.
"""
pass
class GlobalTimerMetric(GlobalMetric, TimerMetric):
pass
class F1Metric(AverageMetric):
"""
Helper class which computes token-level F1.
"""
@staticmethod
def _prec_recall_f1_score(pred_items, gold_items):
"""
Compute precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
@staticmethod
def compute(guess: str, answers: List[str]) -> 'F1Metric':
if guess is None or answers is None:
return AverageMetric(0, 0)
g_tokens = normalize_answer(guess).split()
scores = [
F1Metric._prec_recall_f1_score(g_tokens, normalize_answer(a).split())
for a in answers
]
return F1Metric(max(f1 for p, r, f1 in scores), 1)
class ExactMatchMetric(AverageMetric):
@staticmethod
def compute(guess: str, answers: List[str]) -> 'ExactMatchMetric':
if guess is None or answers is None:
return None
guess = normalize_answer(guess)
for a in answers:
if guess == normalize_answer(a):
return ExactMatchMetric(1)
return ExactMatchMetric(0)
class BleuMetric(AverageMetric):
@staticmethod
def compute(guess: str, answers: List[str], k: int = 4) -> Optional['BleuMetric']:
"""
Compute approximate BLEU score between guess and a set of answers.
"""
if nltkbleu is None:
# bleu library not installed, just return a default value
return None
# Warning: BLEU calculation *should* include proper tokenization and
# punctuation etc. We're using the normalize_answer for everything though,
# so we're over-estimating our BLEU scores. Also note that NLTK's bleu is
# going to be slower than fairseq's (which is written in C), but fairseq's
# requires that everything be in arrays of ints (i.e. as tensors). NLTK's
# works with strings, which is better suited for this module.
weights = [1 / k for _ in range(k)]
score = nltkbleu.sentence_bleu(
[normalize_answer(a).split(" ") for a in answers],
normalize_answer(guess).split(" "),
smoothing_function=nltkbleu.SmoothingFunction(epsilon=1e-12).method1,
weights=weights,
)
return BleuMetric(score)
class FairseqBleuMetric(AverageMetric):
@staticmethod
def compute_many(
guess: torch.Tensor, answers: torch.Tensor, pad_idx, end_idx, unk_idx
):
"""
Return BLEU-1..4 using fairseq and tokens.
"""
if fairseqbleu is None:
return None
scorer = fairseqbleu.Scorer(pad_idx, end_idx, unk_idx)
answers = answers.cpu().int()
guess = guess.cpu().int()
scorer.add(answers, guess)
return [FairseqBleuMetric(scorer.score(i) / 100.0) for i in range(1, 5)]
class RougeMetric(AverageMetric):
_evaluator = None
@staticmethod
def compute_many(
guess: str, answers: List[str]
) -> Tuple[
Optional['RougeMetric'], Optional['RougeMetric'], Optional['RougeMetric']
]:
"""
Compute ROUGE score between guess and *any* answer.
Done with compute_many due to increased efficiency.
:return: (rouge-1, rouge-2, rouge-L)
"""
# possible global initialization
global rouge
if rouge is None:
return None, None, None
if RougeMetric._evaluator is None:
RougeMetric._evaluator = rouge.Rouge(
metrics=['rouge-n', 'rouge-l'], max_n=2
)
try:
scores = [
RougeMetric._evaluator.get_scores(
normalize_answer(guess), normalize_answer(a)
)
for a in answers
]
except LookupError:
warn_once(
'ROUGE requires nltk punkt tokenizer. Please run '
'`python -c "import nltk; nltk.download(\'punkt\')"`'
)
return None, None, None
scores_rouge1 = max(score['rouge-1']['r'] for score in scores)
scores_rouge2 = max(score['rouge-2']['r'] for score in scores)
scores_rougeL = max(score['rouge-l']['r'] for score in scores)
return (
RougeMetric(scores_rouge1),
RougeMetric(scores_rouge2),
RougeMetric(scores_rougeL),
)
def normalize_answer(s):
"""
Lower text and remove punctuation, articles and extra whitespace.
"""
s = s.lower()
s = re_punc.sub(' ', s)
s = re_art.sub(' ', s)
# TODO: this could almost certainly be faster with a regex \s+ -> ' '
s = ' '.join(s.split())
return s
def aggregate_named_reports(
named_reports: Dict[str, Dict[str, Metric]], micro_average: bool = False
) -> Dict[str, Metric]:
"""
Aggregate metrics from multiple reports.
:param reports:
Dict of tasks -> metrics.
:param micro_average:
If true, top level metrics will be the micro average. By default, we
use macro average.
:return:
The aggregated report
"""
if len(named_reports) == 0:
raise ValueError("Cannot aggregate empty reports.")
if len(named_reports) == 1:
# no real aggregation to be done
return next(iter(named_reports.values()))
# reporters is a list of teachers or worlds
m: Dict[str, Metric] = {}
macro_averages: Dict[str, Dict[str, Metric]] = {}
for task_id, task_report in named_reports.items():
for each_metric, value in task_report.items():
if value.is_global:
# just take the first one we saw
if each_metric not in m:
m[each_metric] = value
else:
task_metric = f'{task_id}/{each_metric}'
m[task_metric] = m.get(task_metric) + value
if micro_average or not value.macro_average:
# none + a => a from implementation of Metric.__add__
m[each_metric] = m.get(each_metric) + value
else:
# macro average
if each_metric not in macro_averages:
macro_averages[each_metric] = {}
macro_averages[each_metric][task_id] = value
for key, values in macro_averages.items():
m[key] = MacroAverageMetric(values)
return m
def aggregate_unnamed_reports(reports: List[Dict[str, Metric]]) -> Dict[str, Metric]:
"""
Combines metrics without regard for tracking provenance.
"""
m: Dict[str, Metric] = {}
for task_report in reports:
for each_metric, value in task_report.items():
m[each_metric] = m.get(each_metric) + value
return m
class Metrics(object):
"""
Threadsafe metrics container focused on aggregation.
"""
def __init__(self, threadsafe=False, shared=None):
self._threadsafe = threadsafe
if self._threadsafe and shared is None:
# Threadsafe metrics tracking works by keeping a queue that workers can
# push updates to. the main worker works through the queue at report
# time. We could add some buffering to improve performance, but we
# are deprioritizing hogwild performance at this time.
self._buffer = None
self._queue = multiprocessing.SimpleQueue()
self._worker = False
self._data = {}
elif shared and 'queue' in shared:
# This is a clone, in threadsafe mode
self._buffer = {}
self._queue = shared['queue']
self._worker = True
self._data = None
elif shared and 'data' in shared:
# This is a clone, in non-threadsafe mode
self._buffer = None
self._queue = None
self._worker = False
self._data = shared['data']
else:
# The original in non-threadsafe mode
self._buffer = None
self._queue = None
self._worker = False
self._data = {}
def __str__(self):
return str(self._data)
def __repr__(self):
return f'Metrics({repr(self._data)})'
def add(self, key: str, value: Optional[Metric]) -> None:
"""
Record an accumulation to a metric.
"""
if self._threadsafe and self._worker:
self._buffer[key] = self._buffer.get(key) + value
else:
self._data[key] = self._data.get(key) + value
def flush(self):
"""
Clear the local buffer and push it on.
"""
if self._threadsafe and self._buffer:
self._queue.put(self._buffer)
self._buffer.clear()
def report(self):
"""
Report the metrics over all data seen so far.
"""
self.sync()
return {k: v for k, v in self._data.items()}
def sync(self):
"""
Process all items on the queue to ensure it is up to date.
"""
if self._worker:
self.flush()
elif self._threadsafe and not self._worker:
for buffer_ in self._drain_queue():
for key, value in buffer_.items():
self._data[key] = self._data.get(key) + value
def _drain_queue(self):
"""
Drain the queue, yielding all items in it.
"""
while not self._queue.empty():
try:
yield self._queue.get()
except queue.Empty:
break
def clear(self):
"""
Clear all the metrics.
"""
if self._worker:
self._buffer.clear()
elif self._threadsafe and not self._worker:
for _ in self._drain_queue():
pass
if self._data:
self._data.clear()
def share(self):
if self._threadsafe:
return {'queue': self._queue}
else:
return {'data': self._data}
class TeacherMetrics(Metrics):
"""
Helper container which encapsulates standard metrics (F1, BLEU, ...).
"""
def __init__(
self,
threadsafe: bool = False,
metrics_list: str = "default",
shared: Dict[str, Any] = None,
) -> None:
super().__init__(threadsafe=threadsafe, shared=shared)
self._metrics_list = self._infer_metrics(metrics_list)
self.eval_pr = [1, 5, 10, 100]
@staticmethod
def _infer_metrics(cli_arg: str) -> Set[str]:
"""
Parse the CLI metric into a list of metrics we wish to compute.
"""
col: Set[str] = set()
names = cli_arg.split(",")
for n in names:
if n == 'default':
col |= DEFAULT_METRICS
elif n == 'rouge':
col |= ROUGE_METRICS
elif n == 'bleu':
col |= BLEU_METRICS
elif n == 'all':
col |= ALL_METRICS
else:
col.add(n)
return col
def _update_ranking_metrics(self, observation, labels):
text_cands = observation.get('text_candidates', None)
if text_cands is None:
return
# Now loop through text candidates, assuming they are sorted.
# If any of them is a label then score a point.
# maintain hits@1, 5, 10, 50, 100, etc.
label_set = set(normalize_answer(l) for l in labels)
cnts = {k: 0 for k in self.eval_pr}
cnt = 0
for c in text_cands:
cnt += 1
if normalize_answer(c) in label_set:
for k in self.eval_pr:
if cnt <= k:
cnts[k] += 1
# hits metric is 1 if cnts[k] > 0.
# (other metrics such as p@k and r@k take
# the value of cnt into account.)
for k in self.eval_pr:
self.add(f'hits@{k}', AverageMetric(cnts[k] > 0))
def evaluate_response(self, observation: Message, labels: List[str]) -> None:
"""
Compute all required text-based metrics based on an observation and labels.
"""
prediction = observation.get('text', None)
self.add('exs', SumMetric(1))
if prediction is not None:
self.add('accuracy', ExactMatchMetric.compute(prediction, labels))
self.add('f1', F1Metric.compute(prediction, labels))
for k in range(1, 5): # 1..4
if f'bleu-{k}' in self._metrics_list:
self.add(f'bleu-{k}', BleuMetric.compute(prediction, labels, k))
# if any of the rouges are in the list
if self._metrics_list & ROUGE_METRICS:
r1, r2, rL = RougeMetric.compute_many(prediction, labels)
if 'rouge-1' in self._metrics_list:
self.add('rouge_1', r1)
if 'rouge-2' in self._metrics_list:
self.add('rouge_2', r2)
if 'rouge-L' in self._metrics_list:
self.add('rouge_L', rL)
# Ranking metrics.
self._update_ranking_metrics(observation, labels)
# User-reported metrics
if 'metrics' in observation:
for uk, v in observation['metrics'].items():
if uk in ALL_METRICS:
# don't let the user override our metrics
uk = f'USER_{uk}'
assert isinstance(uk, str), type(uk)
if not isinstance(v, Metric):
warn_once(f'Metric {uk} is assumed to be averaged per example.')
v = AverageMetric(v)
assert isinstance(v, Metric)
self.add(uk, v)
# always flush at the end of processing this response
self.flush()
|
get
|
Get values according to the filepath.
Args:
filepath (str | obj:`Path`): Here, filepath is the lmdb key.
|
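A minimal usage sketch (hedged: it assumes lmdb is installed, that ./example_lmdb is an existing database whose keys are ascii strings, and the key below is a placeholder; FileClient and LmdbBackend are defined in the file shown next):
client = FileClient(backend='lmdb', db_path='./example_lmdb')
value_buf = client.get('000001')   # bytes stored under the lmdb key '000001'
if value_buf is not None:
    print(len(value_buf), 'bytes read')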
import inspect
import warnings
from abc import ABCMeta, abstractmethod
from mmcv_custom.fileio.zipreader import ZipReader
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: `get()` and `get_text()`.
`get()` reads the file as a byte stream and `get_text()` reads the file
as texts.
"""
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
class CephBackend(BaseStorageBackend):
"""Ceph storage backend.
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
"""
def __init__(self, path_mapping=None):
try:
import ceph
warnings.warn('Ceph is deprecated in favor of Petrel.')
except ImportError:
raise ImportError('Please install ceph to enable CephBackend.')
self._client = ceph.S3Client()
assert isinstance(path_mapping, dict) or path_mapping is None
self.path_mapping = path_mapping
def get(self, filepath):
filepath = str(filepath)
if self.path_mapping is not None:
for k, v in self.path_mapping.items():
filepath = filepath.replace(k, v)
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class PetrelBackend(BaseStorageBackend):
"""Petrel storage backend (for internal use).
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
"""
def __init__(self, path_mapping=None):
try:
from petrel_client import client
except ImportError:
raise ImportError('Please install petrel_client to enable '
'PetrelBackend.')
self._client = client.Client()
assert isinstance(path_mapping, dict) or path_mapping is None
self.path_mapping = path_mapping
def get(self, filepath):
filepath = str(filepath)
if self.path_mapping is not None:
for k, v in self.path_mapping.items():
filepath = filepath.replace(k, v)
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class MemcachedBackend(BaseStorageBackend):
"""Memcached storage backend.
Attributes:
server_list_cfg (str): Config file for memcached server list.
client_cfg (str): Config file for memcached client.
sys_path (str | None): Additional path to be appended to `sys.path`.
Default: None.
"""
def __init__(self, server_list_cfg, client_cfg, sys_path=None):
if sys_path is not None:
import sys
sys.path.append(sys_path)
try:
import mc
except ImportError:
raise ImportError(
'Please install memcached to enable MemcachedBackend.')
self.server_list_cfg = server_list_cfg
self.client_cfg = client_cfg
self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
self.client_cfg)
# mc.pyvector serves as a pointer to a memory cache
self._mc_buffer = mc.pyvector()
def get(self, filepath):
filepath = str(filepath)
import mc
self._client.Get(filepath, self._mc_buffer)
value_buf = mc.ConvertBuffer(self._mc_buffer)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class LmdbBackend(BaseStorageBackend):
"""Lmdb storage backend.
Args:
db_path (str): Lmdb database path.
readonly (bool, optional): Lmdb environment parameter. If True,
disallow any write operations. Default: True.
lock (bool, optional): Lmdb environment parameter. If False, when
concurrent access occurs, do not lock the database. Default: False.
readahead (bool, optional): Lmdb environment parameter. If False,
disable the OS filesystem readahead mechanism, which may improve
random read performance when a database is larger than RAM.
Default: False.
Attributes:
db_path (str): Lmdb database path.
"""
def __init__(self,
db_path,
readonly=True,
lock=False,
readahead=False,
**kwargs):
try:
import lmdb
except ImportError:
raise ImportError('Please install lmdb to enable LmdbBackend.')
self.db_path = str(db_path)
self._client = lmdb.open(
self.db_path,
readonly=readonly,
lock=lock,
readahead=readahead,
**kwargs)
# MASKED: get function (lines 164-173)
def get_text(self, filepath):
raise NotImplementedError
def is_zip_path(path):
return '.zip@' in path
class HardDiskBackend(BaseStorageBackend):
"""Raw hard disks storage backend."""
def get(self, filepath):
filepath = str(filepath)
if is_zip_path(filepath):
value_buf = ZipReader.read(filepath)
else:
with open(filepath, 'rb') as f:
value_buf = f.read()
return value_buf
def get_text(self, filepath):
filepath = str(filepath)
with open(filepath, 'r') as f:
value_buf = f.read()
return value_buf
class FileClient(object):
"""A general file client to access files in different backend.
The client loads a file or text in a specified backend from its path
and returns it as a binary file. It can also register other backend
accessors with a given name and backend class.
Attributes:
backend (str): The storage backend type. Options are "disk", "ceph",
"memcached" and "lmdb".
client (:obj:`BaseStorageBackend`): The backend object.
"""
_backends = {
'disk': HardDiskBackend,
'ceph': CephBackend,
'memcached': MemcachedBackend,
'lmdb': LmdbBackend,
'petrel': PetrelBackend,
}
def __init__(self, backend='disk', **kwargs):
if backend not in self._backends:
raise ValueError(
f'Backend {backend} is not supported. Currently supported ones'
f' are {list(self._backends.keys())}')
self.backend = backend
self.client = self._backends[backend](**kwargs)
@classmethod
def register_backend(cls, name, backend):
if not inspect.isclass(backend):
raise TypeError(
f'backend should be a class but got {type(backend)}')
if not issubclass(backend, BaseStorageBackend):
raise TypeError(
f'backend {backend} is not a subclass of BaseStorageBackend')
cls._backends[name] = backend
def get(self, filepath):
return self.client.get(filepath)
def get_text(self, filepath):
return self.client.get_text(filepath)
|
def get(self, filepath):
"""Get values according to the filepath.
Args:
filepath (str | obj:`Path`): Here, filepath is the lmdb key.
"""
filepath = str(filepath)
with self._client.begin(write=False) as txn:
value_buf = txn.get(filepath.encode('ascii'))
return value_buf
| 164
| 173
|
import inspect
import warnings
from abc import ABCMeta, abstractmethod
from mmcv_custom.fileio.zipreader import ZipReader
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: `get()` and `get_text()`.
`get()` reads the file as a byte stream and `get_text()` reads the file
as texts.
"""
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
class CephBackend(BaseStorageBackend):
"""Ceph storage backend.
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
"""
def __init__(self, path_mapping=None):
try:
import ceph
            warnings.warn('Ceph is deprecated in favor of Petrel.')
except ImportError:
raise ImportError('Please install ceph to enable CephBackend.')
self._client = ceph.S3Client()
assert isinstance(path_mapping, dict) or path_mapping is None
self.path_mapping = path_mapping
def get(self, filepath):
filepath = str(filepath)
if self.path_mapping is not None:
for k, v in self.path_mapping.items():
filepath = filepath.replace(k, v)
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class PetrelBackend(BaseStorageBackend):
"""Petrel storage backend (for internal use).
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
"""
def __init__(self, path_mapping=None):
try:
from petrel_client import client
except ImportError:
raise ImportError('Please install petrel_client to enable '
'PetrelBackend.')
self._client = client.Client()
assert isinstance(path_mapping, dict) or path_mapping is None
self.path_mapping = path_mapping
def get(self, filepath):
filepath = str(filepath)
if self.path_mapping is not None:
for k, v in self.path_mapping.items():
filepath = filepath.replace(k, v)
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class MemcachedBackend(BaseStorageBackend):
"""Memcached storage backend.
Attributes:
server_list_cfg (str): Config file for memcached server list.
client_cfg (str): Config file for memcached client.
sys_path (str | None): Additional path to be appended to `sys.path`.
Default: None.
"""
def __init__(self, server_list_cfg, client_cfg, sys_path=None):
if sys_path is not None:
import sys
sys.path.append(sys_path)
try:
import mc
except ImportError:
raise ImportError(
'Please install memcached to enable MemcachedBackend.')
self.server_list_cfg = server_list_cfg
self.client_cfg = client_cfg
self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
self.client_cfg)
        # mc.pyvector serves as a pointer which points to a memory cache
self._mc_buffer = mc.pyvector()
def get(self, filepath):
filepath = str(filepath)
import mc
self._client.Get(filepath, self._mc_buffer)
value_buf = mc.ConvertBuffer(self._mc_buffer)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class LmdbBackend(BaseStorageBackend):
"""Lmdb storage backend.
Args:
db_path (str): Lmdb database path.
readonly (bool, optional): Lmdb environment parameter. If True,
disallow any write operations. Default: True.
lock (bool, optional): Lmdb environment parameter. If False, when
concurrent access occurs, do not lock the database. Default: False.
readahead (bool, optional): Lmdb environment parameter. If False,
disable the OS filesystem readahead mechanism, which may improve
random read performance when a database is larger than RAM.
Default: False.
Attributes:
db_path (str): Lmdb database path.
"""
def __init__(self,
db_path,
readonly=True,
lock=False,
readahead=False,
**kwargs):
try:
import lmdb
except ImportError:
raise ImportError('Please install lmdb to enable LmdbBackend.')
self.db_path = str(db_path)
self._client = lmdb.open(
self.db_path,
readonly=readonly,
lock=lock,
readahead=readahead,
**kwargs)
def get(self, filepath):
"""Get values according to the filepath.
Args:
filepath (str | obj:`Path`): Here, filepath is the lmdb key.
"""
filepath = str(filepath)
with self._client.begin(write=False) as txn:
value_buf = txn.get(filepath.encode('ascii'))
return value_buf
def get_text(self, filepath):
raise NotImplementedError
def is_zip_path(path):
return '.zip@' in path
class HardDiskBackend(BaseStorageBackend):
"""Raw hard disks storage backend."""
def get(self, filepath):
filepath = str(filepath)
if is_zip_path(filepath):
value_buf = ZipReader.read(filepath)
else:
with open(filepath, 'rb') as f:
value_buf = f.read()
return value_buf
def get_text(self, filepath):
filepath = str(filepath)
with open(filepath, 'r') as f:
value_buf = f.read()
return value_buf
class FileClient(object):
"""A general file client to access files in different backend.
    The client loads a file or text in a specified backend from its path
    and returns it as a binary file. It can also register other backend
    accessors with a given name and backend class.
Attributes:
        backend (str): The storage backend type. Options are "disk", "ceph",
            "memcached", "lmdb" and "petrel".
client (:obj:`BaseStorageBackend`): The backend object.
"""
_backends = {
'disk': HardDiskBackend,
'ceph': CephBackend,
'memcached': MemcachedBackend,
'lmdb': LmdbBackend,
'petrel': PetrelBackend,
}
def __init__(self, backend='disk', **kwargs):
if backend not in self._backends:
raise ValueError(
f'Backend {backend} is not supported. Currently supported ones'
f' are {list(self._backends.keys())}')
self.backend = backend
self.client = self._backends[backend](**kwargs)
@classmethod
def register_backend(cls, name, backend):
if not inspect.isclass(backend):
raise TypeError(
f'backend should be a class but got {type(backend)}')
if not issubclass(backend, BaseStorageBackend):
raise TypeError(
f'backend {backend} is not a subclass of BaseStorageBackend')
cls._backends[name] = backend
def get(self, filepath):
return self.client.get(filepath)
def get_text(self, filepath):
return self.client.get_text(filepath)
|
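A minimal usage sketch for the FileClient machinery above, assuming the classes (BaseStorageBackend, FileClient, ...) are in scope in one module; the DummyBackend name and its in-memory store are illustrative additions, not part of the original code.
class DummyBackend(BaseStorageBackend):
    """Toy backend that serves bytes from an in-memory dict."""

    def __init__(self, store=None):
        self.store = store or {}

    def get(self, filepath):
        return self.store[str(filepath)]

    def get_text(self, filepath):
        return self.store[str(filepath)].decode('utf-8')

# register_backend checks that the class subclasses BaseStorageBackend;
# extra keyword arguments to FileClient are forwarded to the backend constructor.
FileClient.register_backend('dummy', DummyBackend)
client = FileClient(backend='dummy', store={'a.txt': b'hello'})
assert client.get('a.txt') == b'hello'
assert client.get_text('a.txt') == 'hello'

# The default disk backend reads local files as raw bytes:
# disk_client = FileClient()               # backend='disk'
# raw = disk_client.get('some/local/file.bin')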
file_extension_for_content_type
|
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
|
# pylint: disable=no-member, redefined-outer-name
"""
Annalist resource types module
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import logging
# log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL
# """
# Each resource type URI or CURIE is associated with a list of one or more file
# extensions and MIME content-types.
#
# The first of each list indicates the value used when creating or serving a
# resource of the indicated type. Any other values given are alternatives
# that are accepted as supplying a resource that is compatible with the type.
#
# File extensions and MIME types are presented as pairs so that an extension
# can be inferred when a MIME content-type is given, and vice versa.
# """
resource_types = (
{ ANNAL.CURIE.Metadata:
[ ("jsonld", "application/ld+json")
, ("json", "application/json")
]
, ANNAL.CURIE.Text:
[ ("txt", "text/plain")
]
, ANNAL.CURIE.Richtext:
[ ("md", "text/markdown")
, ("txt", "text/plain")
]
, ANNAL.CURIE.Image:
[ ("image", "image/*") # Default extension
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
, ANNAL.CURIE.Audio:
[ ("audio", "audio/*") # Default extension
, ("mp3", "audio/mpeg")
, ("mp4", "audio/mp4")
, ("wav", "audio/wav")
, ("ogg", "audio/ogg")
#@@ needs fleshing out?
]
, ANNAL.CURIE.Resource:
[ ("md", "text/markdown")
, ("txt", "text/plain")
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
})
default_types = [("dat", "application/octet-stream")]
def file_extension(typeuri):
"""
Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True
"""
return resource_types.get(typeuri, default_types)[0][0]
def content_type(typeuri):
"""
Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True
"""
return resource_types.get(typeuri, default_types)[0][1]
# MASKED: file_extension_for_content_type function (lines 99-117)
def content_type_for_file_extension(typeuri, file_extension):
"""
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if fe == file_extension:
return ct
return None
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
|
def file_extension_for_content_type(typeuri, content_type):
"""
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if ct == content_type:
return fe
return None
| 99
| 117
|
# pylint: disable=no-member, redefined-outer-name
"""
Annalist resource types module
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import logging
# log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL
# """
# Each resource type URI or CURIE is associated with a list of one or more file
# extensions and MIME content-types.
#
# The first of each list indicates the value used when creating or serving a
# resource of the indicated type. Any other values given are alternatives
# that are accepted as supplying a resource that is compatible with the type.
#
# File extensions and MIME types are presented as pairs so that an extension
# can be inferred when a MIME content-type is given, and vice versa.
# """
resource_types = (
{ ANNAL.CURIE.Metadata:
[ ("jsonld", "application/ld+json")
, ("json", "application/json")
]
, ANNAL.CURIE.Text:
[ ("txt", "text/plain")
]
, ANNAL.CURIE.Richtext:
[ ("md", "text/markdown")
, ("txt", "text/plain")
]
, ANNAL.CURIE.Image:
[ ("image", "image/*") # Default extension
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
, ANNAL.CURIE.Audio:
[ ("audio", "audio/*") # Default extension
, ("mp3", "audio/mpeg")
, ("mp4", "audio/mp4")
, ("wav", "audio/wav")
, ("ogg", "audio/ogg")
#@@ needs fleshing out?
]
, ANNAL.CURIE.Resource:
[ ("md", "text/markdown")
, ("txt", "text/plain")
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
})
default_types = [("dat", "application/octet-stream")]
def file_extension(typeuri):
"""
Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True
"""
return resource_types.get(typeuri, default_types)[0][0]
def content_type(typeuri):
"""
Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True
"""
return resource_types.get(typeuri, default_types)[0][1]
def file_extension_for_content_type(typeuri, content_type):
"""
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if ct == content_type:
return fe
return None
def content_type_for_file_extension(typeuri, file_extension):
"""
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if fe == file_extension:
return ct
return None
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
|
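A self-contained sketch of the pair-list lookup that file_extension_for_content_type performs; toy_types and toy_default below are made-up stand-ins for annalist's resource_types and default_types, so the snippet runs without the package installed.
# Toy stand-ins for resource_types / default_types; names are illustrative only.
toy_types = {
    "example:Richtext": [("md", "text/markdown"), ("txt", "text/plain")],
}
toy_default = [("dat", "application/octet-stream")]

def toy_extension_for_content_type(typeuri, content_type):
    # The first matching (extension, content-type) pair wins; None means unsupported.
    for fe, ct in toy_types.get(typeuri, toy_default):
        if ct == content_type:
            return fe
    return None

assert toy_extension_for_content_type("example:Richtext", "text/markdown") == "md"
assert toy_extension_for_content_type("example:Richtext", "image/png") is None
assert toy_extension_for_content_type("example:Unknown", "application/octet-stream") == "dat"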
content_type_for_file_extension
|
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
|
# pylint: disable=no-member, redefined-outer-name
"""
Annalist resource types module
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import logging
# log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL
# """
# Each resource type URI or CURIE is associated with a list of one or more file
# extensions and MIME content-types.
#
# The first of each list indicates the value used when creating or serving a
# resource of the indicated type. Any other values given are alternatives
# that are accepted as supplying a resource that is compatible with the type.
#
# File extensions and MIME types are presented as pairs so that an extension
# can be inferred when a MIME content-type is given, and vice versa.
# """
resource_types = (
{ ANNAL.CURIE.Metadata:
[ ("jsonld", "application/ld+json")
, ("json", "application/json")
]
, ANNAL.CURIE.Text:
[ ("txt", "text/plain")
]
, ANNAL.CURIE.Richtext:
[ ("md", "text/markdown")
, ("txt", "text/plain")
]
, ANNAL.CURIE.Image:
[ ("image", "image/*") # Default extension
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
, ANNAL.CURIE.Audio:
[ ("audio", "audio/*") # Default extension
, ("mp3", "audio/mpeg")
, ("mp4", "audio/mp4")
, ("wav", "audio/wav")
, ("ogg", "audio/ogg")
#@@ needs fleshing out?
]
, ANNAL.CURIE.Resource:
[ ("md", "text/markdown")
, ("txt", "text/plain")
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
})
default_types = [("dat", "application/octet-stream")]
def file_extension(typeuri):
"""
Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True
"""
return resource_types.get(typeuri, default_types)[0][0]
def content_type(typeuri):
"""
Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True
"""
return resource_types.get(typeuri, default_types)[0][1]
def file_extension_for_content_type(typeuri, content_type):
"""
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if ct == content_type:
return fe
return None
# MASKED: content_type_for_file_extension function (lines 119-137)
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
|
def content_type_for_file_extension(typeuri, file_extension):
"""
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if fe == file_extension:
return ct
return None
| 119
| 137
|
# pylint: disable=no-member, redefined-outer-name
"""
Annalist resource types module
"""
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2015, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
# import logging
# log = logging.getLogger(__name__)
from annalist.identifiers import ANNAL
# """
# Each resource type URI or CURIE is associated with a list of one or more file
# extensions and MIME content-types.
#
# The first of each list indicates the value used when creating or serving a
# resource of the indicated type. Any other values given are alternatives
# that are accepted as supplying a resource that is compatible with the type.
#
# File extensions and MIME types are presented as pairs so that an extension
# can be inferred when a MIME content-type is given, and vice versa.
# """
resource_types = (
{ ANNAL.CURIE.Metadata:
[ ("jsonld", "application/ld+json")
, ("json", "application/json")
]
, ANNAL.CURIE.Text:
[ ("txt", "text/plain")
]
, ANNAL.CURIE.Richtext:
[ ("md", "text/markdown")
, ("txt", "text/plain")
]
, ANNAL.CURIE.Image:
[ ("image", "image/*") # Default extension
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
, ANNAL.CURIE.Audio:
[ ("audio", "audio/*") # Default extension
, ("mp3", "audio/mpeg")
, ("mp4", "audio/mp4")
, ("wav", "audio/wav")
, ("ogg", "audio/ogg")
#@@ needs fleshing out?
]
, ANNAL.CURIE.Resource:
[ ("md", "text/markdown")
, ("txt", "text/plain")
, ("png", "image/png")
, ("jpg", "image/jpeg")
, ("jpeg", "image/jpeg")
, ("gif", "image/gif")
, ("tiff", "image/tiff")
, ("svg", "image/svg")
, ("pdf", "application/pdf")
]
})
default_types = [("dat", "application/octet-stream")]
def file_extension(typeuri):
"""
Returns preferred file extension for resource type
>>> file_extension(ANNAL.CURIE.Metadata) == "jsonld"
True
>>> file_extension(ANNAL.CURIE.Richtext) == "md"
True
"""
return resource_types.get(typeuri, default_types)[0][0]
def content_type(typeuri):
"""
Returns preferred MIME content-type for resource type
>>> content_type(ANNAL.CURIE.Metadata) == "application/ld+json"
True
>>> content_type(ANNAL.CURIE.Richtext) == "text/markdown"
True
"""
return resource_types.get(typeuri, default_types)[0][1]
def file_extension_for_content_type(typeuri, content_type):
"""
Returns file extension for given content-type as an instance of a given type URI,
or None.
>>> file_extension_for_content_type(ANNAL.CURIE.Richtext, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "text/markdown") == "md"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/pdf") == "pdf"
True
>>> file_extension_for_content_type(ANNAL.CURIE.Resource, "application/unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if ct == content_type:
return fe
return None
def content_type_for_file_extension(typeuri, file_extension):
"""
Returns content-type for given file extension as an instance of a given type URI,
or None.
>>> content_type_for_file_extension(ANNAL.CURIE.Richtext, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "md") == "text/markdown"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "pdf") == "application/pdf"
True
>>> content_type_for_file_extension(ANNAL.CURIE.Resource, "unknown") == None
True
"""
for fe, ct in resource_types.get(typeuri, default_types):
if fe == file_extension:
return ct
return None
if __name__ == "__main__":
import doctest
doctest.testmod()
# End.
|
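A small round-trip check for the two lookups, assuming the annalist package is installed and both functions are in scope; note that aliased extensions such as "jpg" and "jpeg" both map to "image/jpeg", so mapping back from a content-type always lands on the first listed alias.
from annalist.identifiers import ANNAL

ct = content_type_for_file_extension(ANNAL.CURIE.Richtext, "md")
assert ct == "text/markdown"
# Mapping back recovers the extension for this (type URI, content-type) pair.
assert file_extension_for_content_type(ANNAL.CURIE.Richtext, ct) == "md"
# Aliased extensions collapse onto the first listed pair:
assert file_extension_for_content_type(
    ANNAL.CURIE.Resource,
    content_type_for_file_extension(ANNAL.CURIE.Resource, "jpeg")) == "jpg"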
rotate
|
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
|
# This file is part of
# the galaxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
# MASKED: rotate function (lines 109-135)
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
Galaxy parameter for test.
    This returns a function that builds a dictionary with random params of
    a Galaxy object
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
|
def rotate(pos, vel, matrix):
"""
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
| 109
| 135
|
# This file is part of
# the galaxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
def rotate(pos, vel, matrix):
"""
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
Galaxy parameter for test.
    This returns a function that builds a dictionary with random params of
    a Galaxy object
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
|
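A quick numerical check of rotate, assuming rot_matrix_zaxis and rotate from the fixture module above are in scope; the angle and vectors are arbitrary illustrative values.
import numpy as np

theta = np.pi / 2  # 90 degrees about the z axis
a = rot_matrix_zaxis(theta)

pos = np.array([[1.0, 0.0, 0.0]])
vel = np.array([[0.0, 1.0, 0.0]])
pos_rot, vel_rot = rotate(pos, vel, a)

# Right-multiplying a row vector picks out rows of the matrix:
# (1, 0, 0) @ a == (cos t, -sin t, 0) and (0, 1, 0) @ a == (sin t, cos t, 0).
assert np.allclose(pos_rot, [[0.0, -1.0, 0.0]])
assert np.allclose(vel_rot, [[1.0, 0.0, 0.0]])
# The matrix is orthogonal, so particle distances from the origin are preserved.
assert np.allclose(np.linalg.norm(pos_rot), np.linalg.norm(pos))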
distance
|
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
|
# This file is part of
# the galaxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
def rotate(pos, vel, matrix):
"""
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
# MASKED: distance function (lines 138-172)
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
Galaxy parameter for test.
    This returns a function that builds a dictionary with random params of
    a Galaxy object
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
|
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
| 138
| 172
|
# This file is part of
# the galaxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
def rotate(pos, vel, matrix):
"""
Rotate.
Applies the rotation `matrix` to a set of particles positions `pos` and
velocities `vel`
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
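# --- Illustrative sanity check (added for this write-up; not part of the
# original test module). For two equal point masses m0 a distance d apart and
# eps = 0, `epot` should return G * m0 / d for each particle, where
# G = 4.299e-6 is assumed to be in kpc (km/s)^2 / M_sun. The helper name and
# the specific numbers below are arbitrary choices for this sketch.
def _check_epot_two_body():
    m0, d = 1.0e8, 10.0
    x = np.array([0.0, d])
    y = np.zeros(2)
    z = np.zeros(2)
    m = np.array([m0, m0])
    expected = 4.299e-6 * m0 / d
    assert np.allclose(epot(x, y, z, m), expected)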
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
    Galaxy parameters for tests.
    Returns a factory function that builds a dictionary of random
    parameters for a Galaxy object.
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
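# --- Illustrative check (added for this write-up; not in the original file).
# The disc produced by the `solid_disk` factory rotates as a solid body, so
# the in-plane speed of every particle should equal omega * r. The helper
# below is hypothetical; it expects the factory returned by the fixture.
def _check_solid_body_rotation(solid_disk, omega=10):
    mass, pos, vel = solid_disk(N_part=50, omega=omega)
    r = np.hypot(pos[:, 0], pos[:, 1])
    speed = np.hypot(vel[:, 0], vel[:, 1])
    assert np.allclose(speed, omega * r)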
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
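# --- Illustrative check (added for this write-up; not in the original file).
# Halo particles generated by `mock_dm_halo` are drawn inside a sphere of
# radius `rmax`, so every particle radius should satisfy r <= rmax. The helper
# name is hypothetical; it expects the factory returned by the fixture.
def _check_halo_extent(mock_dm_halo, rmax=100):
    mass, pos = mock_dm_halo(N_part=200, rmax=rmax)
    assert np.all(np.linalg.norm(pos, axis=1) <= rmax)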
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
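# --- Note added for this write-up (not part of the original fixture): the
# .dat files loaded above are assumed to be whitespace-separated tables whose
# columns are [mass, x, y, z, vx, vy, vz], with masses in units of 1e10 M_sun,
# as implied by the column slicing and the 1e10 factor in `mock_real_galaxy`.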
|
epot
|
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
|
# This file is part of
# the galaxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
def rotate(pos, vel, matrix):
"""
Rotate.
    Applies the rotation `matrix` to a set of particle positions `pos` and
    velocities `vel`.
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
    pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
    vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
# MASKED: epot function (lines 175-208)
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
    Galaxy parameters for tests.
    Returns a factory function that builds a dictionary of random
    parameters for a Galaxy object.
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
|
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
| 175
| 208
|
# This file is part of
# the galaxy-chop project (https://github.com/vcristiani/galaxy-chop)
# Copyright (c) 2020, Valeria Cristiani
# License: MIT
# Full Text: https://github.com/vcristiani/galaxy-chop/blob/master/LICENSE.txt
"""Fixtures input data."""
# =============================================================================
# IMPORTS
# =============================================================================
import os
from pathlib import Path
import astropy.units as u
from galaxychop import core
import numpy as np
import pytest
# =============================================================================
# PATHS
# =============================================================================
PATH = Path(os.path.abspath(os.path.dirname(__file__)))
TEST_DATA_PATH = PATH / "test_data"
TEST_DATA_REAL_PATH = TEST_DATA_PATH / "real"
# =============================================================================
# Defining utility functions for mocking data
# =============================================================================
def rot_matrix_xaxis(theta=0):
"""
Rotation matrix of a transformation around X axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[1, 0, 0],
[0, np.cos(theta), -1 * np.sin(theta)],
[0, np.sin(theta), np.cos(theta)],
]
)
return A
def rot_matrix_yaxis(theta=0):
"""
Rotation matrix of a transformation around Y axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-1 * np.sin(theta), 0, np.cos(theta)],
]
)
return A
def rot_matrix_zaxis(theta=0):
"""
Rotation matrix of a transformation around Z axis.
Parameters
----------
theta : `float`
Rotation angle in radians
Returns
-------
A : `np.ndarray`
Rotation matrix, with shape (3, 3)
"""
A = np.array(
[
[np.cos(theta), -1 * np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1],
]
)
return A
def rotate(pos, vel, matrix):
"""
Rotate.
    Applies the rotation `matrix` to a set of particle positions `pos` and
    velocities `vel`.
Parameters
----------
pos : `np.ndarray`, shape = (N_part, 3)
Positions of particles
vel : `np.ndarray`, shape = (N_part, 3)
Velocities of particles
matrix : `np.ndarray`
Rotation matrix, with shape (3, 3)
Returns
-------
    pos_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated positions of particles
    vel_rot : `np.ndarray`, shape = (N_part, 3)
        Rotated velocities of particles
"""
pos_rot = pos @ matrix
vel_rot = vel @ matrix
return pos_rot, vel_rot
def distance(x, y, z, m):
"""
Distances calculator.
    Calculate distances between particles.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
Returns
-------
dx, dy, dz: `np.ndarray`, shape = (N_part, N_part)
Distances between particles.
"""
N_part = len(m)
dx = np.zeros((N_part, N_part))
dy = np.zeros((N_part, N_part))
dz = np.zeros((N_part, N_part))
for i in range(0, N_part - 1):
for j in range(i + 1, N_part):
dx[i, j] = x[j] - x[i]
dy[i, j] = y[j] - y[i]
dz[i, j] = z[j] - z[i]
dx[j, i] = -dx[i, j]
dy[j, i] = -dy[i, j]
dz[j, i] = -dz[i, j]
return dx, dy, dz
def epot(x, y, z, m, eps=0.0):
"""
Potential energy with python.
Parameters
----------
x, y, z: `np.ndarray`, shape = (N_part, 1)
Positions
m : `np.ndarray`, shape = (N_part, 1)
Masses
eps: `float`
Softening radius
Returns
-------
Upot: `np.ndarray`, shape = (N_part, 1)
Potential energy of particles
"""
G = 4.299e-6
N_part = len(m)
U = np.zeros((N_part, N_part))
dx, dy, dz = distance(x, y, z, m)
dist = np.sqrt(dx ** 2 + dy ** 2 + dz ** 2 + eps ** 2)
for i in range(N_part - 1):
for j in range(i + 1, N_part):
U[i, j] = G * m[j] * m[i] / dist[i, j]
U[j, i] = U[i, j]
Upot = np.sum(U / m, axis=0)
return Upot
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def random_galaxy_params():
"""
    Galaxy parameters for tests.
    Returns a factory function that builds a dictionary of random
    parameters for a Galaxy object.
"""
def make(stars, gas, dm, seed):
random = np.random.default_rng(seed=seed)
x_s = random.random(stars)
y_s = random.random(stars)
z_s = random.random(stars)
vx_s = random.random(stars)
vy_s = random.random(stars)
vz_s = random.random(stars)
m_s = random.random(stars)
x_dm = random.random(dm)
y_dm = random.random(dm)
z_dm = random.random(dm)
vx_dm = random.random(dm)
vy_dm = random.random(dm)
vz_dm = random.random(dm)
m_dm = random.random(dm)
x_g = random.random(gas)
y_g = random.random(gas)
z_g = random.random(gas)
vx_g = random.random(gas)
vy_g = random.random(gas)
vz_g = random.random(gas)
m_g = random.random(gas)
params = {
"m_s": m_s,
"x_s": x_s,
"y_s": y_s,
"z_s": z_s,
"vx_s": vx_s,
"vy_s": vy_s,
"vz_s": vz_s,
"m_dm": m_dm,
"x_dm": x_dm,
"y_dm": y_dm,
"z_dm": z_dm,
"vx_dm": vx_dm,
"vy_dm": vy_dm,
"vz_dm": vz_dm,
"m_g": m_g,
"x_g": x_g,
"y_g": y_g,
"z_g": z_g,
"vx_g": vx_g,
"vy_g": vy_g,
"vz_g": vz_g,
}
return params
return make
@pytest.fixture(scope="session")
def solid_disk():
"""
Mock solid disk.
Creates a mock solid disc of particles with masses
and velocities.
"""
def make(N_part=100, rmax=30, rmin=2, omega=10, seed=42):
random = np.random.RandomState(seed=seed)
r = (rmax - rmin) * random.random_sample(size=N_part) + rmin
phi0 = 2 * np.pi * random.random_sample(size=N_part)
mass = 1.0e8 * np.ones_like(r)
x = r * np.cos(phi0)
y = r * np.sin(phi0)
z = 1 * random.random_sample(size=N_part) - 0.5
xdot = -1 * omega * r * np.sin(phi0)
ydot = omega * r * np.cos(phi0)
zdot = np.zeros_like(xdot)
pos = np.array([x, y, z]).T
vel = np.array([xdot, ydot, zdot]).T
return mass, pos, vel
return make
@pytest.fixture(scope="session")
def mock_dm_halo():
"""
Mock dark matter Halo.
Creates a mock DM halo of particles with masses
and velocities.
"""
def make(N_part=1000, rmax=100, seed=55):
random = np.random.RandomState(seed=seed)
r = random.random_sample(size=N_part) * rmax
cos_t = random.random_sample(size=N_part) * 2.0 - 1
phi0 = 2 * np.pi * random.random_sample(size=N_part)
sin_t = np.sqrt(1 - cos_t ** 2)
mass = 1.0e10 * np.ones_like(r)
x = r * sin_t * np.cos(phi0)
y = r * sin_t * np.sin(phi0)
z = r * cos_t
pos = np.array([x, y, z]).T
return mass, pos
return make
@pytest.fixture
def disc_zero_angle(solid_disk):
"""Disc with no angle of inclination."""
mass, pos, vel = solid_disk(N_part=1000)
return mass, pos, vel
@pytest.fixture
def disc_xrotation(solid_disk):
"""Disc rotated over x axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_xaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_yrotation(solid_disk):
"""Disc rotated over y axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_yaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_zrotation(solid_disk):
"""Disc rotated over z axis."""
mass, pos, vel = solid_disk(N_part=1000)
random = np.random.RandomState(seed=42)
a = rot_matrix_zaxis(theta=0.3 * np.pi * random.random())
return mass, pos @ a, vel @ a, a
@pytest.fixture
def disc_particles(solid_disk):
"""Solid disc without velocities."""
mass, pos, vel = solid_disk(N_part=100)
return pos[:, 0], pos[:, 1], pos[:, 2], mass
@pytest.fixture
def disc_particles_all(solid_disk):
"""Solid disc with velocities."""
mass_s, pos_s, vel_s = solid_disk(N_part=100)
mass_g, pos_g, vel_g = solid_disk(N_part=100)
return mass_s, pos_s, vel_s, mass_g, pos_g, vel_g
@pytest.fixture(scope="session")
def halo_particles(mock_dm_halo):
"""Spherical mock halo."""
def make(N_part=100, seed=None):
random = np.random.RandomState(seed=seed)
mass_dm, pos_dm = mock_dm_halo(N_part=N_part)
vel_dm = random.random_sample(size=(N_part, 3))
return mass_dm, pos_dm, vel_dm
return make
@pytest.fixture
def mock_galaxy(disc_particles_all, halo_particles):
"""Mock galaxy."""
(mass_s, pos_s, vel_s, mass_g, pos_g, vel_g) = disc_particles_all
mass_dm, pos_dm, vel_dm = halo_particles(N_part=100, seed=42)
g = core.Galaxy(
m_s=mass_s * u.M_sun,
x_s=pos_s[:, 0] * u.kpc,
y_s=pos_s[:, 1] * u.kpc,
z_s=pos_s[:, 2] * u.kpc,
vx_s=vel_s[:, 0] * (u.km / u.s),
vy_s=vel_s[:, 1] * (u.km / u.s),
vz_s=vel_s[:, 2] * (u.km / u.s),
m_dm=mass_dm * u.M_sun,
x_dm=pos_dm[:, 0] * u.kpc,
y_dm=pos_dm[:, 1] * u.kpc,
z_dm=pos_dm[:, 2] * u.kpc,
vx_dm=vel_dm[:, 0] * (u.km / u.s),
vy_dm=vel_dm[:, 1] * (u.km / u.s),
vz_dm=vel_dm[:, 2] * (u.km / u.s),
m_g=mass_g * u.M_sun,
x_g=pos_g[:, 0] * u.kpc,
y_g=pos_g[:, 1] * u.kpc,
z_g=pos_g[:, 2] * u.kpc,
vx_g=vel_g[:, 0] * (u.km / u.s),
vy_g=vel_g[:, 1] * (u.km / u.s),
vz_g=vel_g[:, 2] * (u.km / u.s),
)
return g
@pytest.fixture
def mock_real_galaxy():
"""Mock real galaxy."""
dm = np.loadtxt(TEST_DATA_REAL_PATH / "dark.dat")
s = np.loadtxt(TEST_DATA_REAL_PATH / "star.dat")
g = np.loadtxt(TEST_DATA_REAL_PATH / "gas_.dat")
gal = core.Galaxy(
m_s=s[:, 0] * 1e10 * u.M_sun,
x_s=s[:, 1] * u.kpc,
y_s=s[:, 2] * u.kpc,
z_s=s[:, 3] * u.kpc,
vx_s=s[:, 4] * (u.km / u.s),
vy_s=s[:, 5] * (u.km / u.s),
vz_s=s[:, 6] * (u.km / u.s),
m_dm=dm[:, 0] * 1e10 * u.M_sun,
x_dm=dm[:, 1] * u.kpc,
y_dm=dm[:, 2] * u.kpc,
z_dm=dm[:, 3] * u.kpc,
vx_dm=dm[:, 4] * (u.km / u.s),
vy_dm=dm[:, 5] * (u.km / u.s),
vz_dm=dm[:, 6] * (u.km / u.s),
m_g=g[:, 0] * 1e10 * u.M_sun,
x_g=g[:, 1] * u.kpc,
y_g=g[:, 2] * u.kpc,
z_g=g[:, 3] * u.kpc,
vx_g=g[:, 4] * (u.km / u.s),
vy_g=g[:, 5] * (u.km / u.s),
vz_g=g[:, 6] * (u.km / u.s),
)
return gal
|
timestep
|
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
    types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
    result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
    result type will give the highest type of its arguments, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
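# --- Usage sketch (added for this write-up; not part of the brian2 sources).
# Declaring that a helper takes an integer and returns an integer; any
# argument left out of the declaration defaults to 'any', and an unspecified
# result type defaults to 'float'. The function name below is hypothetical.
@declare_types(n='integer', result='integer')
def _twice(n):
    return 2 * n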
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
        units of the arguments have to be specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
        this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
        any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
        main use case is random number functions, e.g. equations refer to
        ``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
                                    f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
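# --- Usage sketch (added for this write-up; not part of the brian2 sources).
# Wrapping a plain Python function that is not decorated with @check_units,
# so the unit information is supplied explicitly. Wrapped in a helper so it
# is not executed at import time; the names below are hypothetical.
def _example_wrap_function():
    def _half_time(t):
        return 0.5 * t
    return Function(_half_time, arg_units=[second], return_unit=second)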
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
        This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
            raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
    namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
    all cases, e.g. it does not work with functions that internally import
    values (e.g. do ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
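# --- Illustrative check (added for this write-up; not part of the brian2
# sources). Away from zero the symbolic `exprel` rewrites to (exp(x) - 1)/x,
# while at zero it evaluates directly to 1, avoiding the 0/0 form.
def _check_exprel_rewrite():
    x = sympy.Symbol('x')
    assert exprel(S.Zero) == S.One
    diff = exprel(x).rewrite(sympy.exp) - (sympy.exp(x) - S.One) / x
    assert sympy.simplify(diff) == 0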
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
# MASKED: timestep function (lines 657-687)
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
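# --- Usage sketch (added for this write-up; not part of the brian2 sources).
# Entries of DEFAULT_FUNCTIONS map names usable in model equations to
# `Function` objects; the plain Python callable is reachable via `pyfunc`.
def _example_default_function():
    cos_func = DEFAULT_FUNCTIONS['cos']
    return cos_func.pyfunc(np.pi)  # unit-aware cosine of a plain float, -1.0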
|
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
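# --- Illustrative check (added for this write-up; not part of the brian2
# sources). The small forward shift keeps exact multiples of dt in the
# expected step despite floating point rounding, and array inputs come back
# as int64 arrays.
def _check_timestep():
    dt = 0.0001
    assert timestep(100 * dt, dt) == 100
    steps = timestep(np.array([0.0, dt, 2 * dt]), dt)
    assert steps.dtype == np.int64 and list(steps) == [0, 1, 2]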
| 657
| 687
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
    types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
    result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
    result type will give the highest type of its arguments, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
        units of the arguments have to be specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
        this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
        any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case is random number functions, e.g. equations refer to
``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
                f'same length ({len(self._arg_names)} != '
                f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds a "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
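# Editor's note (hedged sketch): a dynamic implementation receives the owner
# of the `CodeObject` in `Network.before_run`, so the generated code can bake
# in context such as the clock's dt. `my_func` and the C++ snippet below are
# illustrative assumptions, not part of Brian2's shipped code:
#
#     def _dt_dependent_code(owner):
#         dt = float(owner.clock.dt_)
#         return 'double steps(double t) { return t / %g; }' % dt
#
#     my_func.implementations.add_dynamic_implementation('cpp',
#                                                        code=_dt_dependent_code)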
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
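# --- Editor's hedged usage sketch (not part of the original module) ---
# Constructing a `Function` around a plain numpy ufunc, supplying the unit
# information explicitly because `np.maximum` carries no `check_units`
# annotation. The variable name and the `return_unit` choice are assumptions.
_example_maximum = Function(pyfunc=np.maximum,
                            arg_units=[None, None],         # any units accepted
                            return_unit=lambda u1, u2: u1,  # keep the first argument's unit
                            return_type='highest')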
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units : bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
all cases, e.g. it does not work with functions that internally import
values (e.g. do ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
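# --- Editor's hedged usage sketch (not part of the original module) ---
# Stacking the decorators as described in the Notes above: `implementation`
# is the outermost decorator, applied on top of `check_units`. The function
# name and the C++ snippet are illustrative assumptions that mirror the
# docstring example.
from brian2.units.fundamentalunits import check_units
@implementation('cpp', '''
#include<math.h>
inline double usersinc(double x)
{
    return x == 0.0 ? 1.0 : sin(x)/x;
}
''')
@check_units(x=1, result=1)
def _example_usersinc(x):
    # numpy fallback, used when no generated code is involved
    return np.sinc(x / np.pi)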
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
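# --- Editor's hedged usage sketch (not part of the original module) ---
# `timestep` maps times onto integer steps while being robust against floating
# point representations of multiples of dt; the numbers below are illustrative.
_example_steps = timestep(np.array([0.0, 0.0001, 0.00995, 0.01]), 0.0001)
# -> array([  0,   1,  99, 100]); note that 0.01 lands in step 100, not 99,
# even though 0.01/0.0001 is not exactly representable in binary floating point.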
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
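# --- Editor's hedged usage sketch (not part of the original module) ---
# DEFAULT_FUNCTIONS entries are ordinary `Function` objects, so additional
# target implementations can be registered on them after the fact. The target
# name 'example_target' and the code string are illustrative assumptions.
DEFAULT_FUNCTIONS['sign'].implementations.add_implementation(
    'example_target',
    code='inline double sign(double x) { return (x > 0) - (x < 0); }')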
|
__getitem__
|
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
the result type will give the highest type of the arguments, e.g. if the arguments
were boolean and integer the result would be integer; if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to be specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case is random number functions, e.g. equations refer to
``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
                f'same length ({len(self._arg_names)} != '
                f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
# MASKED: __getitem__ function (lines 303-351)
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds a "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units : bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
all cases, e.g. it does not work with functions that internally import
values (e.g. do ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
|
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
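# Editor's note (hedged sketch): during code generation this lookup is what
# resolves a `Function` to target-specific code, roughly along the lines of
#
#     impl = some_function.implementations[codeobj_class]
#     code = impl.get_code(owner)
#
# where `some_function`, `codeobj_class` and `owner` stand in for the actual
# objects handled by the code generation machinery.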
| 303
| 351
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
the result type will give the highest type of the arguments, e.g. if the arguments
were boolean and integer the result would be integer; if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to be specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case is random number functions, e.g. equations refer to
``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
                f'same length ({len(self._arg_names)} != '
                f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
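# --- Editor's note: illustrative sketch, not part of brian2. It shows how a plain
# --- Python function can be wrapped in `Function` when it is not decorated with
# --- @check_units, by giving `arg_units`/`return_unit` explicitly. The names
# --- `_example_square`/`_example_function_usage` are hypothetical.
def _example_function_usage():
    import numpy as np
    def _example_square(x):
        return x * x
    square = Function(_example_square,
                      arg_units=[None],               # accept any unit
                      return_unit=lambda u: u ** 2)   # result unit = input unit squared
    # __call__ simply forwards to the wrapped Python function
    return square(np.array([1.0, 2.0, 3.0]))          # -> array([1., 4., 9.])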
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
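# --- Editor's note: illustrative sketch, not part of brian2. A "dynamic"
# --- `FunctionImplementation` produces its code per owner, e.g. to bake the
# --- owner's dt into the generated source. `_make_code` and `_FakeOwner` are
# --- hypothetical stand-ins for a real code callback and a brian2 Group.
def _example_dynamic_implementation():
    def _make_code(owner):
        return 'double _example_scaled(double x) { return x * %f; }' % owner.dt_
    impl = FunctionImplementation(name='_example_scaled', code=_make_code, dynamic=True)
    class _FakeOwner(object):
        dt_ = 0.0001
    return impl.get_code(_FakeOwner())   # code string with dt_ substituted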
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows to generate code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
raise TypeError('namespace argument has to be a callable, is type %s instead' % type(code))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
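# --- Editor's note: illustrative sketch, not part of brian2. It registers a
# --- hand-written C++ implementation on a `Function` and looks it up again by
# --- the target name; `_example_cube` and the 'cpp' target string are assumptions
# --- made for illustration.
def _example_add_implementation():
    def _example_cube(x):
        return x * x * x
    cube = Function(_example_cube, arg_units=[None], return_unit=lambda u: u ** 3)
    cube.implementations.add_implementation('cpp', code='''
        inline double _example_cube(double x)
        {
            return x*x*x;
        }
        ''')
    # normally the lookup key is a CodeObject class; a registered target name works too
    return cube.implementations['cpp'].get_code(owner=None)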
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
all cases, e.g. it does not work with functions that internally import
values (e.g. via ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
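# --- Editor's note: illustrative sketch, not part of brian2's sources. It spells out
# --- the decorator usage from the docstring above: @check_units supplies the unit
# --- information, @implementation (applied last, i.e. written uppermost) attaches the
# --- C++ code. `usersin` is the docstring's hypothetical example function.
def _example_implementation_decorator():
    import numpy as np
    from brian2.units.fundamentalunits import check_units
    @implementation('cpp', '''
        #include<math.h>
        inline double usersin(double x)
        {
            return sin(x);
        }
        ''')
    @check_units(x=1, result=1)
    def usersin(x):
        return np.sin(x)
    return sorted(usersin.implementations)   # -> ['cpp']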
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
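# --- Editor's note: illustrative sketch, not part of brian2. The symbolic `exprel`
# --- defined above evaluates exactly at zero and expands back to (exp(x) - 1)/x for
# --- symbolic arguments.
def _example_exprel_usage():
    import sympy
    from sympy import S, Symbol
    x = Symbol('x')
    at_zero = exprel(S.Zero)                    # -> 1, no 0/0 issue
    expanded = exprel(x).rewrite(sympy.exp)     # -> (exp(x) - 1)/x
    return at_zero, expanded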
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
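# --- Editor's note: illustrative sketch, not part of brian2. The small shift in
# --- `timestep` keeps exact multiples of dt in the expected step, where a naive
# --- int(t/dt) can fall one step short due to floating point rounding.
def _example_timestep_usage():
    import numpy as np
    steps = timestep(np.array([0.0, 0.1, 0.3]), 0.1)   # -> array([0, 1, 3])
    naive = int(0.3 / 0.1)                             # -> 2, since 0.3/0.1 == 2.9999999999999996
    return steps, naive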
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
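# --- Editor's note: illustrative sketch, not part of brian2. Entries of
# --- DEFAULT_FUNCTIONS are `Function` objects whose __call__ forwards to the
# --- underlying Python/numpy function; DEFAULT_CONSTANTS entries carry a sympy
# --- object plus a plain numeric value.
def _example_defaults_usage():
    two = DEFAULT_FUNCTIONS['sqrt'](4.0)        # forwards to np.sqrt -> 2.0
    pi_value = DEFAULT_CONSTANTS['pi'].value    # -> 3.141592653589793
    return two, pi_value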
|
add_numpy_implementation
|
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
result type will give the highest type of its arguments, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
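# --- Editor's note: illustrative sketch, not part of brian2. `declare_types` only
# --- annotates the function; the attributes are read later by `Function`.
# --- `_example_double` is a hypothetical user function.
def _example_declare_types_usage():
    @declare_types(n='integer', result='integer')
    def _example_double(n):
        return 2 * n
    return _example_double._arg_types, _example_double._return_type   # -> (['integer'], 'integer')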
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
this information has to be provided explictly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case is random number functions, e.g. equations refer to
``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
# MASKED: add_numpy_implementation function (lines 353-444)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows to generate code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
raise TypeError('namespace argument has to be a callable, is type %s instead' % type(code))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
all cases, e.g. it does not work with functions that internally import
values (e.g. via ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
|
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
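# --- Editor's note: illustrative sketch, not part of brian2. With
# --- discard_units=False the numpy wrapper above re-attaches units before calling
# --- the original function and strips them from the result. `_example_halve` is a
# --- hypothetical function operating on times in seconds.
def _example_add_numpy_implementation():
    from brian2.core.functions import Function
    from brian2.units.allunits import second
    def _example_halve(x):
        return x / 2
    halve = Function(_example_halve, arg_units=[second], return_unit=second)
    halve.implementations.add_numpy_implementation(_example_halve, discard_units=False)
    wrapper = halve.implementations['numpy'].get_code(owner=None)
    return wrapper(0.2)   # unitless in, unitless out -> array(0.1)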
| 353
| 444
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
result type will give the highest type of its arguments, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
units of the arguments have to specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
this information has to be provided explictly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
main use case is random number functions, e.g. equations refer to
``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
        This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
            raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
    namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
    discard_units : bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
        all cases, e.g. it does not work with functions that internally import
values (e.g. does ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
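# ---------------------------------------------------------------------------
# Editorial sketch (an added illustration, not part of the original file):
# stacking the decorators, with `implementation` outermost and `check_units`
# innermost supplying the unit information that `Function` needs. The
# `usersinc` function below is hypothetical.
from brian2.units.fundamentalunits import check_units as _check_units
@implementation('cpp', '''
    #include<math.h>
    inline double usersinc(double x)
    {
        return x == 0.0 ? 1.0 : sin(x)/x;
    }
    ''')
@declare_types(x='float', result='float')
@_check_units(x=1, result=1)
def usersinc(x):
    return np.sinc(x / np.pi)  # numpy's sinc(y) is sin(pi*y)/(pi*y)
# ---------------------------------------------------------------------------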
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
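# Editorial example (an added illustration, not part of the original file):
# times that are exact multiples of dt land in the expected step despite
# floating-point rounding; `_timestep_example` is just a throwaway name.
_timestep_example = timestep(np.array([0.0, 1e-4, 2e-4]), 1e-4)  # -> array([0, 1, 2])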
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
|
add_dynamic_implementation
|
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
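A minimal usage sketch (an editorial addition, not part of the original docstring; the ``sin_with_dt`` Function object and the ``dt_code`` helper are hypothetical)::

    def dt_code(owner):
        # the callable receives the owner of the CodeObject; its clock's dt
        # can be baked into the generated code
        dt = float(owner.clock.dt_)
        return 'double sin_with_dt(double x) { return sin(x * %f); }' % dt

    sin_with_dt.implementations.add_dynamic_implementation('cpp', code=dt_code)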
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
    types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
    result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
    result type will give the highest type of its arguments, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
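# ---------------------------------------------------------------------------
# Editorial example (an added illustration, not part of the original file):
# `declare_types` only attaches type annotations that `Function` reads later;
# `_twice` is a hypothetical function used only for this sketch.
@declare_types(n='integer', result='integer')
def _twice(n):
    return 2 * n
# _twice._arg_types == ['integer'];  _twice._return_type == 'integer'
# ---------------------------------------------------------------------------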
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
        units of the arguments have to be specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
        this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
        any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
        main use case is random number functions, e.g. equations refer to
        ``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
                                    f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
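# ---------------------------------------------------------------------------
# Editorial sketch (an added illustration, not part of the original file):
# a plain numpy function can be wrapped by stating its unit behaviour
# explicitly instead of relying on @check_units; `_square` is hypothetical.
_square = Function(pyfunc=np.square, arg_units=[None],
                   return_unit=lambda u: u ** 2)
# ---------------------------------------------------------------------------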
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
# MASKED: add_dynamic_implementation function (lines 456-476)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
    namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
    discard_units : bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
        all cases, e.g. it does not work with functions that internally import
values (e.g. does ``from brian2 import ms``) or access values with
units indirectly (e.g. uses ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
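# ---------------------------------------------------------------------------
# Editorial sketch (an added illustration, not part of the original file):
# for the numpy target, an existing unit-aware Python function can be reused
# directly by asking for its units to be discarded; `_midpoint` and the
# aliased imports below are only for this illustration.
from brian2.units.fundamentalunits import check_units as _check_units
from brian2.units.allunits import volt as _volt
@implementation('numpy', discard_units=True)
@_check_units(v=_volt, v_rest=_volt, result=_volt)
def _midpoint(v, v_rest):
    return 0.5 * (v + v_rest)
# ---------------------------------------------------------------------------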
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
|
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
        This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
            raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
| 456
| 476
|
from collections.abc import Mapping
import inspect
import types
from typing import Callable
import numpy as np
import sympy
from sympy.codegen import cfunctions as sympy_cfunctions
from numpy.random import randn, rand
from sympy import Function as sympy_Function
from sympy import S
import brian2.units.unitsafefunctions as unitsafe
from brian2.core.preferences import prefs
from brian2.core.variables import Constant
from brian2.units.fundamentalunits import (fail_for_dimension_mismatch,
Quantity, get_dimensions,
DIMENSIONLESS, is_dimensionless)
from brian2.units.allunits import second
__all__ = ['DEFAULT_FUNCTIONS', 'Function', 'implementation', 'declare_types']
BRIAN_DTYPES = ['boolean', 'integer', 'float']
VALID_ARG_TYPES = BRIAN_DTYPES+['any']
VALID_RETURN_TYPES = BRIAN_DTYPES+['highest']
def declare_types(**types):
'''
Decorator to declare argument and result types for a function
Usage is similar to `check_units` except that types must be one of ``{VALID_ARG_TYPES}``
and the result type must be one of ``{VALID_RETURN_TYPES}``. Unspecified argument
    types are assumed to be ``'any'`` (i.e. anything is permitted), and an unspecified
    result type is assumed to be ``'float'``. Note that the ``'highest'`` option for
    result type will give the highest type of its arguments, e.g. if the arguments
were boolean and integer then the result would be integer, if the arguments were
integer and float it would be float.
'''
def annotate_function_with_types(f):
if hasattr(f, '_orig_arg_names'):
arg_names = f._orig_arg_names
else:
arg_names = f.__code__.co_varnames[0:f.__code__.co_argcount]
argtypes = []
for name in arg_names:
arg_type = types.get(name, 'any')
if arg_type not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"for argument %s" % (arg_type, VALID_ARG_TYPES, name))
argtypes.append(arg_type)
for n in types:
if n not in arg_names and n!='result':
raise ValueError("Type specified for unknown argument "+n)
return_type = types.get('result', 'float')
if return_type not in VALID_RETURN_TYPES:
raise ValueError("Result type %s is not valid, "
"must be one of %s" % (return_type, VALID_RETURN_TYPES))
f._arg_types = argtypes
f._return_type = return_type
f._orig_arg_names = arg_names
f._annotation_attributes = getattr(f, '_annotation_attributes', [])+['_arg_types', '_return_type']
return f
return annotate_function_with_types
class Function(object):
'''
An abstract specification of a function that can be used as part of
model equations, etc.
Parameters
----------
pyfunc : function
A Python function that is represented by this `Function` object.
sympy_func : `sympy.Function`, optional
A corresponding sympy function (if any). Allows functions to be
interpreted by sympy and potentially make simplifications. For example,
``sqrt(x**2)`` could be replaced by ``abs(x)``.
arg_units : list of `Unit`, optional
If `pyfunc` does not provide unit information (which typically means
that it was not annotated with a `check_units` decorator), the
        units of the arguments have to be specified explicitly using this
parameter.
return_unit : `Unit` or callable, optional
Same as for `arg_units`: if `pyfunc` does not provide unit information,
        this information has to be provided explicitly here. `return_unit` can
either be a specific `Unit`, if the function always returns the same
unit, or a function of the input units, e.g. a "square" function would
return the square of its input units, i.e. `return_unit` could be
specified as ``lambda u: u**2``.
arg_types : list of str, optional
Similar to `arg_units`, but gives the type of the argument rather than
its unit. In the current version of Brian arguments are specified
by one of the following strings: 'boolean', 'integer', 'float', 'any'.
If `arg_types` is not specified, 'any' will be assumed. In
future versions, a more refined specification may be possible. Note that
        any argument with a type other than float should have no units.
return_type : str, optional
Similar to `return_unit` and `arg_types`. In addition to 'boolean',
'integer' and 'float' you can also use 'highest' which will return the
highest type of its arguments. You can also give a function, as for
`return_unit`. If the return type is not specified, it is assumed to
be 'float'.
stateless : bool, optional
Whether this function does not have an internal state, i.e. if it
always returns the same output when called with the same arguments.
This is true for mathematical functions but not true for ``rand()``, for
example. Defaults to ``True``.
auto_vectorise : bool, optional
Whether the implementations of this function should get an additional
argument (not specified in abstract code) that can be used to determine
the number of values that should be returned (for the numpy target), or
an index potentially useful for generating deterministic values
independent of the order of vectorisation (for all other targets). The
        main use case is random number functions, e.g. equations refer to
        ``rand()``, but the generated code will actually call
``rand(_vectorisation_idx)``. Defaults to ``False``.
Notes
-----
If a function should be usable for code generation targets other than
Python/numpy, implementations for these target languages have to be added
using the `~brian2.codegen.functions.implementation` decorator or using the
`~brian2.codegen.functions.add_implementations` function.
'''
def __init__(self, pyfunc, sympy_func=None,
arg_units=None, arg_names=None,
return_unit=None,
arg_types=None, return_type=None,
stateless=True, auto_vectorise=False):
self.pyfunc = pyfunc
self.sympy_func = sympy_func
self._arg_units = arg_units
self._arg_names = arg_names
self._return_unit = return_unit
if return_unit == bool:
self._returns_bool = True
else:
self._returns_bool = False
self._arg_types = arg_types
self._return_type = return_type
self.stateless = stateless
self.auto_vectorise = auto_vectorise
if self._arg_units is None:
if not hasattr(pyfunc, '_arg_units'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"arg_units" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._arg_units is None:
# @check_units sets _arg_units to None if the units aren't
# specified for all of its arguments
raise ValueError(('The Python function "%s" does not specify '
'the units for all of its '
'arguments.') % pyfunc.__name__)
else:
self._arg_units = pyfunc._arg_units
else:
if any(isinstance(u, str) for u in self._arg_units):
if self._arg_names is None:
raise TypeError('Need to specify the names of the '
'arguments.')
if len(self._arg_names) != len(self._arg_units):
raise TypeError(f'arg_names and arg_units need to have the '
f'same length ({len(self._arg_names)} != '
                                    f'{len(self._arg_units)})')
if self._return_unit is None:
if not hasattr(pyfunc, '_return_unit'):
raise ValueError(('The Python function "%s" does not specify '
'how it deals with units, need to specify '
'"return_unit" or use the "@check_units" '
'decorator.') % pyfunc.__name__)
elif pyfunc._return_unit is None:
# @check_units sets _return_unit to None if no "result=..."
# keyword is specified.
raise ValueError(('The Python function "%s" does not specify '
'the unit for its return '
'value.') % pyfunc.__name__)
else:
self._return_unit = pyfunc._return_unit
if self._arg_types is None:
if hasattr(pyfunc, '_arg_types'):
self._arg_types = pyfunc._arg_types
else:
self._arg_types = ['any']*len(self._arg_units)
if self._return_type is None:
self._return_type = getattr(pyfunc, '_return_type', 'float')
for argtype, u in zip(self._arg_types, self._arg_units):
if argtype!='float' and argtype!='any' and u is not None and not is_dimensionless(u):
raise TypeError("Non-float arguments must be dimensionless in function "+pyfunc.__name__)
if argtype not in VALID_ARG_TYPES:
raise ValueError("Argument type %s is not valid, must be one of %s, "
"in function %s" % (argtype, VALID_ARG_TYPES, pyfunc.__name__))
if self._return_type not in VALID_RETURN_TYPES:
raise ValueError("Return type %s is not valid, must be one of %s, "
"in function %s" % (self._return_type, VALID_RETURN_TYPES, pyfunc.__name__))
#: Stores implementations for this function in a
#: `FunctionImplementationContainer`
self.implementations = FunctionImplementationContainer(self)
def is_locally_constant(self, dt):
'''
Return whether this function (if interpreted as a function of time)
should be considered constant over a timestep. This is most importantly
used by `TimedArray` so that linear integration can be used. In its
standard implementation, always returns ``False``.
Parameters
----------
dt : float
The length of a timestep (without units).
Returns
-------
constant : bool
Whether the results of this function can be considered constant
over one timestep of length `dt`.
'''
return False
def __call__(self, *args):
return self.pyfunc(*args)
class FunctionImplementation(object):
'''
A simple container object for function implementations.
Parameters
----------
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
code : language-dependent, optional
A language dependent argument specifying the implementation in the
target language, e.g. a code string or a dictionary of code strings.
namespace : dict-like, optional
A dictionary of mappings from names to values that should be added
to the namespace of a `CodeObject` using the function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
availability_check : callable, optional
A function that will be called to check whether the function should be
made available (e.g. depending on whether it is supported by the
compiler). The function should do nothing if the function is
available, or raise a ``NotImplementedError`` with a message
explaining why it isn't.
dynamic : bool, optional
Whether this `code`/`namespace` is dynamic, i.e. generated for each
new context it is used in. If set to ``True``, `code` and `namespace`
have to be callable with a `Group` as an argument and are expected
to return the final `code` and `namespace`. Defaults to ``False``.
'''
def __init__(self, name=None, code=None, namespace=None,
dependencies=None, availability_check=None,
dynamic=False, compiler_kwds=None):
if compiler_kwds is None:
compiler_kwds = {}
self.name = name
if dependencies is None:
dependencies = {}
self.dependencies = dependencies
self._code = code
self._namespace = namespace
self.dynamic = dynamic
self.compiler_kwds = compiler_kwds
self.availability_check = availability_check
def get_code(self, owner):
if self.availability_check is not None:
self.availability_check()
if self.dynamic:
return self._code(owner)
else:
return self._code
def get_namespace(self, owner):
if self.dynamic:
return self._namespace(owner)
else:
return self._namespace
class FunctionImplementationContainer(Mapping):
'''
Helper object to store implementations and give access in a dictionary-like
fashion, using `CodeGenerator` implementations as a fallback for `CodeObject`
implementations.
'''
def __init__(self, function):
self._function = function
self._implementations = dict()
def __getitem__(self, key):
'''
Find an implementation for this function that can be used by the
`CodeObject` given as `key`. Will find implementations registered
for `key` itself (or one of its parents), or for the `CodeGenerator`
class that `key` uses (or one of its parents). In all cases,
implementations registered for the corresponding names qualify as well.
Parameters
----------
key : `CodeObject`
The `CodeObject` that will use the `Function`
Returns
-------
implementation : `FunctionImplementation`
An implementation suitable for `key`.
'''
fallback = getattr(key, 'generator_class', None)
# in some cases we do the code generation with original_generator_class instead (e.g. GSL)
fallback_parent = getattr(key, 'original_generator_class', None)
for K in [key, fallback, fallback_parent]:
name = getattr(K, 'class_name',
'no class name for key')
for impl_key, impl in self._implementations.items():
impl_key_name = getattr(impl_key, 'class_name',
'no class name for implementation')
if ((impl_key_name is not None and impl_key_name in [K, name]) or
(impl_key is not None and impl_key in [K, name])):
return impl
if hasattr(K, '__bases__'):
for cls in inspect.getmro(K):
if cls in self._implementations:
return self._implementations[cls]
name = getattr(cls, 'class_name', None)
if name in self._implementations:
return self._implementations[name]
# Give a nicer error message if possible
if getattr(key, 'class_name', None) is not None:
key = key.class_name
elif getattr(fallback, 'class_name', None) is not None:
key = fallback.class_name
keys = ', '.join([getattr(k, 'class_name', str(k))
for k in self._implementations])
raise KeyError(('No implementation available for target {key}. '
'Available implementations: {keys}').format(key=key,
keys=keys))
def add_numpy_implementation(self, wrapped_func, dependencies=None,
discard_units=None, compiler_kwds=None):
'''
Add a numpy implementation to a `Function`.
Parameters
----------
function : `Function`
The function description for which an implementation should be added.
wrapped_func : callable
The original function (that will be used for the numpy implementation)
dependencies : list of `Function`, optional
A list of functions this function needs.
discard_units : bool, optional
See `implementation`.
'''
if discard_units is None:
discard_units = prefs['codegen.runtime.numpy.discard_units']
# Get the original function inside the check_units decorator
if hasattr(wrapped_func, '_orig_func'):
orig_func = wrapped_func._orig_func
else:
orig_func = wrapped_func
if discard_units:
new_globals = dict(orig_func.__globals__)
# strip away units in the function by changing its namespace
for key, value in new_globals.items():
if isinstance(value, Quantity):
new_globals[key] = np.asarray(value)
unitless_func = types.FunctionType(orig_func.__code__, new_globals,
orig_func.__name__,
orig_func.__defaults__,
orig_func.__closure__)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=unitless_func,
dependencies=dependencies,
compiler_kwds=None)
else:
def wrapper_function(*args):
arg_units = list(self._function._arg_units)
if self._function.auto_vectorise:
arg_units += [DIMENSIONLESS]
if not len(args) == len(arg_units):
raise ValueError(('Function %s got %d arguments, '
'expected %d') % (self._function.pyfunc.__name__, len(args),
len(arg_units)))
new_args = []
for arg, arg_unit in zip(args, arg_units):
if arg_unit == bool or arg_unit is None or isinstance(arg_unit, str):
new_args.append(arg)
else:
new_args.append(Quantity.with_dimensions(arg,
get_dimensions(arg_unit)))
result = orig_func(*new_args)
if isinstance(self._function._return_unit, Callable):
return_unit = self._function._return_unit(*[get_dimensions(a)
for a in args])
else:
return_unit = self._function._return_unit
if return_unit == bool:
if not (isinstance(result, bool) or
np.asarray(result).dtype == bool):
raise TypeError('The function %s returned '
'%s, but it was expected '
'to return a boolean '
'value ' % (orig_func.__name__,
result))
elif (isinstance(return_unit, int) and return_unit == 1) or return_unit.dim is DIMENSIONLESS:
fail_for_dimension_mismatch(result,
return_unit,
'The function %s returned '
'{value}, but it was expected '
'to return a dimensionless '
'quantity' % orig_func.__name__,
value=result)
else:
fail_for_dimension_mismatch(result,
return_unit,
('The function %s returned '
'{value}, but it was expected '
'to return a quantity with '
'units %r') % (orig_func.__name__,
return_unit),
value=result)
return np.asarray(result)
self._implementations['numpy'] = FunctionImplementation(name=None,
code=wrapper_function,
dependencies=dependencies)
def add_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
self._implementations[target] = FunctionImplementation(name=name,
code=code,
dependencies=dependencies,
availability_check=availability_check,
namespace=namespace,
compiler_kwds=compiler_kwds)
def add_dynamic_implementation(self, target, code, namespace=None,
dependencies=None, availability_check=None,
name=None, compiler_kwds=None):
'''
Adds an "dynamic implementation" for this function. `code` and `namespace`
arguments are expected to be callables that will be called in
`Network.before_run` with the owner of the `CodeObject` as an argument.
        This allows generating code that depends on details of the context it
is run in, e.g. the ``dt`` of a clock.
'''
if not callable(code):
raise TypeError('code argument has to be a callable, is type %s instead' % type(code))
if namespace is not None and not callable(namespace):
            raise TypeError('namespace argument has to be a callable, is type %s instead' % type(namespace))
self._implementations[target] = FunctionImplementation(name=name,
code=code,
namespace=namespace,
dependencies=dependencies,
availability_check=availability_check,
dynamic=True,
compiler_kwds=compiler_kwds)
def __len__(self):
return len(self._implementations)
def __iter__(self):
return iter(self._implementations)
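# --- Illustrative sketch (not part of the original module): the fallback in
# `FunctionImplementationContainer.__getitem__` above walks the MRO of the key
# (or of its code generator class) and returns the first registered
# implementation. The same lookup idea in isolation, with hypothetical names:
import inspect
class _DemoBase:
    pass
class _DemoDerived(_DemoBase):
    pass
_demo_registry = {_DemoBase: 'implementation registered for the base class'}
def _demo_mro_lookup(cls):
    # Mirrors the inspect.getmro(K) loop above: the first class in the MRO
    # with a registered entry wins.
    for klass in inspect.getmro(cls):
        if klass in _demo_registry:
            return _demo_registry[klass]
    raise KeyError(cls)
# _demo_mro_lookup(_DemoDerived) -> 'implementation registered for the base class'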
def implementation(target, code=None, namespace=None, dependencies=None,
discard_units=None, name=None, **compiler_kwds):
'''
A simple decorator to extend user-written Python functions to work with code
generation in other languages.
Parameters
----------
target : str
Name of the code generation target (e.g. ``'cython'``) for which to add
an implementation.
code : str or dict-like, optional
What kind of code the target language expects is language-specific,
e.g. C++ code allows for a dictionary of code blocks instead of a
single string.
    namespace : dict-like, optional
A namespace dictionary (i.e. a mapping of names to values) that
should be added to a `CodeObject` namespace when using this function.
dependencies : dict-like, optional
A mapping of names to `Function` objects, for additional functions
needed by this function.
discard_units: bool, optional
Numpy functions can internally make use of the unit system. However,
during a simulation run, state variables are passed around as unitless
values for efficiency. If `discard_units` is set to ``False``, input
arguments will have units added to them so that the function can still
use units internally (the units will be stripped away from the return
value as well). Alternatively, if `discard_units` is set to ``True``,
the function will receive unitless values as its input. The namespace
of the function will be altered to make references to units (e.g.
``ms``) refer to the corresponding floating point values so that no
unit mismatch errors are raised. Note that this system cannot work in
        all cases, e.g. it does not work with functions that internally import
        values (e.g. do ``from brian2 import ms``) or access values with
        units indirectly (e.g. use ``brian2.ms`` instead of ``ms``). If no
value is given, defaults to the preference setting
`codegen.runtime.numpy.discard_units`.
name : str, optional
The name of the function in the target language. Should only be
specified if the function has to be renamed for the target language.
compiler_kwds : dict, optional
Additional keyword arguments will be transferred to the code generation
stage, e.g. for C++-based targets, the code can make use of additional
header files by providing a list of strings as the ``headers`` argument.
Notes
-----
While it is in principle possible to provide a numpy implementation
as an argument for this decorator, this is normally not necessary -- the
numpy implementation should be provided in the decorated function.
If this decorator is used with other decorators such as `check_units` or
`declare_types`, it should be the uppermost decorator (that is, the
last one to be applied).
Examples
--------
Sample usage::
@implementation('cpp',"""
#include<math.h>
inline double usersin(double x)
{
return sin(x);
}
""")
def usersin(x):
return sin(x)
'''
def do_user_implementation(func):
# Allow nesting of decorators
if isinstance(func, Function):
function = func
else:
function = Function(func)
if discard_units: # Add a numpy implementation that discards units
if not (target == 'numpy' and code is None):
raise TypeError(("'discard_units' can only be set for code "
"generation target 'numpy', without providing "
"any code."))
function.implementations.add_numpy_implementation(wrapped_func=func,
dependencies=dependencies,
discard_units=discard_units,
compiler_kwds=compiler_kwds)
else:
function.implementations.add_implementation(target, code=code,
dependencies=dependencies,
namespace=namespace,
name=name,
compiler_kwds=compiler_kwds)
# # copy any annotation attributes
# if hasattr(func, '_annotation_attributes'):
# for attrname in func._annotation_attributes:
# setattr(function, attrname, getattr(func, attrname))
# function._annotation_attributes = getattr(func, '_annotation_attributes', [])
return function
return do_user_implementation
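# --- Usage sketch (comment only; assumes `check_units` is importable from
# brian2.units, which this module does not import): as the Notes above say,
# `implementation` must be the outermost (last applied) decorator.
#
#     @implementation('cpp', '''
#         inline double usersquare(double x) { return x*x; }
#         ''')
#     @check_units(x=1, result=1)
#     def usersquare(x):
#         return x ** 2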
class SymbolicConstant(Constant):
'''
Class for representing constants (e.g. pi) that are understood by sympy.
'''
def __init__(self, name, sympy_obj, value):
super(SymbolicConstant, self).__init__(name, value=value)
self.sympy_obj = sympy_obj
################################################################################
# Standard functions and constants
################################################################################
def _exprel(x):
if x.is_zero:
return S.One
else:
return (sympy.exp(x) - S.One)/x
class exprel(sympy_Function):
"""
Represents ``(exp(x) - 1)/x``.
The benefit of using ``exprel(x)`` over ``(exp(x) - 1)/x``
is that the latter is prone to cancellation under finite precision
arithmetic when x is close to zero, and cannot be evaluated when x is
equal to zero.
"""
nargs = 1
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex == 1:
return (sympy.exp(*self.args)*(self.args[0] - S.One) + S.One)/self.args[0]**2
else:
raise sympy.ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _exprel(*self.args)
def _eval_rewrite_as_exp(self, arg, **kwargs):
if arg.is_zero:
return S.One
else:
return (sympy.exp(arg) - S.One)/arg
_eval_rewrite_as_tractable = _eval_rewrite_as_exp
@classmethod
def eval(cls, arg):
if arg is None:
return None
if arg.is_zero:
return S.One
exp_arg = sympy.exp.eval(arg)
if exp_arg is not None:
return (exp_arg - S.One)/arg
def _eval_is_real(self):
return self.args[0].is_real
def _eval_is_finite(self):
return self.args[0].is_finite
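# --- Numerical illustration (not part of the original module): why exprel
# beats the naive formula near zero. For small x the true value is ~1 + x/2,
# but (exp(x) - 1)/x loses digits to cancellation while expm1(x)/x does not.
def _exprel_cancellation_demo(x=1e-12):
    import math
    naive = (math.exp(x) - 1.0) / x   # suffers catastrophic cancellation
    stable = math.expm1(x) / x        # numerically what exprel amounts to
    return naive, stable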
_infinity_int = 1073741823 # maximum 32bit integer divided by 2
def timestep(t, dt):
'''
Converts a given time to an integer time step. This function slightly shifts
the time before dividing it by ``dt`` to make sure that multiples of ``dt``
do not end up in the preceding time step due to floating point issues. This
function is used in the refractoriness calculation.
.. versionadded:: 2.1.3
Parameters
----------
t : np.ndarray, float, Quantity
The time to convert.
dt : float or Quantity
The length of a simulation time step.
Returns
-------
ts : np.ndarray, np.int64
The time step corresponding to the given time.
Notes
-----
This function cannot handle infinity values, use big values instead (e.g.
a `NeuronGroup` will use ``-1e4*second`` as the value of the ``lastspike``
variable for neurons that never spiked).
'''
elapsed_steps = np.array((t + 1e-3*dt)/dt, dtype=np.int64)
if elapsed_steps.shape == ():
elapsed_steps = elapsed_steps.item()
return elapsed_steps
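# --- Illustration (not part of the original module): the 1e-3*dt shift above
# keeps exact multiples of dt in the right step. Without it, e.g. 0.3/0.1
# evaluates to 2.999... on IEEE-754 doubles and truncates to step 2.
def _timestep_shift_demo():
    t, dt = 0.3, 0.1
    unshifted = int(t / dt)              # 2 due to floating point
    shifted = int((t + 1e-3 * dt) / dt)  # 3, matching timestep(t, dt)
    return unshifted, shifted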
DEFAULT_FUNCTIONS = {
# numpy functions that have the same name in numpy and math.h
'cos': Function(unitsafe.cos,
sympy_func=sympy.functions.elementary.trigonometric.cos),
'sin': Function(unitsafe.sin,
sympy_func=sympy.functions.elementary.trigonometric.sin),
'tan': Function(unitsafe.tan,
sympy_func=sympy.functions.elementary.trigonometric.tan),
'cosh': Function(unitsafe.cosh,
sympy_func=sympy.functions.elementary.hyperbolic.cosh),
'sinh': Function(unitsafe.sinh,
sympy_func=sympy.functions.elementary.hyperbolic.sinh),
'tanh': Function(unitsafe.tanh,
sympy_func=sympy.functions.elementary.hyperbolic.tanh),
'exp': Function(unitsafe.exp,
sympy_func=sympy.functions.elementary.exponential.exp),
'log': Function(unitsafe.log,
sympy_func=sympy.functions.elementary.exponential.log),
'log10': Function(unitsafe.log10,
sympy_func=sympy_cfunctions.log10),
'expm1': Function(unitsafe.expm1,
sympy_func=sympy_cfunctions.expm1),
'exprel': Function(unitsafe.exprel,
sympy_func=exprel),
'log1p': Function(unitsafe.log1p,
sympy_func=sympy_cfunctions.log1p),
'sqrt': Function(np.sqrt,
sympy_func=sympy.functions.elementary.miscellaneous.sqrt,
arg_units=[None], return_unit=lambda u: u**0.5),
'ceil': Function(np.ceil,
sympy_func=sympy.functions.elementary.integers.ceiling,
arg_units=[None], return_unit=lambda u: u),
'floor': Function(np.floor,
sympy_func=sympy.functions.elementary.integers.floor,
arg_units=[None], return_unit=lambda u: u),
# numpy functions that have a different name in numpy and math.h
'arccos': Function(unitsafe.arccos,
sympy_func=sympy.functions.elementary.trigonometric.acos),
'arcsin': Function(unitsafe.arcsin,
sympy_func=sympy.functions.elementary.trigonometric.asin),
'arctan': Function(unitsafe.arctan,
sympy_func=sympy.functions.elementary.trigonometric.atan),
'abs': Function(np.abs, return_type='highest',
sympy_func=sympy.functions.elementary.complexes.Abs,
arg_units=[None], return_unit=lambda u: u),
'sign': Function(pyfunc=np.sign, sympy_func=sympy.sign, return_type='highest',
arg_units=[None], return_unit=1),
# functions that need special treatment
'rand': Function(pyfunc=rand, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'randn': Function(pyfunc=randn, arg_units=[], return_unit=1, stateless=False, auto_vectorise=True),
'poisson': Function(pyfunc=np.random.poisson, arg_units=[1], return_unit=1, return_type='integer',
stateless=False, auto_vectorise=True),
'clip': Function(pyfunc=np.clip,
arg_units=[None, 'a', 'a'],
arg_names=['a', 'a_min', 'a_max'],
return_type='highest',
return_unit=lambda u1, u2, u3: u1),
'int': Function(pyfunc=np.int_, return_type='integer',
arg_units=[1], return_unit=1),
'timestep': Function(pyfunc=timestep, return_type='integer',
arg_units=[second, second], return_unit=1)
}
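# --- Added note: in the table above, `arg_units`/`return_unit` describe the
# unit contract rather than the numerics. A callable `return_unit` derives the
# output unit from the input units, e.g. for 'sqrt' the lambda maps an input
# unit u to u**0.5, so the square root of an area comes out as a length.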
DEFAULT_CONSTANTS = {'pi': SymbolicConstant('pi', sympy.pi, value=np.pi),
'e': SymbolicConstant('e', sympy.E, value=np.e),
'inf': SymbolicConstant('inf', S.Infinity,
value=np.inf),
'-inf': SymbolicConstant('-inf', S.NegativeInfinity,
value=-np.inf)}
|
get_sentiment
|
Analyzes the sentiment of each string in a list of text contents.
Args:
instances_content: The list of text contents to analyze.
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import os
import socket
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from google.api_core import retry
from google.api_core import exceptions
from google.cloud import language_v1
from google.cloud.language_v1 import enums
TIMEOUT_IN_SEC = 60 * 2 # 2 minutes timeout limit
socket.setdefaulttimeout(TIMEOUT_IN_SEC)
PROJECT_ID = os.getenv('PROJECT_ID')
# MASKED: get_sentiment function (lines 44-78)
def prediction_helper(messages):
"""Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
"""
# Handle single string.
if not isinstance(messages, list):
messages = [messages]
# Messages from PubSub are JSON strings
instances = list(map(lambda message: json.loads(message), messages))
# Estimate the sentiment of the 'text' of each tweet
scores = get_sentiment(
[instance['text'] for instance in instances if instance.get('text')])
if len(scores) == len(instances):
for i, instance in enumerate(instances):
logging.info('Processed {} instances.'.format(len(instances)))
instance['sentiment'] = scores[i]
return instances
logging.error('Invalid scores {} instances {}'.format(len(scores),
len(instances)))
logging.error(instances)
return
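# --- Example input (added comment; field values are placeholders inferred from
# the BigQuery schema defined in run()): each Pub/Sub message is a JSON string
# such as
#   '{"id": "1", "text": "what a day", "user_id": "42",
#     "posted_at": "2020-01-01 00:00:00", "favorite_count": 0,
#     "retweet_count": 0, "media": null}'
# prediction_helper() parses the JSON and adds a float 'sentiment' key to each
# tweet dict before it is written to BigQuery.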
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
class AddTimestamps(beam.DoFn):
@staticmethod
def process(element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
def run(args, pipeline_args=None):
"""Executes Pipeline.
:param args:
:param pipeline_args:
:return:
"""
"""Build and run the pipeline."""
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
pipeline_options.view_as(StandardOptions).runner = args.runner
# Run on Cloud DataFlow by default
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = PROJECT_ID
google_cloud_options.job_name = 'pubsub-api-bigquery'
google_cloud_options.staging_location = args.staging_location
google_cloud_options.temp_location = args.temp_location
google_cloud_options.region = args.region
p = beam.Pipeline(options=pipeline_options)
lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
topic=args.input_topic,
with_attributes=False,
id_label='tweet_id') # TODO: Change to PubSub id.
# Window them, and batch them into batches. (Not too large)
output_tweets = (lines | 'assign window key' >> beam.WindowInto(
window.FixedWindows(args.window_size))
| 'batch into n batches' >> BatchElements(
min_batch_size=args.min_batch_size,
max_batch_size=args.max_batch_size)
| 'predict sentiment' >> beam.FlatMap(
lambda messages: prediction_helper(messages))
)
# Make explicit BQ schema for output tables:
bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "user_id", "type": "STRING"},
{"name": "sentiment", "type": "FLOAT"},
{"name": "posted_at", "type": "TIMESTAMP"},
{"name": "favorite_count", "type": "INTEGER"},
{"name": "retweet_count", "type": "INTEGER"},
{"name": "media", "type": "STRING"},
]}
bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))
# Write to BigQuery
output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
table=args.bigquery_table,
dataset=args.bigquery_dataset,
schema=bq_schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
project=PROJECT_ID
)
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-topic',
help='The Cloud Pub/Sub topic to read from.\n'
'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>',
required=True
)
parser.add_argument(
'--region',
help='The DataFlow region',
default='us-central1'
)
parser.add_argument(
'--staging-location',
help='The DataFlow staging location',
default='gs://<bucket_name>/staging/',
required=True
)
parser.add_argument(
'--temp-location',
help='The DataFlow temp location',
default='gs://<bucket_name>/tmp/',
required=True
)
parser.add_argument(
'--bigquery-dataset',
help='BigQuery dataset',
required=True
)
parser.add_argument(
'--bigquery-table',
        help='BigQuery output table',
required=True
)
parser.add_argument(
'--window-size',
type=int,
default=60,
help="Output file's window size in number of seconds",
)
parser.add_argument(
'--min-batch-size',
type=int,
default=1,
help='Min batch size for Windowing',
)
parser.add_argument(
'--max-batch-size',
type=int,
default=100,
        help='Max batch size for Windowing',
)
parser.add_argument(
'--runner',
type=str,
default='DataflowRunner',
help='DataFlow running mode',
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args,
pipeline_args
)
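# --- Example invocation (added comment; the script name and the bucket,
# dataset and table values are placeholders):
#   PROJECT_ID=<project> python <this_script>.py \
#       --input-topic projects/<PROJECT_NAME>/topics/<TOPIC_NAME> \
#       --staging-location gs://<bucket_name>/staging/ \
#       --temp-location gs://<bucket_name>/tmp/ \
#       --bigquery-dataset <dataset> \
#       --bigquery-table <table> \
#       --runner DataflowRunner
# --region defaults to us-central1 and --window-size to 60 seconds.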
|
def get_sentiment(instances_content):
"""Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
scores = []
client = language_v1.LanguageServiceClient()
encoding_type = enums.EncodingType.UTF8
language = 'en'
type_ = enums.Document.Type.PLAIN_TEXT
for content in instances_content:
        # `unicode` exists only in Python 2; on Python 3 decode bytes and
        # stringify anything else so the API receives plain text.
        content = content.decode('utf-8') if isinstance(
            content, bytes) else str(content)
document = {'content': content, 'type': type_, 'language': language}
try:
response = client.analyze_sentiment(document,
encoding_type=encoding_type,
timeout=30,
retry=retry.Retry(deadline=60))
# Get overall sentiment of the input document
if response.document_sentiment.score:
scores.append(response.document_sentiment.score)
else:
scores.append(-1)
logging.error(
'Document sentiment score not found for {}'.format(content))
except exceptions.GoogleAPICallError as e:
logging.exception(e)
except exceptions.RetryError as e:
logging.exception(e)
except ValueError as e:
logging.exception(e)
return scores
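# --- Usage sketch (added comment): scores come back in input order, one float
# in [-1.0, 1.0] per string, e.g.
#   get_sentiment(["I love this product", "This is terrible"])
# Note that a neutral score of 0.0 is falsy, so the check above treats it as
# missing and records -1 instead; failed API calls are logged and skipped,
# which can leave fewer scores than inputs.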
| 44
| 78
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import os
import socket
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from google.api_core import retry
from google.api_core import exceptions
from google.cloud import language_v1
from google.cloud.language_v1 import enums
TIMEOUT_IN_SEC = 60 * 2 # 2 minutes timeout limit
socket.setdefaulttimeout(TIMEOUT_IN_SEC)
PROJECT_ID = os.getenv('PROJECT_ID')
def get_sentiment(instances_content):
"""Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
scores = []
client = language_v1.LanguageServiceClient()
encoding_type = enums.EncodingType.UTF8
language = 'en'
type_ = enums.Document.Type.PLAIN_TEXT
for content in instances_content:
        # `unicode` exists only in Python 2; on Python 3 decode bytes and
        # stringify anything else so the API receives plain text.
        content = content.decode('utf-8') if isinstance(
            content, bytes) else str(content)
document = {'content': content, 'type': type_, 'language': language}
try:
response = client.analyze_sentiment(document,
encoding_type=encoding_type,
timeout=30,
retry=retry.Retry(deadline=60))
# Get overall sentiment of the input document
if response.document_sentiment.score:
scores.append(response.document_sentiment.score)
else:
scores.append(-1)
logging.error(
'Document sentiment score not found for {}'.format(content))
except exceptions.GoogleAPICallError as e:
logging.exception(e)
except exceptions.RetryError as e:
logging.exception(e)
except ValueError as e:
logging.exception(e)
return scores
def prediction_helper(messages):
"""Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
"""
# Handle single string.
if not isinstance(messages, list):
messages = [messages]
# Messages from PubSub are JSON strings
instances = list(map(lambda message: json.loads(message), messages))
# Estimate the sentiment of the 'text' of each tweet
scores = get_sentiment(
[instance['text'] for instance in instances if instance.get('text')])
if len(scores) == len(instances):
for i, instance in enumerate(instances):
logging.info('Processed {} instances.'.format(len(instances)))
instance['sentiment'] = scores[i]
return instances
logging.error('Invalid scores {} instances {}'.format(len(scores),
len(instances)))
logging.error(instances)
return
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
class AddTimestamps(beam.DoFn):
@staticmethod
def process(element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
def run(args, pipeline_args=None):
"""Executes Pipeline.
:param args:
:param pipeline_args:
:return:
"""
"""Build and run the pipeline."""
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
pipeline_options.view_as(StandardOptions).runner = args.runner
# Run on Cloud DataFlow by default
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = PROJECT_ID
google_cloud_options.job_name = 'pubsub-api-bigquery'
google_cloud_options.staging_location = args.staging_location
google_cloud_options.temp_location = args.temp_location
google_cloud_options.region = args.region
p = beam.Pipeline(options=pipeline_options)
lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
topic=args.input_topic,
with_attributes=False,
id_label='tweet_id') # TODO: Change to PubSub id.
# Window them, and batch them into batches. (Not too large)
output_tweets = (lines | 'assign window key' >> beam.WindowInto(
window.FixedWindows(args.window_size))
| 'batch into n batches' >> BatchElements(
min_batch_size=args.min_batch_size,
max_batch_size=args.max_batch_size)
| 'predict sentiment' >> beam.FlatMap(
lambda messages: prediction_helper(messages))
)
# Make explicit BQ schema for output tables:
bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "user_id", "type": "STRING"},
{"name": "sentiment", "type": "FLOAT"},
{"name": "posted_at", "type": "TIMESTAMP"},
{"name": "favorite_count", "type": "INTEGER"},
{"name": "retweet_count", "type": "INTEGER"},
{"name": "media", "type": "STRING"},
]}
bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))
# Write to BigQuery
output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
table=args.bigquery_table,
dataset=args.bigquery_dataset,
schema=bq_schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
project=PROJECT_ID
)
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-topic',
help='The Cloud Pub/Sub topic to read from.\n'
'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>',
required=True
)
parser.add_argument(
'--region',
help='The DataFlow region',
default='us-central1'
)
parser.add_argument(
'--staging-location',
help='The DataFlow staging location',
default='gs://<bucket_name>/staging/',
required=True
)
parser.add_argument(
'--temp-location',
help='The DataFlow temp location',
default='gs://<bucket_name>/tmp/',
required=True
)
parser.add_argument(
'--bigquery-dataset',
help='BigQuery dataset',
required=True
)
parser.add_argument(
'--bigquery-table',
        help='BigQuery output table',
required=True
)
parser.add_argument(
'--window-size',
type=int,
default=60,
help="Output file's window size in number of seconds",
)
parser.add_argument(
'--min-batch-size',
type=int,
default=1,
help='Min batch size for Windowing',
)
parser.add_argument(
'--max-batch-size',
type=int,
default=100,
        help='Max batch size for Windowing',
)
parser.add_argument(
'--runner',
type=str,
default='DataflowRunner',
help='DataFlow running mode',
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args,
pipeline_args
)
|
run
|
Builds and runs the streaming pipeline.
:param args: parsed known command-line arguments.
:param pipeline_args: additional arguments forwarded to Beam's PipelineOptions.
:return: None
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import os
import socket
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from google.api_core import retry
from google.api_core import exceptions
from google.cloud import language_v1
from google.cloud.language_v1 import enums
TIMEOUT_IN_SEC = 60 * 2 # 2 minutes timeout limit
socket.setdefaulttimeout(TIMEOUT_IN_SEC)
PROJECT_ID = os.getenv('PROJECT_ID')
def get_sentiment(instances_content):
"""Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
scores = []
client = language_v1.LanguageServiceClient()
encoding_type = enums.EncodingType.UTF8
language = 'en'
type_ = enums.Document.Type.PLAIN_TEXT
for content in instances_content:
        # `unicode` exists only in Python 2; on Python 3 decode bytes and
        # stringify anything else so the API receives plain text.
        content = content.decode('utf-8') if isinstance(
            content, bytes) else str(content)
document = {'content': content, 'type': type_, 'language': language}
try:
response = client.analyze_sentiment(document,
encoding_type=encoding_type,
timeout=30,
retry=retry.Retry(deadline=60))
# Get overall sentiment of the input document
if response.document_sentiment.score:
scores.append(response.document_sentiment.score)
else:
scores.append(-1)
logging.error(
'Document sentiment score not found for {}'.format(content))
except exceptions.GoogleAPICallError as e:
logging.exception(e)
except exceptions.RetryError as e:
logging.exception(e)
except ValueError as e:
logging.exception(e)
return scores
def prediction_helper(messages):
"""Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
"""
# Handle single string.
if not isinstance(messages, list):
messages = [messages]
# Messages from PubSub are JSON strings
instances = list(map(lambda message: json.loads(message), messages))
# Estimate the sentiment of the 'text' of each tweet
scores = get_sentiment(
[instance['text'] for instance in instances if instance.get('text')])
if len(scores) == len(instances):
for i, instance in enumerate(instances):
logging.info('Processed {} instances.'.format(len(instances)))
instance['sentiment'] = scores[i]
return instances
logging.error('Invalid scores {} instances {}'.format(len(scores),
len(instances)))
logging.error(instances)
return
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
class AddTimestamps(beam.DoFn):
@staticmethod
def process(element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
# MASKED: run function (lines 153-214)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-topic',
help='The Cloud Pub/Sub topic to read from.\n'
'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>',
required=True
)
parser.add_argument(
'--region',
help='The DataFlow region',
default='us-central1'
)
parser.add_argument(
'--staging-location',
help='The DataFlow staging location',
default='gs://<bucket_name>/staging/',
required=True
)
parser.add_argument(
'--temp-location',
help='The DataFlow temp location',
default='gs://<bucket_name>/tmp/',
required=True
)
parser.add_argument(
'--bigquery-dataset',
help='BigQuery dataset',
required=True
)
parser.add_argument(
'--bigquery-table',
        help='BigQuery output table',
required=True
)
parser.add_argument(
'--window-size',
type=int,
default=60,
help="Output file's window size in number of seconds",
)
parser.add_argument(
'--min-batch-size',
type=int,
default=1,
help='Min batch size for Windowing',
)
parser.add_argument(
'--max-batch-size',
type=int,
default=100,
        help='Max batch size for Windowing',
)
parser.add_argument(
'--runner',
type=str,
default='DataflowRunner',
help='DataFlow running mode',
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args,
pipeline_args
)
|
def run(args, pipeline_args=None):
"""Executes Pipeline.
:param args:
:param pipeline_args:
:return:
"""
"""Build and run the pipeline."""
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
pipeline_options.view_as(StandardOptions).runner = args.runner
# Run on Cloud DataFlow by default
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = PROJECT_ID
google_cloud_options.job_name = 'pubsub-api-bigquery'
google_cloud_options.staging_location = args.staging_location
google_cloud_options.temp_location = args.temp_location
google_cloud_options.region = args.region
p = beam.Pipeline(options=pipeline_options)
lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
topic=args.input_topic,
with_attributes=False,
id_label='tweet_id') # TODO: Change to PubSub id.
# Window them, and batch them into batches. (Not too large)
output_tweets = (lines | 'assign window key' >> beam.WindowInto(
window.FixedWindows(args.window_size))
| 'batch into n batches' >> BatchElements(
min_batch_size=args.min_batch_size,
max_batch_size=args.max_batch_size)
| 'predict sentiment' >> beam.FlatMap(
lambda messages: prediction_helper(messages))
)
# Make explicit BQ schema for output tables:
bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "user_id", "type": "STRING"},
{"name": "sentiment", "type": "FLOAT"},
{"name": "posted_at", "type": "TIMESTAMP"},
{"name": "favorite_count", "type": "INTEGER"},
{"name": "retweet_count", "type": "INTEGER"},
{"name": "media", "type": "STRING"},
]}
bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))
# Write to BigQuery
output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
table=args.bigquery_table,
dataset=args.bigquery_dataset,
schema=bq_schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
project=PROJECT_ID
)
result = p.run()
result.wait_until_finish()
| 153
| 214
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A streaming python pipeline to read in PubSub tweets and perform
classification using Prediction API"""
import argparse
import datetime
import json
import logging
import os
import socket
import apache_beam as beam
import apache_beam.transforms.window as window
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.options.pipeline_options import GoogleCloudOptions
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms.util import BatchElements
from google.api_core import retry
from google.api_core import exceptions
from google.cloud import language_v1
from google.cloud.language_v1 import enums
TIMEOUT_IN_SEC = 60 * 2 # 2 minutes timeout limit
socket.setdefaulttimeout(TIMEOUT_IN_SEC)
PROJECT_ID = os.getenv('PROJECT_ID')
def get_sentiment(instances_content):
"""Analyzing Sentiment in a String
Args:
text_content The text content to analyze
"""
scores = []
client = language_v1.LanguageServiceClient()
encoding_type = enums.EncodingType.UTF8
language = 'en'
type_ = enums.Document.Type.PLAIN_TEXT
for content in instances_content:
        # `unicode` exists only in Python 2; on Python 3 decode bytes and
        # stringify anything else so the API receives plain text.
        content = content.decode('utf-8') if isinstance(
            content, bytes) else str(content)
document = {'content': content, 'type': type_, 'language': language}
try:
response = client.analyze_sentiment(document,
encoding_type=encoding_type,
timeout=30,
retry=retry.Retry(deadline=60))
# Get overall sentiment of the input document
if response.document_sentiment.score:
scores.append(response.document_sentiment.score)
else:
scores.append(-1)
logging.error(
'Document sentiment score not found for {}'.format(content))
except exceptions.GoogleAPICallError as e:
logging.exception(e)
except exceptions.RetryError as e:
logging.exception(e)
except ValueError as e:
logging.exception(e)
return scores
def prediction_helper(messages):
"""Processes PubSub messages and calls AI Platform prediction.
:param messages:
:return:
"""
# Handle single string.
if not isinstance(messages, list):
messages = [messages]
# Messages from PubSub are JSON strings
instances = list(map(lambda message: json.loads(message), messages))
# Estimate the sentiment of the 'text' of each tweet
scores = get_sentiment(
[instance['text'] for instance in instances if instance.get('text')])
if len(scores) == len(instances):
for i, instance in enumerate(instances):
logging.info('Processed {} instances.'.format(len(instances)))
instance['sentiment'] = scores[i]
return instances
logging.error('Invalid scores {} instances {}'.format(len(scores),
len(instances)))
logging.error(instances)
return
class GroupWindowsIntoBatches(beam.PTransform):
"""A composite transform that groups Pub/Sub messages based on publish
time and outputs a list of dictionaries, where each contains one message
and its publish timestamp.
"""
def __init__(self, window_size):
# Convert minutes into seconds.
self.window_size = int(window_size * 60)
def expand(self, pcoll):
return (
pcoll
# Assigns window info to each Pub/Sub message based on its
# publish timestamp.
| "Window into Fixed Intervals"
>> beam.WindowInto(window.FixedWindows(self.window_size))
| "Add timestamps to messages" >> beam.ParDo(AddTimestamps())
# Use a dummy key to group the elements in the same window.
# Note that all the elements in one window must fit into memory
# for this. If the windowed elements do not fit into memory,
# please consider using `beam.util.BatchElements`.
# https://beam.apache.org/releases/pydoc/current/apache_beam.transforms.util.html#apache_beam.transforms.util.BatchElements
| "Add Dummy Key" >> beam.Map(lambda elem: (None, elem))
| "Groupby" >> beam.GroupByKey()
| "Abandon Dummy Key" >> beam.MapTuple(lambda _, val: val)
)
class AddTimestamps(beam.DoFn):
@staticmethod
def process(element, publish_time=beam.DoFn.TimestampParam):
"""Processes each incoming windowed element by extracting the Pub/Sub
message and its publish timestamp into a dictionary. `publish_time`
defaults to the publish timestamp returned by the Pub/Sub server. It
is bound to each element by Beam at runtime.
"""
yield {
"message_body": element.decode("utf-8"),
"publish_time": datetime.datetime.utcfromtimestamp(
float(publish_time)
).strftime("%Y-%m-%d %H:%M:%S.%f"),
}
def run(args, pipeline_args=None):
"""Executes Pipeline.
:param args:
:param pipeline_args:
:return:
"""
"""Build and run the pipeline."""
# We use the save_main_session option because one or more DoFn's in this
# workflow rely on global context (e.g., a module imported at module level).
pipeline_options = PipelineOptions(
pipeline_args, streaming=True, save_main_session=True
)
pipeline_options.view_as(StandardOptions).runner = args.runner
# Run on Cloud DataFlow by default
google_cloud_options = pipeline_options.view_as(GoogleCloudOptions)
google_cloud_options.project = PROJECT_ID
google_cloud_options.job_name = 'pubsub-api-bigquery'
google_cloud_options.staging_location = args.staging_location
google_cloud_options.temp_location = args.temp_location
google_cloud_options.region = args.region
p = beam.Pipeline(options=pipeline_options)
lines = p | 'read in tweets' >> beam.io.ReadFromPubSub(
topic=args.input_topic,
with_attributes=False,
id_label='tweet_id') # TODO: Change to PubSub id.
# Window them, and batch them into batches. (Not too large)
output_tweets = (lines | 'assign window key' >> beam.WindowInto(
window.FixedWindows(args.window_size))
| 'batch into n batches' >> BatchElements(
min_batch_size=args.min_batch_size,
max_batch_size=args.max_batch_size)
| 'predict sentiment' >> beam.FlatMap(
lambda messages: prediction_helper(messages))
)
# Make explicit BQ schema for output tables:
bq_schema_json = {"fields": [{"name": "id", "type": "STRING"},
{"name": "text", "type": "STRING"},
{"name": "user_id", "type": "STRING"},
{"name": "sentiment", "type": "FLOAT"},
{"name": "posted_at", "type": "TIMESTAMP"},
{"name": "favorite_count", "type": "INTEGER"},
{"name": "retweet_count", "type": "INTEGER"},
{"name": "media", "type": "STRING"},
]}
bq_schema = parse_table_schema_from_json(json.dumps(bq_schema_json))
# Write to BigQuery
output_tweets | 'store twitter posts' >> beam.io.WriteToBigQuery(
table=args.bigquery_table,
dataset=args.bigquery_dataset,
schema=bq_schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
project=PROJECT_ID
)
result = p.run()
result.wait_until_finish()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument(
'--input-topic',
help='The Cloud Pub/Sub topic to read from.\n'
'projects/<PROJECT_NAME>/topics/<TOPIC_NAME>',
required=True
)
parser.add_argument(
'--region',
help='The DataFlow region',
default='us-central1'
)
parser.add_argument(
'--staging-location',
help='The DataFlow staging location',
default='gs://<bucket_name>/staging/',
required=True
)
parser.add_argument(
'--temp-location',
help='The DataFlow temp location',
default='gs://<bucket_name>/tmp/',
required=True
)
parser.add_argument(
'--bigquery-dataset',
help='BigQuery dataset',
required=True
)
parser.add_argument(
'--bigquery-table',
        help='BigQuery output table',
required=True
)
parser.add_argument(
'--window-size',
type=int,
default=60,
help="Output file's window size in number of seconds",
)
parser.add_argument(
'--min-batch-size',
type=int,
default=1,
help='Min batch size for Windowing',
)
parser.add_argument(
'--max-batch-size',
type=int,
default=100,
        help='Max batch size for Windowing',
)
parser.add_argument(
'--runner',
type=str,
default='DataflowRunner',
help='DataFlow running mode',
)
known_args, pipeline_args = parser.parse_known_args()
run(
known_args,
pipeline_args
)
|
_get_break_loop_node
|
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
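# --- Illustration (added comment): how the style regexes above classify names.
#   SnakeCaseStyle.DEFAULT_NAME_RGX.match("my_function")   # matches
#   SnakeCaseStyle.DEFAULT_NAME_RGX.match("MyFunction")    # no match
#   PascalCaseStyle.CLASS_NAME_RGX.match("MyClass")        # matches
#   UpperCaseStyle.CONST_NAME_RGX.match("MAX_SIZE")        # matches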
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
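# --- Example of the pattern detected above (added comment): assigning a
# fallback inside the ImportError handler for a name imported in the try body
# counts as redefining the import, so _redefines_import returns True for the
# AssignName node of `dumps` below.
#
#     try:
#         from simplejson import dumps
#     except ImportError:
#         dumps = None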
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
# MASKED: _get_break_loop_node function (lines 266-285)
def _loop_exits_early(loop):
"""
    Returns True if the loop may end in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
        bool: True if the loop may end in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
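# --- Illustration (added comment): a break that belongs to a nested loop does
# not make the outer loop "exit early", so only the inner loop counts here.
#
#     for item in items:      # _loop_exits_early(outer_for)   -> False
#         while cond:         # _loop_exits_early(inner_while) -> True
#             break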
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g. doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statements in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful about the inferred type of the node, so just check if format
# was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before a
try...finally block, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a distant descendant of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
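# Illustrative example (editorial sketch): a return inside a finally clause
# discards any in-flight exception, hence lost-exception (W0150):
#
#     try:
#         risky()
#     finally:
#         return "done"     # swallows whatever risky() raised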
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match,
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
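# Illustrative examples (editorial sketch):
#     x = x              # self-assigning-variable (W0127)
#     a, b = a, b        # flagged for both a and b
#     a, b = b, a        # fine: a genuine swap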
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
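# Illustrative sketch (editorial note): for the "class" name type the loop above
# generates two options, roughly
#     ("class-naming-style", {"default": "PascalCase", "type": "choice", ...})
#     ("class-rgx",          {"default": None, "type": "regexp", ...})
# and likewise for every other entry of KNOWN_NAME_TYPES (underscores become dashes).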
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced via the global statement aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
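# Illustrative example (not part of the upstream pylint source; names are made up):
# comparing to a singleton with == or != triggers the message above, e.g.
#
#     if x == None:      # [singleton-comparison] -> suggest "x is None"
#         ...
#     if flag == True:   # [singleton-comparison] -> suggest "flag is True" or "bool(flag)"
#         ...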
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
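# Illustrative example (not part of the upstream pylint source; names are made up):
# NaN never compares equal to itself, so the checker above suggests math.isnan(), e.g.
#
#     if value == float("nan"):   # [nan-comparison] -> suggest "math.isnan(value)"
#         ...
#     if value != numpy.NaN:      # [nan-comparison] -> suggest "not math.isnan(value)"
#         ...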
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is comparison of bare callable
# with non bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
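# Illustrative example (not part of the upstream pylint source; names are made up):
# the helper above recognises the common fallback-import idiom, where a name bound
# in an except handler intentionally redefines an import from the try body, e.g.
#
#     try:
#         import simplejson as json
#     except ImportError:
#         json = None      # redefines the import alias; not reported as a redefinition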
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns true if a loop may exit early through a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may exit early through a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
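# Illustrative example (not part of the upstream pylint source; names are made up):
# the helper above is what makes an "else" clause on a loop acceptable only when
# the loop itself can break, e.g.
#
#     for item in items:
#         if item == target:
#             break            # loop may exit early -> else clause is meaningful
#     else:
#         handle_missing()
#
#     while pending:           # no break anywhere -> "else" would be useless-else-on-loop
#         pending.pop()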
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g. doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statements with a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful on inferred type of node, so here just check if format
# was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If we find, before reaching the try...finally block, a parent whose type is
in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand-...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match,
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
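# Illustrative note (an assumption added for clarity, not part of the original
# module): for each entry in KNOWN_NAME_TYPES the factory above yields two
# options, e.g. "class" -> "class-naming-style" (default "PascalCase") and
# "class-rgx", while "class_attribute" -> "class-attribute-naming-style" and
# "class-attribute-rgx", since underscores in the name type become dashes.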
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
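# Illustrative example (hypothetical user code, added for clarity): when pylint
# itself runs on Python < 3.7, the assignment below is flagged with
# assign-to-new-keyword because "async" becomes a keyword in 3.7:
#
#     async = make_event_loop()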
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced via the global statement aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
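# Illustrative example (hypothetical code, added for clarity): a docstring
# assigned through __doc__ is picked up by the helper above and therefore
# accepted by the docstring checker:
#
#     class Widget:
#         __doc__ = "Docstring assigned explicitly instead of a literal."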
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is a comparison of a
# bare callable with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
_loop_exits_early
|
Returns true if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
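# Illustrative examples (assumptions added for clarity) of how the styles above
# classify names via their regexes:
#   SnakeCaseStyle.DEFAULT_NAME_RGX matches "load_config" but not "loadConfig"
#   CamelCaseStyle.DEFAULT_NAME_RGX matches "loadConfig" but not "load_config"
#   PascalCaseStyle.CLASS_NAME_RGX matches "ConfigLoader" but not "config_loader"
#   UpperCaseStyle.CONST_NAME_RGX matches "MAX_RETRIES" as well as "__version__"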
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
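# For clarity (illustrative, derived from the definition above), the mapping
# resolves to entries such as:
#   "builtins.list" -> "[]", "builtins.dict" -> "{}", "builtins.set" -> "set()",
#   "collections.defaultdict" -> "collections.defaultdict()", and so on.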
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
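# Illustrative example (hypothetical code) of the pattern detected above:
#
#     try:
#         import simplejson as json
#     except ImportError:
#         json = None   # AssignName "json" redefines the import alias, so the
#                       # name checker will not report it as a badly named const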
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
# MASKED: _loop_exits_early function (lines 288-309)
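# The original body of _loop_exits_early is masked above. The function below is
# an illustrative sketch reconstructed from its docstring and from
# _get_break_loop_node; it is an assumption, not necessarily the original code.
def _loop_exits_early(loop):
    """Returns true if a loop may end with a break statement.
    Args:
        loop (astroid.For, astroid.While): the loop node inspected.
    Returns:
        bool: True if the loop may end with a break statement, False otherwise.
    """
    loop_nodes = (astroid.For, astroid.While)
    definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
    # loops nested inside `loop`; a break that belongs to one of them does not
    # exit `loop` itself
    inner_loop_nodes = [
        _node
        for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
        if _node != loop
    ]
    # `loop` exits early if at least one break is bound to `loop` rather than
    # to a nested loop
    return any(
        _node
        for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
        if _get_break_loop_node(_node) not in inner_loop_nodes
    )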
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated in its own
# body, we assume that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
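# Illustrative sketch, not part of the original pylint source: the plain
# redefinition reported above, versus an exempted conditional definition.
#
#     def helper():
#         return 1
#     def helper():                # [function-redefined] helper already defined above
#         return 2
#
#     if not helper:               # exempted by the "if not <func>" branch above
#         def helper():
#             return 3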
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statement in a separated message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# the inferred type of the node is uncertain, so just check whether format
# was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
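# Illustrative sketch, not part of the original pylint source: a dict literal
# binding the same constant key twice.
#
#     settings = {"debug": True, "debug": False}   # [duplicate-key] 'debug'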
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before the
try...finally block, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match,
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
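# Illustrative sketch, not part of the original pylint source: assignments
# whose right-hand side is the target itself.
#
#     total = total                    # [self-assigning-variable] 'total'
#     first, second = second, first    # fine: a swap, the paired names differ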
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
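# Illustrative sketch, not part of the original pylint source ("get_pair" is a
# placeholder): the same name bound twice in one unpacking assignment.
#
#     first, first = get_pair()        # [redeclared-assigned-name] 'first'
#     value, _ = get_pair()            # fine: "_" is ignored above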
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced via "global" aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
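# Illustrative sketch, not part of the original pylint source: comparisons
# rewritten by the suggestions built above ("flag"/"result" are placeholders).
#
#     if flag == True:      # [singleton-comparison] -> "if flag:" or "if flag is True:"
#         ...
#     if result != None:    # [singleton-comparison] -> "if result is not None:"
#         ...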
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is a comparison of a
# bare callable with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
def _loop_exits_early(loop):
"""
Returns True if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
| 288
| 309
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the try/except body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
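# Illustrative sketch (hypothetical snippet, not taken from any test suite):
# the pattern that _redefines_import() is meant to recognise looks like
#     try:
#         from fastjson import loads
#     except ImportError:
#         loads = None   # AssignName redefining the import -> returns True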
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns true if a loop may end in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
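# Illustrative sketch (hypothetical snippet): a break that belongs to a nested
# loop does not count as an early exit of the outer loop, e.g.
#     for row in rows:
#         for cell in row:
#             break              # inner break only
#     else:
#         ...                    # _loop_exits_early(outer for) -> False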
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated in its own
# body, we assume it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
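# Illustrative sketch (hypothetical snippet): the loop below contains no
# break, so its else clause always runs and W0120 (useless-else-on-loop)
# is emitted:
#     for item in items:
#         process(item)
#     else:
#         cleanup()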
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excluded because they are not constant
# values; they require a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statements in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
# `+` binds tighter than `or`; parenthesize so keyword-only defaults are also checked
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful about the inferred type of the node, so just check
# whether format() was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# local or global scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
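# Illustrative sketch (hypothetical snippet): W0109 (duplicate-key)
#     options = {"debug": True, "debug": False}   # "debug" is bound twice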
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If, before reaching the try...finally block, we find a parent whose type
is in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand-...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match,
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different from a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
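# Illustrative sketch (hypothetical snippet): W0127 (self-assigning-variable)
#     total = total   # assigning a name to itself has no effect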
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
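# Illustrative sketch (hypothetical snippet): W0128 (redeclared-assigned-name)
#     first, first = get_pair()   # "first" appears twice in the same target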
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced via `global` aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
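# Illustrative sketch: with KEYWORD_ONSET above, running under Python 3.6
#     async = get_loop()   # W0111: "async" becomes a keyword in Python 3.7
# would be flagged; on 3.7+ the assignment is already a syntax error.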
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
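    # As an illustration, this check flags comparisons such as:
    #     if flag == True:   # [singleton-comparison]
    # and suggests either "if flag:" (testing truthiness) or "flag is True"
    # (an explicit identity check against the singleton).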
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
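    # As an illustration, NaN never compares equal to anything, so:
    #     if x == float("nan"):   # [nan-comparison] -- always False
    # is flagged, with "math.isnan(x)" suggested instead.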
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
        # this message should be emitted only when a bare callable is compared
        # with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
_get_properties
|
Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
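# As an illustration, a function named "process_data" satisfies SnakeCaseStyle,
# "processData" satisfies CamelCaseStyle, a class named "DataProcessor" satisfies
# PascalCaseStyle, a constant named "MAX_RETRIES" satisfies UpperCaseStyle, and
# AnyStyle accepts every name.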
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
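# As an illustration, _redefines_import() returns True for the assignment in a
# fallback pattern such as:
#     try:
#         import simplejson as json
#     except ImportError:
#         json = None   # redefines the aliased import from the try body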
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
    Returns true if a loop may end up in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
        bool: True if the loop may end up in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
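# As an illustration, _loop_exits_early() returns True for:
#     for item in items:
#         if item == target:
#             break          # the break belongs to this loop, not a nested one
#     else:
#         handle_missing()
# which is what keeps useless-else-on-loop from firing on such loops.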
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
# MASKED: _get_properties function (lines 324-337)
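# A minimal sketch of what the masked _get_properties() might look like, based
# only on its docstring and its call site in _determine_function_name_type()
# below; the "property_classes" attribute on the checker config is an assumption,
# and this is not necessarily the original implementation.
def _get_properties(config):
    """Returns a tuple of property classes and names."""
    property_classes = {BUILTIN_PROPERTY}  # always treat builtins.property as a property
    property_names = set()  # plain "property" decorations are handled separately
    if config is not None:
        property_classes.update(config.property_classes)  # assumed config option
        property_names.update(
            prop.rsplit(".", 1)[-1] for prop in config.property_classes
        )
    return property_classes, property_names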
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
            'e.g. doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
            # treat string statement in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
        # include both positional and keyword-only defaults
        defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
        # Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful on inferred type of node, so here just check if format
# was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If we found before a try...finally bloc a parent which its type is
in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-child of the try...finally block
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
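# Illustrative sketch (not part of the original module, hypothetical names):
# a break inside a finally block discards any in-flight exception, e.g.
#
#     for item in items:
#         try:
#             process(item)
#         finally:
#             break  # [lost-exception] -- a pending exception is swallowed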
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line numbers don't match, we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
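# Illustrative sketch (not part of the original module): assignments the
# check flags are no-ops such as
#
#     x = x          # [self-assigning-variable]
#     a, b = a, b    # flagged once per name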
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
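# Illustrative sketch (not part of the original module, "record" is a
# hypothetical name): the same name bound twice in one unpacking target, e.g.
#
#     first, second, first = record   # [redeclared-assigned-name] for "first"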
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced via "global" aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
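# Illustrative sketch (not part of the original module): comparisons this
# check turns into suggestions, e.g.
#
#     if flag == True:    # [singleton-comparison] -> "flag is True" or just "if flag:"
#         ...
#     if value == None:   # [singleton-comparison] -> "value is None"
#         ...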
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
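# Illustrative sketch (not part of the original module): NaN never compares
# equal to anything, including itself, so the check suggests math.isnan():
#
#     if x == float("nan"):   # [nan-comparison] -> "math.isnan(x)"
#         ...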
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is comparison of bare callable
# with non bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
| 324
| 337
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
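# Illustrative sketch (not part of the original module): example names each
# style is meant to accept, assuming the default regexes defined above:
#
#     snake_case  -> "load_config", "parse"
#     camelCase   -> "loadConfig"
#     PascalCase  -> "LoadConfig", "HttpClient"
#     UPPER_CASE  -> "MAX_RETRIES", "TIMEOUT"
#     any         -> everything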
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns True if the loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from the constructor?
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated in its own
# body, we assume it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
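# Minimal sketch (hypothetical function name) of a redefinition this check flags
# when none of the exemptions above (typing.overload stubs, conditional
# definitions, forward references, dummy names) apply:
#
#     def greet():
#         return "hi"
#
#     def greet():  # [function-redefined]
#         return "hello"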
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g. doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statements in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
# combine positional and keyword-only defaults (parenthesized so that
# `+` does not bind tighter than `or`)
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful about the inferred type of the node, so just check
# whether format() was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
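# Minimal sketch (hypothetical values) of a dict literal this check flags:
#
#     settings = {"timeout": 10, "timeout": 30}  # [duplicate-key]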
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before the
try...finally block, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line numbers don't match,
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
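# Minimal sketch (hypothetical name) of an assignment this check flags:
#
#     total = total  # [self-assigning-variable]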
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
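# Minimal sketch (hypothetical names) of an assignment this check flags: the
# same name appears twice among the unpacking targets.
#
#     first, first = compute_pair()  # [redeclared-assigned-name]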
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced by `global` aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
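# Minimal sketch (hypothetical name) of a comparison this check flags:
#
#     if enabled == True:  # [singleton-comparison]; prefer `if enabled:` or `enabled is True`
#         ...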
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
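# Minimal sketch (hypothetical name) of a comparison this check flags:
#
#     if value == float("nan"):  # [nan-comparison]; prefer math.isnan(value)
#         ...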
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when exactly one operand of the
# comparison is a bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
redefined_by_decorator
|
Return True if the object is a method redefined via a decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
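# Illustrative sketch, not part of pylint: how a configured style name is
# resolved against NAMING_STYLES and applied to a candidate name.  The helper
# name and the sample arguments are hypothetical examples.
def _example_match_naming_style(style="snake_case", name_type="function", name="compute_total"):
    regex = NAMING_STYLES[style].get_regex(name_type)
    return regex.match(name) is not None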
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
    exception handler and redefines an import from the try/except body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
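# Illustrative sketch, not part of pylint: the try/except-ImportError fallback
# that _redefines_import() recognises.  In real code the pattern sits at module
# level, where the assignment in the handler rebinds the imported name and is
# therefore exempted from the constant-naming check; the function wrapper and
# the module name below are hypothetical, used only to keep the example
# self-contained and runnable.
def _example_redefined_import():
    try:
        import simplejson  # preferred backend, may be unavailable
    except ImportError:
        simplejson = None  # rebinds the name imported in the try body
    return simplejson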
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
    Returns the loop node that contains the given break node.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
    Returns True if the loop may exit early via a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
        bool: True if the loop may exit early via a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
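# Illustrative sketch, not part of pylint: _loop_exits_early() returns True for
# the loop below because its body contains a break that belongs to this loop,
# so an attached else clause is considered useful and useless-else-on-loop is
# not emitted.  The function name and sample values are hypothetical examples.
def _example_loop_with_useful_else(values=(3, 1, 4)):
    for value in values:
        if value < 0:
            break
    else:
        return None  # only reached when the loop never hit "break"
    return value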
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
    """return True if the object is a method redefined via decorator.
    For example:
        @property
        def x(self): return self._x
        @x.setter
        def x(self, value): self._x = value
    """
    if node.decorators:
        for decorator in node.decorators.nodes:
            if (
                isinstance(decorator, astroid.Attribute)
                and getattr(decorator.expr, "name", None) == node.name
            ):
                return True
    return False
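# Illustrative sketch, not part of pylint: the property/setter pair that
# redefined_by_decorator() exempts from function-redefined, and for which
# _determine_function_name_type() returns "attr", so both definitions of x are
# checked against the attribute naming rules.  The class and attribute names
# are hypothetical examples.
class _ExamplePoint:
    def __init__(self):
        self._x = 0
    @property
    def x(self):
        return self._x
    @x.setter
    def x(self, value):
        self._x = value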
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
            # creation. If the class is instantiated in its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
            # treat string statements in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
        # Parenthesize both operands: "or" binds more loosely than "+", so the
        # unparenthesized form would silently ignore kw_defaults whenever
        # defaults is non-empty.
        defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
        # Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
        # 1 - Does it have a right sibling?
self._check_unreachable(node)
        # 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
            # the inferred type of the node is uncertain, so just check whether
            # format() was called on the result of print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If a parent whose type is in breaker_classes is found before a
        try...finally block, the whole check is skipped."""
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
        # the node could be a grand-grand...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
                    # they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
                    # Don't emit a message if the second item is a function call;
                    # there's no way that can be mistaken for a name assignment.
                    # If the line number doesn't match,
                    # we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
                # Check that the scope is different from the class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
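# Illustrative sketch, not part of pylint: the mutable-default anti-pattern
# that BasicChecker._check_dangerous_default() reports as W0102
# (dangerous-default-value), together with the conventional None-sentinel
# rewrite that avoids it.  Both function names are hypothetical examples.
def _example_dangerous_default(items=[]):  # shared list: would be flagged
    items.append(1)
    return items
def _example_safe_default(items=None):  # fresh list per call: not flagged
    if items is None:
        items = []
    items.append(1)
    return items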
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
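# Illustrative sketch, not part of pylint: _create_naming_options() emits two
# options per entry of KNOWN_NAME_TYPES, e.g. "class-naming-style" (a choice
# among NAMING_STYLES) and "class-rgx" (an overriding regular expression).
# The helper below merely collects the generated option names; its name is a
# hypothetical example.
def _example_generated_naming_option_names():
    return [option_name for option_name, _settings in _create_naming_options()]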
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
            # globally introduced variables aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
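# Illustrative sketch, not part of pylint: how assign-to-new-keyword is decided.
# On interpreters older than 3.7, KEYWORD_ONSET maps "async" and "await" to
# version (3, 7), so the lookup below returns "3.7"; on 3.7 and newer it
# returns None and no warning is emitted.  The helper name is a hypothetical
# example.
def _example_keyword_onset(name="async"):
    return NameChecker._name_became_keyword_in_version(name, NameChecker.KEYWORD_ONSET)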
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
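# Illustrative sketch (not part of pylint): with default settings the
# made-up definitions below would be reported by DocStringChecker.  Names
# matching ``no-docstring-rgx`` (a leading underscore by default) are
# exempt, and ``docstring-min-length`` can exempt short functions:
#
#     def compute_total(items):  # [missing-function-docstring]
#         return sum(items)
#
#     class Report:  # [empty-docstring]
#         """ """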
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
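    # Illustrative sketch (not part of pylint): for a made-up name ``flag``
    # the comparisons below are reported by _check_singleton_comparison,
    # together with a suggestion such as ``flag is None`` or a plain
    # truthiness test:
    #
    #     if flag == None:  # [singleton-comparison]
    #         ...
    #     if flag != True:  # [singleton-comparison]
    #         ...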
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
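    # Illustrative sketch (not part of pylint): NaN never compares equal to
    # anything, so for a made-up name ``value`` (and assuming numpy is
    # imported) the expressions below are reported with
    # ``math.isnan(value)`` as the suggested replacement:
    #
    #     value == float("nan")  # [nan-comparison]
    #     value is numpy.NaN     # [nan-comparison]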
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is comparison of bare callable
# with non bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
| 435
| 451
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
    Returns true if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
        bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
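    # Illustrative sketch (not part of pylint): instantiating a class whose
    # abstract methods are still unimplemented is reported.  All names below
    # are made up:
    #
    #     import abc
    #
    #     class Exporter(abc.ABC):
    #         @abc.abstractmethod
    #         def export(self):
    #             ...
    #
    #     Exporter()  # [abstract-class-instantiated]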
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
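# Illustrative sketch (not part of pylint): redefining a name in the same
# scope is reported by _check_redefinition.  With made-up names:
#
#     def load_config():
#         return {}
#
#     def load_config():  # [function-redefined] ("already defined line ...")
#         return None
#
# Names matching the variables checker's ``dummy-variables-rgx`` option
# (for instance a leading underscore with the default value) are exempted.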
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statement in a separated message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
        defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful about the inferred type of the node, so just check
# whether format was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
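# Example of a dict literal this check would flag (hypothetical, for
# illustration only); the second "host" entry silently wins:
#
#     config = {"host": "localhost", "port": 80, "host": "0.0.0.0"}  # [duplicate-key]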
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before the
try...finally block, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-...-child of the try...finally block
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second item is a function call;
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match,
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different from the class level, which is
# usually a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
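# Hypothetical examples for the check above (not pylint test data):
#
#     result = result                # [self-assigning-variable]
#     first, second = first, second  # flagged for both names
#
# Class-level "name = name" is deliberately exempted above, since it is a
# common way to re-expose module-level attributes on a class.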
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
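# A hypothetical example of the pattern flagged above: the same name bound
# twice in one unpacking assignment, so the first binding is lost.
#
#     row, row = read_pair()   # [redeclared-assigned-name] for "row"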
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced via "global" aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
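# Hypothetical comparisons this method rewrites into suggestions (names are
# illustrative only):
#
#     if flag == True:    # [singleton-comparison] -> "flag is True" or "bool(flag)"
#     if result != None:  # [singleton-comparison] -> "result is not None"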
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
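# Hypothetical examples for the NaN handling above; NaN never compares equal
# to anything, including itself:
#
#     if value == float("nan"):  # [nan-comparison] -> "math.isnan(value)"
#     if value != numpy.NaN:     # [nan-comparison] -> "not math.isnan(value)"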
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is a comparison of a
# bare callable with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
_check_not_in_finally
|
check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before the
try...finally block, the whole check is skipped.
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
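# Illustrative (hypothetical) names accepted by the styles above, going by
# the regexes rather than by pylint's documentation:
#
#     snake_case   function "compute_total", module "http_client"
#     camelCase    function "computeTotal"
#     PascalCase   class "HttpClient"
#     UPPER_CASE   constant "MAX_RETRIES"
#     any          accepts everything (the default for class attributes)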
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the given break node.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns True if a loop may end in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated in its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
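# Hedged example (names `items` and `process` are invented): an else clause on
# a loop only adds information when the loop body contains a break, so the
# snippet below would be reported.
#
#     for item in items:
#         process(item)
#     else:                 # [useless-else-on-loop] no break in the loop body
#         print("done")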
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excluded, since they are not constant
# values and require a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statement in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
# parenthesize both operands so keyword-only defaults are not skipped by "or" short-circuiting
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
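# Illustrative sketch (not part of pylint's source): a return (or break) inside
# a finally clause silently discards any exception raised in the try body,
# which _check_not_in_finally reports as lost-exception. Names are invented.
#
#     def read_config(path):
#         try:
#             raise OSError("cannot read %s" % path)
#         finally:
#             return {}    # [lost-exception] the OSError never propagates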
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we could not infer the type of the node, so just check whether
# format() was called on the result of print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
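# Illustrative sketch (not part of pylint's source): the same constant key
# bound twice in one dict display; only the last binding survives.
#
#     settings = {"debug": True, "debug": False}   # [duplicate-key] "debug"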
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
# MASKED: _check_not_in_finally function (lines 1486-1502)
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second is a function call
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
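# Illustrative sketch (names invented): assignments caught by the two helpers
# called above.
#
#     name = name             # [self-assigning-variable]
#     first, first = point    # [redeclared-assigned-name] "first" bound twice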
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# variables introduced by a global statement aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
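# Illustrative sketch (`flag` and `value` are invented names): equality
# comparisons against the singletons True, False and None.
#
#     if flag == True:      # [singleton-comparison] use "if flag:" or "flag is True"
#         ...
#     if value != None:     # [singleton-comparison] use "value is not None"
#         ...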
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is comparison of bare callable
# with non bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before reaching the
try...finally block, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a descendant, at any depth, of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
| 1486
| 1502
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
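# Illustrative note (not part of the original module): the styles registered above
# differ only in which character classes their regexes accept. Roughly, under the
# DEFAULT_NAME_RGX of each style:
#   snake_case  accepts e.g. parse_args, _cache, __repr__
#   camelCase   accepts e.g. parseArgs
#   PascalCase  accepts e.g. ParseArgs
#   UPPER_CASE  accepts e.g. PARSE_ARGS, MAX_RETRIES
#   any         accepts everything (the ".*" pattern)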
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
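# Illustrative example (not from the original source): this mapping supplies the
# symbol shown in dangerous-default-value (W0102) messages.
#
#   def append_to(item, target=[]):      # -> "Dangerous default value [] as argument"
#       target.append(item)
#       return target
#
#   def tally(items, counter=collections.Counter()):   # also flagged, reported with the
#       ...                                             #   qualified name of the default
#
# The shared mutable default persists across calls, which is why it is flagged.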
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the try/except body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
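# Illustrative example (not from the original source) of the pattern recognised by
# _redefines_import(); such assignments are later exempted from the constant naming
# check in NameChecker.visit_assignname():
#
#   try:
#       import simplejson as json
#   except ImportError:
#       json = None        # AssignName inside the ImportError handler, same name as the import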
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that contains the given break node.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns true if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
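# Illustrative example (not from the original source): _loop_exits_early() backs the
# useless-else-on-loop (W0120) check.
#
#   for item in items:
#       process(item)
#   else:                  # flagged: no break in the loop body, the else always runs
#       finish()
#
#   while queue:
#       if found(queue):
#           break          # a reachable break makes the else clause meaningful
#   else:
#       report_missing()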
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
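# Illustrative examples (not from the original source) for the constructor and
# argument checks performed in visit_functiondef() above:
#
#   class Widget:
#       def __init__(self):
#           return self          # return-in-init (E0101)
#
#   class Stream:
#       def __init__(self):
#           yield None           # init-is-generator (E0100)
#
#   def clamp(value, value):     # duplicate-argument-name (E0108); CPython itself
#       ...                      #   rejects this at parse time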
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
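# Illustrative examples (not from the original source) for _check_redefinition():
#
#   def render(data): ...
#   def render(data): ...        # function-redefined (E0102): 'render' already defined
#
#   if not render:               # exempt: redefinition guarded by "if not <func>"
#       def render(data): ...
#
# typing.overload stubs and definitions placed in mutually exclusive branches
# (if/else, try/except) are likewise not reported.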
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
# it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statements in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
# parenthesised so that kw_defaults are not dropped when positional defaults exist
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are uncertain about the inferred type of the node, so just check if format
# was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If, before reaching the try...finally block, we find a parent whose type is
in breaker_classes, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be an arbitrarily deep descendant of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second is a function call
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
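# Illustrative examples (not from the original source) for the two assignment checks above:
#
#   limit = limit                    # self-assigning-variable (W0127)
#   first, second = second, first    # a swap is not reported
#   key, key = pair                  # redeclared-assigned-name (W0128)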
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# globally introduced variables aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
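    # Illustrative sketch of the table this helper walks (the exact
    # KEYWORD_ONSET mapping lives on the checker and is assumed here):
    #     KEYWORD_ONSET = {(3, 7): {"async", "await"}}
    # With such a table, assigning to ``async`` while running under a
    # pre-3.7 interpreter triggers assign-to-new-keyword with "3.7" as the
    # version argument.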
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
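# Illustrative sketch (hypothetical class, not part of this module): the
# helper above lets an explicit ``__doc__`` assignment stand in for a
# regular docstring, e.g.
#     class Frobnicator:
#         __doc__ = "Frobnicates things."   # counts as a non-empty docstring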
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
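    # Illustrative example of the resulting suggestion (editorial sketch):
    #     if flag == True:      # [singleton-comparison]
    #         ...
    # yields roughly: "'flag is True' if checking for the singleton value
    # True, or 'flag' if testing for truthiness" (``bool(...)`` is suggested
    # instead when the comparison is not already used as a test condition).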
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is comparison of bare callable
# with non bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
_check_logical_tautology
|
Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
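# Quick illustration of how the styles are queried (doctest-style sketch,
# not executed by pylint itself):
#     >>> bool(SnakeCaseStyle.get_regex("function").match("load_config"))
#     True
#     >>> bool(SnakeCaseStyle.get_regex("function").match("loadConfig"))
#     False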
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"}  # Python 3.7+
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
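# The mapping above feeds the dangerous-default-value message; illustrative
# trigger (hypothetical function, not part of this module):
#     def append_item(item, bucket=[]):   # [dangerous-default-value] "[]"
#         bucket.append(item)
#         return bucket
# The shared list persists between calls, which is rarely what was intended.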
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
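# Illustrative sketch of the pattern detected above (hypothetical imports):
#     try:
#         import simplejson as json
#     except ImportError:
#         json = None        # rebinds the imported name; naming checks skip it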
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
    Returns True if a loop may end with a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
        bool: True if the loop may end with a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
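# Illustrative use (hypothetical loop): useless-else-on-loop relies on this
# helper, since an ``else`` clause only makes sense when a ``break`` can
# skip it:
#     for item in items:
#         process(item)
#     else:                  # [useless-else-on-loop] - no break in the loop
#         cleanup()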
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
# creation. If the class is instantiated into its own
# body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
            'e.g. doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
# These nodes are excepted, since they are not constant
# values, requiring a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
# treat string statement in a separated message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
# 1 - Is it right sibling ?
self._check_unreachable(node)
# 2 - Is it inside final body of a try...finally bloc ?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
# we are doubtful on inferred type of node, so here just check if format
# was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If a parent whose type is in breaker_classes is found before the
try...finally block, the whole check is skipped."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand-...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
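# Hypothetical example of the lost-exception situation checked above
# (`risky` and `fallback` are illustrative names only):
#
#     try:
#         risky()
#     finally:
#         return fallback     # lost-exception: swallows any in-flight exception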
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
# they provide explicitly a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second is a function call
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
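# Hypothetical examples of the pattern detected above:
#
#     name = name             # self-assigning-variable
#     x, y = x, y             # both targets flagged
#
# A class-level `attr = attr` re-exposing a module-level name is deliberately skipped.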
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
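# Hypothetical examples for the redeclared-assigned-name check used above
# (`get_pair` and `pairs` are illustrative names only):
#
#     first, first = get_pair()       # same name bound twice in one target
#     for key, key in pairs: ...      # also flagged via visit_for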
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
# global introduced variable aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
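# Hypothetical examples of the comparisons handled above
# (`result` and `flag` are illustrative names only):
#
#     if result == None: ...          # suggest `result is None`
#     if flag == True: ...            # suggest `flag` (truthiness) or `flag is True`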
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
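# Hypothetical examples of the NaN comparisons handled above:
#
#     if x == float("nan"): ...       # always False; suggest `math.isnan(x)`
#     if y != numpy.NaN: ...          # suggest `not math.isnan(y)`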
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
# Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
# MASKED: _check_logical_tautology function (lines 2490-2515)
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
# this message should be emitted only when there is comparison of bare callable
# with non bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
| 2,490
| 2,515
|
# Copyright (c) 2006-2016 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2010 Daniel Harding <dharding@gmail.com>
# Copyright (c) 2012-2014 Google, Inc.
# Copyright (c) 2013-2020 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Nick Bastin <nick.bastin@gmail.com>
# Copyright (c) 2015 Michael Kefeder <oss@multiwave.ch>
# Copyright (c) 2015 Dmitry Pribysh <dmand@yandex.ru>
# Copyright (c) 2015 Stephane Wirtel <stephane@wirtel.be>
# Copyright (c) 2015 Cosmin Poieana <cmin@ropython.org>
# Copyright (c) 2015 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Radu Ciorba <radu@devrandom.ro>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016, 2019 Ashley Whetter <ashley@awhetter.co.uk>
# Copyright (c) 2016, 2018 Jakub Wilk <jwilk@jwilk.net>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Elias Dorneles <eliasdorneles@gmail.com>
# Copyright (c) 2016 Yannack <yannack@users.noreply.github.com>
# Copyright (c) 2016 Alex Jurkiewicz <alex@jurkiewi.cz>
# Copyright (c) 2017, 2019-2021 Pierre Sassoulas <pierre.sassoulas@gmail.com>
# Copyright (c) 2017, 2019-2021 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 danields <danields761@gmail.com>
# Copyright (c) 2017 Jacques Kvam <jwkvam@gmail.com>
# Copyright (c) 2017 ttenhoeve-aa <ttenhoeve@appannie.com>
# Copyright (c) 2018-2019 Nick Drozd <nicholasdrozd@gmail.com>
# Copyright (c) 2018-2019 Ville Skyttä <ville.skytta@iki.fi>
# Copyright (c) 2018 Sergei Lebedev <185856+superbobry@users.noreply.github.com>
# Copyright (c) 2018 Lucas Cimon <lucas.cimon@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Copyright (c) 2018 Natalie Serebryakova <natalie.serebryakova@Natalies-MacBook-Pro.local>
# Copyright (c) 2018 Sushobhit <31987769+sushobhit27@users.noreply.github.com>
# Copyright (c) 2018 SergeyKosarchuk <sergeykosarchuk@gmail.com>
# Copyright (c) 2018 Steven M. Vascellaro <svascellaro@gmail.com>
# Copyright (c) 2018 Mike Frysinger <vapier@gmail.com>
# Copyright (c) 2018 Chris Lamb <chris@chris-lamb.co.uk>
# Copyright (c) 2018 glmdgrielson <32415403+glmdgrielson@users.noreply.github.com>
# Copyright (c) 2019 Daniel Draper <Germandrummer92@users.noreply.github.com>
# Copyright (c) 2019 Hugo van Kemenade <hugovk@users.noreply.github.com>
# Copyright (c) 2019 Niko Wenselowski <niko@nerdno.de>
# Copyright (c) 2019 Nikita Sobolev <mail@sobolevn.me>
# Copyright (c) 2019 Oisín Moran <OisinMoran@users.noreply.github.com>
# Copyright (c) 2019 Fantix King <fantix@uchicago.edu>
# Copyright (c) 2020 Peter Kolbus <peter.kolbus@gmail.com>
# Copyright (c) 2020 ethan-leba <ethanleba5@gmail.com>
# Copyright (c) 2020 へーさん <hira9603859504@gmail.com>
# Copyright (c) 2020 Damien Baty <damien.baty@polyconseil.fr>
# Copyright (c) 2020 Ram Rachum <ram@rachum.com>
# Copyright (c) 2020 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2020 bernie gray <bfgray3@users.noreply.github.com>
# Copyright (c) 2020 Gabriel R Sezefredo <g@briel.dev>
# Copyright (c) 2020 Benny <benny.mueller91@gmail.com>
# Copyright (c) 2020 Anubhav <35621759+anubh-v@users.noreply.github.com>
# Copyright (c) 2021 Marc Mueller <30130371+cdce8p@users.noreply.github.com>
# Copyright (c) 2021 Andreas Finkler <andi.finkler@gmail.com>
# Copyright (c) 2021 Or Bahari <orbahari@mail.tau.ac.il>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""basic checker for Python code"""
import builtins
import collections
import itertools
import re
import sys
from typing import Pattern
import astroid
from pylint import checkers, exceptions, interfaces
from pylint import utils as lint_utils
from pylint.checkers import utils
from pylint.checkers.utils import (
is_overload_stub,
is_property_deleter,
is_property_setter,
)
from pylint.reporters.ureports import nodes as reporter_nodes
class NamingStyle:
"""It may seem counterintuitive that single naming style has multiple "accepted"
forms of regular expressions, but we need to special-case stuff like dunder names
in method names."""
ANY: Pattern[str] = re.compile(".*")
CLASS_NAME_RGX: Pattern[str] = ANY
MOD_NAME_RGX: Pattern[str] = ANY
CONST_NAME_RGX: Pattern[str] = ANY
COMP_VAR_RGX: Pattern[str] = ANY
DEFAULT_NAME_RGX: Pattern[str] = ANY
CLASS_ATTRIBUTE_RGX: Pattern[str] = ANY
@classmethod
def get_regex(cls, name_type):
return {
"module": cls.MOD_NAME_RGX,
"const": cls.CONST_NAME_RGX,
"class": cls.CLASS_NAME_RGX,
"function": cls.DEFAULT_NAME_RGX,
"method": cls.DEFAULT_NAME_RGX,
"attr": cls.DEFAULT_NAME_RGX,
"argument": cls.DEFAULT_NAME_RGX,
"variable": cls.DEFAULT_NAME_RGX,
"class_attribute": cls.CLASS_ATTRIBUTE_RGX,
"class_const": cls.CONST_NAME_RGX,
"inlinevar": cls.COMP_VAR_RGX,
}[name_type]
class SnakeCaseStyle(NamingStyle):
"""Regex rules for snake_case naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\WA-Z]*$")
DEFAULT_NAME_RGX = re.compile(
r"([^\W\dA-Z][^\WA-Z]{2,}|_[^\WA-Z]*|__[^\WA-Z\d_][^\WA-Z]+__)$"
)
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\WA-Z]{2,}|__.*__)$")
class CamelCaseStyle(NamingStyle):
"""Regex rules for camelCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
CONST_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\dA-Z][^\W_]*$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"([^\W\dA-Z][^\W_]{2,}|__.*__)$")
class PascalCaseStyle(NamingStyle):
"""Regex rules for PascalCase naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\W_]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\W_]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\W_]{2,}$")
class UpperCaseStyle(NamingStyle):
"""Regex rules for UPPER_CASE naming style."""
CLASS_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
MOD_NAME_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
CONST_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]*|__.*__)$")
COMP_VAR_RGX = re.compile(r"[^\W\da-z][^\Wa-z]+$")
DEFAULT_NAME_RGX = re.compile(r"([^\W\da-z][^\Wa-z]{2,}|__[^\W\dA-Z_]\w+__)$")
CLASS_ATTRIBUTE_RGX = re.compile(r"[^\W\da-z][^\Wa-z]{2,}$")
class AnyStyle(NamingStyle):
pass
NAMING_STYLES = {
"snake_case": SnakeCaseStyle,
"camelCase": CamelCaseStyle,
"PascalCase": PascalCaseStyle,
"UPPER_CASE": UpperCaseStyle,
"any": AnyStyle,
}
# do not require a doc string on private/system methods
NO_REQUIRED_DOC_RGX = re.compile("^_")
REVERSED_PROTOCOL_METHOD = "__reversed__"
SEQUENCE_PROTOCOL_METHODS = ("__getitem__", "__len__")
REVERSED_METHODS = (SEQUENCE_PROTOCOL_METHODS, (REVERSED_PROTOCOL_METHOD,))
TYPECHECK_COMPARISON_OPERATORS = frozenset(("is", "is not", "==", "!="))
LITERAL_NODE_TYPES = (astroid.Const, astroid.Dict, astroid.List, astroid.Set)
UNITTEST_CASE = "unittest.case"
BUILTINS = builtins.__name__
TYPE_QNAME = "%s.type" % BUILTINS
ABC_METACLASSES = {"_py_abc.ABCMeta", "abc.ABCMeta"} # Python 3.7+,
# Name categories that are always consistent with all naming conventions.
EXEMPT_NAME_CATEGORIES = {"exempt", "ignore"}
# A mapping from qname -> symbol, to be used when generating messages
# about dangerous default values as arguments
DEFAULT_ARGUMENT_SYMBOLS = dict(
zip(
[".".join([BUILTINS, x]) for x in ("set", "dict", "list")],
["set()", "{}", "[]"],
),
**{
x: "%s()" % x
for x in (
"collections.deque",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.defaultdict",
"collections.UserDict",
"collections.UserList",
)
},
)
REVERSED_COMPS = {"<": ">", "<=": ">=", ">": "<", ">=": "<="}
COMPARISON_OPERATORS = frozenset(("==", "!=", "<", ">", "<=", ">="))
# List of methods which can be redefined
REDEFINABLE_METHODS = frozenset(("__module__",))
TYPING_FORWARD_REF_QNAME = "typing.ForwardRef"
def _redefines_import(node):
"""Detect that the given node (AssignName) is inside an
exception handler and redefines an import from the tryexcept body.
Returns True if the node redefines an import, False otherwise.
"""
current = node
while current and not isinstance(current.parent, astroid.ExceptHandler):
current = current.parent
if not current or not utils.error_of_type(current.parent, ImportError):
return False
try_block = current.parent.parent
for import_node in try_block.nodes_of_class((astroid.ImportFrom, astroid.Import)):
for name, alias in import_node.names:
if alias:
if alias == node.name:
return True
elif name == node.name:
return True
return False
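# The pattern this helper recognizes looks roughly like this (module names
# are illustrative only):
#
#     try:
#         import cPickle as pickle
#     except ImportError:
#         import pickle       # redefines the aliased import; name checks skip it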
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(
parent,
(
astroid.For,
astroid.ListComp,
astroid.SetComp,
astroid.DictComp,
astroid.GeneratorExp,
),
):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
def _get_break_loop_node(break_node):
"""
Returns the loop node that holds the break node in arguments.
Args:
break_node (astroid.Break): the break node of interest.
Returns:
astroid.For or astroid.While: the loop node holding the break node.
"""
loop_nodes = (astroid.For, astroid.While)
parent = break_node.parent
while not isinstance(parent, loop_nodes) or break_node in getattr(
parent, "orelse", []
):
break_node = parent
parent = parent.parent
if parent is None:
break
return parent
def _loop_exits_early(loop):
"""
Returns True if a loop may end up in a break statement.
Args:
loop (astroid.For, astroid.While): the loop node inspected.
Returns:
bool: True if the loop may end up in a break statement, False otherwise.
"""
loop_nodes = (astroid.For, astroid.While)
definition_nodes = (astroid.FunctionDef, astroid.ClassDef)
inner_loop_nodes = [
_node
for _node in loop.nodes_of_class(loop_nodes, skip_klass=definition_nodes)
if _node != loop
]
return any(
_node
for _node in loop.nodes_of_class(astroid.Break, skip_klass=definition_nodes)
if _get_break_loop_node(_node) not in inner_loop_nodes
)
def _is_multi_naming_match(match, node_type, confidence):
return (
match is not None
and match.lastgroup is not None
and match.lastgroup not in EXEMPT_NAME_CATEGORIES
and (node_type != "method" or confidence != interfaces.INFERENCE_FAILURE)
)
BUILTIN_PROPERTY = "builtins.property"
def _get_properties(config):
"""Returns a tuple of property classes and names.
Property classes are fully qualified, such as 'abc.abstractproperty' and
property names are the actual names, such as 'abstract_property'.
"""
property_classes = {BUILTIN_PROPERTY}
property_names = set() # Not returning 'property', it has its own check.
if config is not None:
property_classes.update(config.property_classes)
property_names.update(
prop.rsplit(".", 1)[-1] for prop in config.property_classes
)
return property_classes, property_names
def _determine_function_name_type(node: astroid.FunctionDef, config=None):
"""Determine the name type whose regex the a function's name should match.
:param node: A function node.
:param config: Configuration from which to pull additional property classes.
:type config: :class:`optparse.Values`
:returns: One of ('function', 'method', 'attr')
:rtype: str
"""
property_classes, property_names = _get_properties(config)
if not node.is_method():
return "function"
if is_property_setter(node) or is_property_deleter(node):
# If the function is decorated using the prop_method.{setter,getter}
# form, treat it like an attribute as well.
return "attr"
if node.decorators:
decorators = node.decorators.nodes
else:
decorators = []
for decorator in decorators:
# If the function is a property (decorated with @property
# or @abc.abstractproperty), the name type is 'attr'.
if isinstance(decorator, astroid.Name) or (
isinstance(decorator, astroid.Attribute)
and decorator.attrname in property_names
):
inferred = utils.safe_infer(decorator)
if (
inferred
and hasattr(inferred, "qname")
and inferred.qname() in property_classes
):
return "attr"
return "method"
def _has_abstract_methods(node):
"""
Determine if the given `node` has abstract methods.
The methods should be made abstract by decorating them
with `abc` decorators.
"""
return len(utils.unimplemented_abstract_methods(node)) > 0
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ("module", "class", "method", "function"):
try:
total = stats[node_type]
except KeyError as e:
raise exceptions.EmptyReportError() from e
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats["undocumented_" + node_type]
percent = (documented * 100.0) / total
nice_stats[node_type]["percent_documented"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_documented"] = "NC"
try:
percent = (stats["badname_" + node_type] * 100.0) / total
nice_stats[node_type]["percent_badname"] = "%.2f" % percent
except KeyError:
nice_stats[node_type]["percent_badname"] = "NC"
lines = ("type", "number", "old number", "difference", "%documented", "%badname")
for node_type in ("module", "class", "method", "function"):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = lint_utils.diff_string(old, new)
else:
old, diff_str = "NC", "NC"
lines += (
node_type,
str(new),
str(old),
diff_str,
nice_stats[node_type].get("percent_documented", "0"),
nice_stats[node_type].get("percent_badname", "0"),
)
sect.append(reporter_nodes.Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (
isinstance(decorator, astroid.Attribute)
and getattr(decorator.expr, "name", None) == node.name
):
return True
return False
class _BasicChecker(checkers.BaseChecker):
__implements__ = interfaces.IAstroidChecker
name = "basic"
class BasicErrorChecker(_BasicChecker):
msgs = {
"E0100": (
"__init__ method is a generator",
"init-is-generator",
"Used when the special class method __init__ is turned into a "
"generator by a yield in its body.",
),
"E0101": (
"Explicit return in __init__",
"return-in-init",
"Used when the special class method __init__ has an explicit "
"return value.",
),
"E0102": (
"%s already defined line %s",
"function-redefined",
"Used when a function / class / method is redefined.",
),
"E0103": (
"%r not properly in loop",
"not-in-loop",
"Used when break or continue keywords are used outside a loop.",
),
"E0104": (
"Return outside function",
"return-outside-function",
'Used when a "return" statement is found outside a function or method.',
),
"E0105": (
"Yield outside function",
"yield-outside-function",
'Used when a "yield" statement is found outside a function or method.',
),
"E0106": (
"Return with argument inside generator",
"return-arg-in-generator",
'Used when a "return" statement with an argument is found '
"outside in a generator function or method (e.g. with some "
'"yield" statements).',
{"maxversion": (3, 3)},
),
"E0107": (
"Use of the non-existent %s operator",
"nonexistent-operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operator -- and ++, which doesn't exist in Python.",
),
"E0108": (
"Duplicate argument name %s in function definition",
"duplicate-argument-name",
"Duplicate argument names in function definitions are syntax errors.",
),
"E0110": (
"Abstract class %r with abstract methods instantiated",
"abstract-class-instantiated",
"Used when an abstract class with `abc.ABCMeta` as metaclass "
"has abstract methods and is instantiated.",
),
"W0120": (
"Else clause on loop without a break statement",
"useless-else-on-loop",
"Loops should only have an else clause if they can exit early "
"with a break statement, otherwise the statements under else "
"should be on the same scope as the loop itself.",
),
"E0112": (
"More than one starred expression in assignment",
"too-many-star-expressions",
"Emitted when there are more than one starred "
"expressions (`*x`) in an assignment. This is a SyntaxError.",
),
"E0113": (
"Starred assignment target must be in a list or tuple",
"invalid-star-assignment-target",
"Emitted when a star expression is used as a starred assignment target.",
),
"E0114": (
"Can use starred expression only in assignment target",
"star-needs-assignment-target",
"Emitted when a star expression is not used in an assignment target.",
),
"E0115": (
"Name %r is nonlocal and global",
"nonlocal-and-global",
"Emitted when a name is both nonlocal and global.",
),
"E0116": (
"'continue' not supported inside 'finally' clause",
"continue-in-finally",
"Emitted when the `continue` keyword is found "
"inside a finally clause, which is a SyntaxError.",
{"maxversion": (3, 8)},
),
"E0117": (
"nonlocal name %s found without binding",
"nonlocal-without-binding",
"Emitted when a nonlocal variable does not have an attached "
"name somewhere in the parent scopes",
),
"E0118": (
"Name %r is used prior to global declaration",
"used-prior-global-declaration",
"Emitted when a name is used prior a global declaration, "
"which results in an error since Python 3.6.",
{"minversion": (3, 6)},
),
}
@utils.check_messages("function-redefined")
def visit_classdef(self, node):
self._check_redefinition("class", node)
def _too_many_starred_for_tuple(self, assign_tuple):
starred_count = 0
for elem in assign_tuple.itered():
if isinstance(elem, astroid.Tuple):
return self._too_many_starred_for_tuple(elem)
if isinstance(elem, astroid.Starred):
starred_count += 1
return starred_count > 1
@utils.check_messages("too-many-star-expressions", "invalid-star-assignment-target")
def visit_assign(self, node):
# Check *a, *b = ...
assign_target = node.targets[0]
# Check *a = b
if isinstance(node.targets[0], astroid.Starred):
self.add_message("invalid-star-assignment-target", node=node)
if not isinstance(assign_target, astroid.Tuple):
return
if self._too_many_starred_for_tuple(assign_target):
self.add_message("too-many-star-expressions", node=node)
@utils.check_messages("star-needs-assignment-target")
def visit_starred(self, node):
"""Check that a Starred expression is used in an assignment target."""
if isinstance(node.parent, astroid.Call):
# f(*args) is converted to Call(args=[Starred]), so ignore
# them for this check.
return
if isinstance(
node.parent, (astroid.List, astroid.Tuple, astroid.Set, astroid.Dict)
):
# PEP 448 unpacking.
return
stmt = node.statement()
if not isinstance(stmt, astroid.Assign):
return
if stmt.value is node or stmt.value.parent_of(node):
self.add_message("star-needs-assignment-target", node=node)
@utils.check_messages(
"init-is-generator",
"return-in-init",
"function-redefined",
"return-arg-in-generator",
"duplicate-argument-name",
"nonlocal-and-global",
"used-prior-global-declaration",
)
def visit_functiondef(self, node):
self._check_nonlocal_and_global(node)
self._check_name_used_prior_global(node)
if not redefined_by_decorator(
node
) and not utils.is_registered_in_singledispatch_function(node):
self._check_redefinition(node.is_method() and "method" or "function", node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(
astroid.Return, skip_klass=(astroid.FunctionDef, astroid.ClassDef)
)
if node.is_method() and node.name == "__init__":
if node.is_generator():
self.add_message("init-is-generator", node=node)
else:
values = [r.value for r in returns]
# Are we returning anything but None from constructors
if any(v for v in values if not utils.is_none(v)):
self.add_message("return-in-init", node=node)
# Check for duplicate names by clustering args with same name for detailed report
arg_clusters = collections.defaultdict(list)
arguments = filter(None, [node.args.args, node.args.kwonlyargs])
for arg in itertools.chain.from_iterable(arguments):
arg_clusters[arg.name].append(arg)
# provide detailed report about each repeated argument
for argument_duplicates in arg_clusters.values():
if len(argument_duplicates) != 1:
for argument in argument_duplicates:
self.add_message(
"duplicate-argument-name",
line=argument.lineno,
node=argument,
args=(argument.name,),
)
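    # For illustration, hypothetical constructors that this visitor reports:
    #     class Widget:
    #         def __init__(self):
    #             yield from self._load()   # init-is-generator
    #     class Gadget:
    #         def __init__(self):
    #             return self               # return-in-init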
visit_asyncfunctiondef = visit_functiondef
def _check_name_used_prior_global(self, node):
scope_globals = {
name: child
for child in node.nodes_of_class(astroid.Global)
for name in child.names
if child.scope() is node
}
if not scope_globals:
return
for node_name in node.nodes_of_class(astroid.Name):
if node_name.scope() is not node:
continue
name = node_name.name
corresponding_global = scope_globals.get(name)
if not corresponding_global:
continue
global_lineno = corresponding_global.fromlineno
if global_lineno and global_lineno > node_name.fromlineno:
self.add_message(
"used-prior-global-declaration", node=node_name, args=(name,)
)
def _check_nonlocal_and_global(self, node):
"""Check that a name is both nonlocal and global."""
def same_scope(current):
return current.scope() is node
from_iter = itertools.chain.from_iterable
nonlocals = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Nonlocal)
if same_scope(child)
)
)
if not nonlocals:
return
global_vars = set(
from_iter(
child.names
for child in node.nodes_of_class(astroid.Global)
if same_scope(child)
)
)
for name in nonlocals.intersection(global_vars):
self.add_message("nonlocal-and-global", args=(name,), node=node)
@utils.check_messages("return-outside-function")
def visit_return(self, node):
if not isinstance(node.frame(), astroid.FunctionDef):
self.add_message("return-outside-function", node=node)
@utils.check_messages("yield-outside-function")
def visit_yield(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("yield-outside-function")
def visit_yieldfrom(self, node):
self._check_yield_outside_func(node)
@utils.check_messages("not-in-loop", "continue-in-finally")
def visit_continue(self, node):
self._check_in_loop(node, "continue")
@utils.check_messages("not-in-loop")
def visit_break(self, node):
self._check_in_loop(node, "break")
@utils.check_messages("useless-else-on-loop")
def visit_for(self, node):
self._check_else_on_loop(node)
@utils.check_messages("useless-else-on-loop")
def visit_while(self, node):
self._check_else_on_loop(node)
@utils.check_messages("nonexistent-operator")
def visit_unaryop(self, node):
"""check use of the non-existent ++ and -- operator operator"""
if (
(node.op in "+-")
and isinstance(node.operand, astroid.UnaryOp)
and (node.operand.op == node.op)
):
self.add_message("nonexistent-operator", node=node, args=node.op * 2)
def _check_nonlocal_without_binding(self, node, name):
current_scope = node.scope()
while True:
if current_scope.parent is None:
break
if not isinstance(current_scope, (astroid.ClassDef, astroid.FunctionDef)):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
return
if name not in current_scope.locals:
current_scope = current_scope.parent.scope()
continue
# Okay, found it.
return
if not isinstance(current_scope, astroid.FunctionDef):
self.add_message("nonlocal-without-binding", args=(name,), node=node)
@utils.check_messages("nonlocal-without-binding")
def visit_nonlocal(self, node):
for name in node.names:
self._check_nonlocal_without_binding(node, name)
@utils.check_messages("abstract-class-instantiated")
def visit_call(self, node):
"""Check instantiating abstract class with
abc.ABCMeta as metaclass.
"""
try:
for inferred in node.func.infer():
self._check_inferred_class_is_abstract(inferred, node)
except astroid.InferenceError:
return
def _check_inferred_class_is_abstract(self, inferred, node):
if not isinstance(inferred, astroid.ClassDef):
return
klass = utils.node_frame_class(node)
if klass is inferred:
# Don't emit the warning if the class is instantiated
# in its own body or if the call is not an instance
            # creation. If the class is instantiated in its own
            # body, we're expecting that it knows what it is doing.
return
# __init__ was called
abstract_methods = _has_abstract_methods(inferred)
if not abstract_methods:
return
metaclass = inferred.metaclass()
if metaclass is None:
# Python 3.4 has `abc.ABC`, which won't be detected
# by ClassNode.metaclass()
for ancestor in inferred.ancestors():
if ancestor.qname() == "abc.ABC":
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
break
return
if metaclass.qname() in ABC_METACLASSES:
self.add_message(
"abstract-class-instantiated", args=(inferred.name,), node=node
)
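    # For illustration, hypothetical code this check targets:
    #     import abc
    #     class Task(abc.ABC):
    #         @abc.abstractmethod
    #         def run(self): ...
    #     Task()    # abstract-class-instantiated
    # Instantiation inside the class's own body is deliberately exempted above.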
def _check_yield_outside_func(self, node):
if not isinstance(node.frame(), (astroid.FunctionDef, astroid.Lambda)):
self.add_message("yield-outside-function", node=node)
def _check_else_on_loop(self, node):
"""Check that any loop with an else clause has a break statement."""
if node.orelse and not _loop_exits_early(node):
self.add_message(
"useless-else-on-loop",
node=node,
# This is not optimal, but the line previous
# to the first statement in the else clause
# will usually be the one that contains the else:.
line=node.orelse[0].lineno - 1,
)
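    # For illustration, a hypothetical loop whose body contains no `break`
    # makes its else clause redundant and is reported:
    #     for item in items:
    #         process(item)
    #     else:                 # useless-else-on-loop
    #         finish()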
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astroid.For, astroid.While)):
if node not in _node.orelse:
return
if isinstance(_node, (astroid.ClassDef, astroid.FunctionDef)):
break
if (
isinstance(_node, astroid.TryFinally)
and node in _node.finalbody
and isinstance(node, astroid.Continue)
):
self.add_message("continue-in-finally", node=node)
_node = _node.parent
self.add_message("not-in-loop", node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
parent_frame = node.parent.frame()
# Ignore function stubs created for type information
redefinitions = parent_frame.locals[node.name]
defined_self = next(
(local for local in redefinitions if not utils.is_overload_stub(local)),
node,
)
if defined_self is not node and not astroid.are_exclusive(node, defined_self):
# Additional checks for methods which are not considered
# redefined, since they are already part of the base API.
if (
isinstance(parent_frame, astroid.ClassDef)
and node.name in REDEFINABLE_METHODS
):
return
# Skip typing.overload() functions.
if utils.is_overload_stub(node):
return
# Exempt functions redefined on a condition.
if isinstance(node.parent, astroid.If):
# Exempt "if not <func>" cases
if (
isinstance(node.parent.test, astroid.UnaryOp)
and node.parent.test.op == "not"
and isinstance(node.parent.test.operand, astroid.Name)
and node.parent.test.operand.name == node.name
):
return
# Exempt "if <func> is not None" cases
# pylint: disable=too-many-boolean-expressions
if (
isinstance(node.parent.test, astroid.Compare)
and isinstance(node.parent.test.left, astroid.Name)
and node.parent.test.left.name == node.name
and node.parent.test.ops[0][0] == "is"
and isinstance(node.parent.test.ops[0][1], astroid.Const)
and node.parent.test.ops[0][1].value is None
):
return
# Check if we have forward references for this node.
try:
redefinition_index = redefinitions.index(node)
except ValueError:
pass
else:
for redefinition in redefinitions[:redefinition_index]:
inferred = utils.safe_infer(redefinition)
if (
inferred
and isinstance(inferred, astroid.Instance)
and inferred.qname() == TYPING_FORWARD_REF_QNAME
):
return
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
if dummy_variables_rgx and dummy_variables_rgx.match(node.name):
return
self.add_message(
"function-redefined",
node=node,
args=(redeftype, defined_self.fromlineno),
)
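    # For illustration, a second unconditional definition of the same name,
    # e.g. the hypothetical pair
    #     def helper(): return 1
    #     def helper(): return 2    # function-redefined (reports the first line)
    # would typically be reported, while typing.overload stubs and guarded
    # redefinitions such as `if helper is None:` are exempted above.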
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = interfaces.IAstroidChecker
name = "basic"
msgs = {
"W0101": (
"Unreachable code",
"unreachable",
'Used when there is some code behind a "return" or "raise" '
"statement, which will never be accessed.",
),
"W0102": (
"Dangerous default value %s as argument",
"dangerous-default-value",
"Used when a mutable value as list or dictionary is detected in "
"a default value for an argument.",
),
"W0104": (
"Statement seems to have no effect",
"pointless-statement",
"Used when a statement doesn't have (or at least seems to) any effect.",
),
"W0105": (
"String statement has no effect",
"pointless-string-statement",
"Used when a string is used as a statement (which of course "
"has no effect). This is a particular case of W0104 with its "
"own message so you can easily disable it if you're using "
"those strings as documentation, instead of comments.",
),
"W0106": (
'Expression "%s" is assigned to nothing',
"expression-not-assigned",
"Used when an expression that is not a function call is assigned "
"to nothing. Probably something else was intended.",
),
"W0108": (
"Lambda may not be necessary",
"unnecessary-lambda",
"Used when the body of a lambda expression is a function call "
"on the same argument list as the lambda itself; such lambda "
"expressions are in all but a few cases replaceable with the "
"function being called in the body of the lambda.",
),
"W0109": (
"Duplicate key %r in dictionary",
"duplicate-key",
"Used when a dictionary expression binds the same key multiple times.",
),
"W0122": (
"Use of exec",
"exec-used",
'Used when you use the "exec" statement (function for Python '
"3), to discourage its usage. That doesn't "
"mean you cannot use it !",
),
"W0123": (
"Use of eval",
"eval-used",
'Used when you use the "eval" function, to discourage its '
"usage. Consider using `ast.literal_eval` for safely evaluating "
"strings containing Python expressions "
"from untrusted sources. ",
),
"W0150": (
"%s statement in finally block may swallow exception",
"lost-exception",
"Used when a break or a return statement is found inside the "
"finally clause of a try...finally block: the exceptions raised "
"in the try clause will be silently swallowed instead of being "
"re-raised.",
),
"W0199": (
"Assert called on a 2-item-tuple. Did you mean 'assert x,y'?",
"assert-on-tuple",
"A call of assert on a tuple will always evaluate to true if "
"the tuple is not empty, and will always evaluate to false if "
"it is.",
),
"W0124": (
'Following "as" with another context manager looks like a tuple.',
"confusing-with-statement",
"Emitted when a `with` statement component returns multiple values "
"and uses name binding with `as` only for a part of those values, "
"as in with ctx() as a, b. This can be misleading, since it's not "
"clear if the context manager returns a tuple or if the node without "
"a name binding is another context manager.",
),
"W0125": (
"Using a conditional statement with a constant value",
"using-constant-test",
"Emitted when a conditional statement (If or ternary if) "
"uses a constant value for its test. This might not be what "
"the user intended to do.",
),
"W0126": (
"Using a conditional statement with potentially wrong function or method call due to missing parentheses",
"missing-parentheses-for-call-in-test",
"Emitted when a conditional statement (If or ternary if) "
"seems to wrongly call a function due to missing parentheses",
),
"W0127": (
"Assigning the same variable %r to itself",
"self-assigning-variable",
"Emitted when we detect that a variable is assigned to itself",
),
"W0128": (
"Redeclared variable %r in assignment",
"redeclared-assigned-name",
"Emitted when we detect that a variable was redeclared in the same assignment.",
),
"E0111": (
"The first reversed() argument is not a sequence",
"bad-reversed-sequence",
"Used when the first argument to reversed() builtin "
"isn't a sequence (does not implement __reversed__, "
"nor __getitem__ and __len__",
),
"E0119": (
"format function is not called on str",
"misplaced-format-function",
"Emitted when format function is not called on str object. "
'e.g doing print("value: {}").format(123) instead of '
'print("value: {}".format(123)). This might not be what the user '
"intended to do.",
),
"W0129": (
"Assert statement has a string literal as its first argument. The assert will %s fail.",
"assert-on-string-literal",
"Used when an assert statement has a string literal as its first argument, which will "
"cause the assert to always pass.",
),
}
reports = (("RP0101", "Statistics by type", report_by_type_stats),)
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0, method=0, class_=0)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_if(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_ifexp(self, node):
self._check_using_constant_test(node, node.test)
@utils.check_messages("using-constant-test", "missing-parentheses-for-call-in-test")
def visit_comprehension(self, node):
if node.ifs:
for if_test in node.ifs:
self._check_using_constant_test(node, if_test)
def _check_using_constant_test(self, node, test):
const_nodes = (
astroid.Module,
astroid.scoped_nodes.GeneratorExp,
astroid.Lambda,
astroid.FunctionDef,
astroid.ClassDef,
astroid.bases.Generator,
astroid.UnboundMethod,
astroid.BoundMethod,
astroid.Module,
)
structs = (astroid.Dict, astroid.Tuple, astroid.Set)
        # These nodes are excluded, since they are not constant
        # values and require a computation to happen.
except_nodes = (
astroid.Call,
astroid.BinOp,
astroid.BoolOp,
astroid.UnaryOp,
astroid.Subscript,
)
inferred = None
emit = isinstance(test, (astroid.Const,) + structs + const_nodes)
if not isinstance(test, except_nodes):
inferred = utils.safe_infer(test)
if emit:
self.add_message("using-constant-test", node=node)
elif isinstance(inferred, const_nodes):
# If the constant node is a FunctionDef or Lambda then
            # it may be an illicit function call due to missing parentheses
call_inferred = None
try:
if isinstance(inferred, astroid.FunctionDef):
call_inferred = inferred.infer_call_result()
elif isinstance(inferred, astroid.Lambda):
call_inferred = inferred.infer_call_result(node)
except astroid.InferenceError:
call_inferred = None
if call_inferred:
try:
for inf_call in call_inferred:
if inf_call != astroid.Uninferable:
self.add_message(
"missing-parentheses-for-call-in-test", node=node
)
break
except astroid.InferenceError:
pass
self.add_message("using-constant-test", node=node)
def visit_module(self, _):
"""check module name, docstring and required arguments"""
self.stats["module"] += 1
def visit_classdef(self, node): # pylint: disable=unused-argument
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats["class"] += 1
@utils.check_messages(
"pointless-statement", "pointless-string-statement", "expression-not-assigned"
)
def visit_expr(self, node):
"""Check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astroid.Const) and isinstance(expr.value, str):
            # treat string statement in a separate message
# Handle PEP-257 attribute docstrings.
# An attribute docstring is defined as being a string right after
# an assignment at the module level, class level or __init__ level.
scope = expr.scope()
if isinstance(
scope, (astroid.ClassDef, astroid.Module, astroid.FunctionDef)
):
if isinstance(scope, astroid.FunctionDef) and scope.name != "__init__":
pass
else:
sibling = expr.previous_sibling()
if (
sibling is not None
and sibling.scope() is scope
and isinstance(sibling, (astroid.Assign, astroid.AnnAssign))
):
return
self.add_message("pointless-string-statement", node=node)
return
# Ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield statement
# * an ellipsis (which can be used on Python 3 instead of pass)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else pointless-statement
if (
isinstance(
expr, (astroid.Yield, astroid.Await, astroid.Ellipsis, astroid.Call)
)
or (
isinstance(node.parent, astroid.TryExcept)
and node.parent.body == [node]
)
or (isinstance(expr, astroid.Const) and expr.value is Ellipsis)
):
return
if any(expr.nodes_of_class(astroid.Call)):
self.add_message(
"expression-not-assigned", node=node, args=expr.as_string()
)
else:
self.add_message("pointless-statement", node=node)
@staticmethod
def _filter_vararg(node, call_args):
# Return the arguments for the given call which are
# not passed as vararg.
for arg in call_args:
if isinstance(arg, astroid.Starred):
if (
isinstance(arg.value, astroid.Name)
and arg.value.name != node.args.vararg
):
yield arg
else:
yield arg
@staticmethod
def _has_variadic_argument(args, variadic_name):
if not args:
return True
for arg in args:
if isinstance(arg.value, astroid.Name):
if arg.value.name != variadic_name:
return True
else:
return True
return False
@utils.check_messages("unnecessary-lambda")
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astroid.Call):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
if isinstance(node.body.func, astroid.Attribute) and isinstance(
node.body.func.expr, astroid.Call
):
# Chained call, the intermediate call might
# return something else (but we don't check that, yet).
return
call_site = astroid.arguments.CallSite.from_call(call)
ordinary_args = list(node.args.args)
new_call_args = list(self._filter_vararg(node, call.args))
if node.args.kwarg:
if self._has_variadic_argument(call.kwargs, node.args.kwarg):
return
if node.args.vararg:
if self._has_variadic_argument(call.starargs, node.args.vararg):
return
elif call.starargs:
return
if call.keywords:
# Look for additional keyword arguments that are not part
# of the lambda's signature
lambda_kwargs = {keyword.name for keyword in node.args.defaults}
if len(lambda_kwargs) != len(call_site.keyword_arguments):
# Different lengths, so probably not identical
return
if set(call_site.keyword_arguments).difference(lambda_kwargs):
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(new_call_args):
return
for arg, passed_arg in zip(ordinary_args, new_call_args):
if not isinstance(passed_arg, astroid.Name):
return
if arg.name != passed_arg.name:
return
self.add_message("unnecessary-lambda", line=node.fromlineno, node=node)
@utils.check_messages("dangerous-default-value")
def visit_functiondef(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats["method" if node.is_method() else "function"] += 1
self._check_dangerous_default(node)
visit_asyncfunctiondef = visit_functiondef
def _check_dangerous_default(self, node):
"""Check for dangerous default values as arguments."""
def is_iterable(internal_node):
return isinstance(internal_node, (astroid.List, astroid.Set, astroid.Dict))
        defaults = (node.args.defaults or []) + (node.args.kw_defaults or [])
for default in defaults:
if not default:
continue
try:
value = next(default.infer())
except astroid.InferenceError:
continue
if (
isinstance(value, astroid.Instance)
and value.qname() in DEFAULT_ARGUMENT_SYMBOLS
):
if value is default:
msg = DEFAULT_ARGUMENT_SYMBOLS[value.qname()]
elif isinstance(value, astroid.Instance) or is_iterable(value):
# We are here in the following situation(s):
# * a dict/set/list/tuple call which wasn't inferred
# to a syntax node ({}, () etc.). This can happen
# when the arguments are invalid or unknown to
# the inference.
# * a variable from somewhere else, which turns out to be a list
# or a dict.
if is_iterable(default):
msg = value.pytype()
elif isinstance(default, astroid.Call):
msg = f"{value.name}() ({value.qname()})"
else:
msg = f"{default.as_string()} ({value.qname()})"
else:
# this argument is a name
msg = f"{default.as_string()} ({DEFAULT_ARGUMENT_SYMBOLS[value.qname()]})"
self.add_message("dangerous-default-value", node=node, args=(msg,))
@utils.check_messages("unreachable", "lost-exception")
def visit_return(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
        # Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "return", (astroid.FunctionDef,))
@utils.check_messages("unreachable")
def visit_continue(self, node):
"""check is the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("unreachable", "lost-exception")
def visit_break(self, node):
"""1 - check is the node has a right sibling (if so, that's some
unreachable code)
2 - check is the node is inside the finally clause of a try...finally
block
"""
        # 1 - Does it have a right sibling?
self._check_unreachable(node)
        # 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, "break", (astroid.For, astroid.While))
@utils.check_messages("unreachable")
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@utils.check_messages("exec-used")
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message("exec-used", node=node)
def _check_misplaced_format_function(self, call_node):
if not isinstance(call_node.func, astroid.Attribute):
return
if call_node.func.attrname != "format":
return
expr = utils.safe_infer(call_node.func.expr)
if expr is astroid.Uninferable:
return
if not expr:
            # we are doubtful about the inferred type of the node, so here we
            # just check if format was called on print()
call_expr = call_node.func.expr
if not isinstance(call_expr, astroid.Call):
return
if (
isinstance(call_expr.func, astroid.Name)
and call_expr.func.name == "print"
):
self.add_message("misplaced-format-function", node=call_node)
@utils.check_messages(
"eval-used", "exec-used", "bad-reversed-sequence", "misplaced-format-function"
)
def visit_call(self, node):
"""visit a Call node -> check if this is not a disallowed builtin
call and check for * or ** use
"""
self._check_misplaced_format_function(node)
if isinstance(node.func, astroid.Name):
name = node.func.name
            # ignore the name if it's not a builtin (i.e. not defined in
            # either the local or the global scope)
if not (name in node.frame() or name in node.root()):
if name == "exec":
self.add_message("exec-used", node=node)
elif name == "reversed":
self._check_reversed(node)
elif name == "eval":
self.add_message("eval-used", node=node)
@utils.check_messages("assert-on-tuple", "assert-on-string-literal")
def visit_assert(self, node):
"""check whether assert is used on a tuple or string literal."""
if (
node.fail is None
and isinstance(node.test, astroid.Tuple)
and len(node.test.elts) == 2
):
self.add_message("assert-on-tuple", node=node)
if isinstance(node.test, astroid.Const) and isinstance(node.test.value, str):
if node.test.value:
when = "never"
else:
when = "always"
self.add_message("assert-on-string-literal", node=node, args=(when,))
@utils.check_messages("duplicate-key")
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, _ in node.items:
if isinstance(k, astroid.Const):
key = k.value
if key in keys:
self.add_message("duplicate-key", node=node, args=key)
keys.add(key)
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node): # pylint: disable=unused-argument
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message("unreachable", node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
        If a parent whose type is in breaker_classes is found before a
        try...finally block, the whole check is skipped."""
        # if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand...-children of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, "finalbody") and _node in _parent.finalbody:
self.add_message("lost-exception", node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
def _check_reversed(self, node):
"""check that the argument to `reversed` is a sequence"""
try:
argument = utils.safe_infer(utils.get_argument_from_call(node, position=0))
except utils.NoSuchArgumentError:
pass
else:
if argument is astroid.Uninferable:
return
if argument is None:
# Nothing was inferred.
# Try to see if we have iter().
if isinstance(node.args[0], astroid.Call):
try:
func = next(node.args[0].func.infer())
except astroid.InferenceError:
return
if getattr(
func, "name", None
) == "iter" and utils.is_builtin_object(func):
self.add_message("bad-reversed-sequence", node=node)
return
if isinstance(argument, (astroid.List, astroid.Tuple)):
return
if isinstance(argument, astroid.Instance):
if any(
ancestor.name == "dict" and utils.is_builtin_object(ancestor)
for ancestor in itertools.chain(
(argument._proxied,), argument._proxied.ancestors()
)
):
# Mappings aren't accepted by reversed(), unless
                    # they explicitly provide a __reversed__ method.
try:
argument.locals[REVERSED_PROTOCOL_METHOD]
except KeyError:
self.add_message("bad-reversed-sequence", node=node)
return
if hasattr(argument, "getattr"):
# everything else is not a proper sequence for reversed()
for methods in REVERSED_METHODS:
for meth in methods:
try:
argument.getattr(meth)
except astroid.NotFoundError:
break
else:
break
else:
self.add_message("bad-reversed-sequence", node=node)
else:
self.add_message("bad-reversed-sequence", node=node)
@utils.check_messages("confusing-with-statement")
def visit_with(self, node):
# a "with" statement with multiple managers corresponds
# to one AST "With" node with multiple items
pairs = node.items
if pairs:
for prev_pair, pair in zip(pairs, pairs[1:]):
if isinstance(prev_pair[1], astroid.AssignName) and (
pair[1] is None and not isinstance(pair[0], astroid.Call)
):
# Don't emit a message if the second is a function call
# there's no way that can be mistaken for a name assignment.
# If the line number doesn't match
# we assume it's a nested "with".
self.add_message("confusing-with-statement", node=node)
def _check_self_assigning_variable(self, node):
# Detect assigning to the same variable.
scope = node.scope()
scope_locals = scope.locals
rhs_names = []
targets = node.targets
if isinstance(targets[0], astroid.Tuple):
if len(targets) != 1:
# A complex assignment, so bail out early.
return
targets = targets[0].elts
if len(targets) == 1:
# Unpacking a variable into the same name.
return
if isinstance(node.value, astroid.Name):
if len(targets) != 1:
return
rhs_names = [node.value]
elif isinstance(node.value, astroid.Tuple):
rhs_count = len(node.value.elts)
if len(targets) != rhs_count or rhs_count == 1:
return
rhs_names = node.value.elts
for target, lhs_name in zip(targets, rhs_names):
if not isinstance(lhs_name, astroid.Name):
continue
if not isinstance(target, astroid.AssignName):
continue
if isinstance(scope, astroid.ClassDef) and target.name in scope_locals:
# Check that the scope is different than a class level, which is usually
# a pattern to expose module level attributes as class level ones.
continue
if target.name == lhs_name.name:
self.add_message(
"self-assigning-variable", args=(target.name,), node=target
)
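    # For illustration, hypothetical assignments such as
    #     name = name
    #     first, second = first, second
    # would typically be reported as self-assigning-variable; class-level
    # re-exports of an existing name are deliberately skipped above.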
def _check_redeclared_assign_name(self, targets):
dummy_variables_rgx = lint_utils.get_global_option(
self, "dummy-variables-rgx", default=None
)
for target in targets:
if not isinstance(target, astroid.Tuple):
continue
found_names = []
for element in target.elts:
if isinstance(element, astroid.Tuple):
self._check_redeclared_assign_name([element])
elif isinstance(element, astroid.AssignName) and element.name != "_":
if dummy_variables_rgx and dummy_variables_rgx.match(element.name):
return
found_names.append(element.name)
names = collections.Counter(found_names)
for name, count in names.most_common():
if count > 1:
self.add_message(
"redeclared-assigned-name", args=(name,), node=target
)
@utils.check_messages("self-assigning-variable", "redeclared-assigned-name")
def visit_assign(self, node):
self._check_self_assigning_variable(node)
self._check_redeclared_assign_name(node.targets)
@utils.check_messages("redeclared-assigned-name")
def visit_for(self, node):
self._check_redeclared_assign_name([node.target])
KNOWN_NAME_TYPES = {
"module",
"const",
"class",
"function",
"method",
"attr",
"argument",
"variable",
"class_attribute",
"class_const",
"inlinevar",
}
HUMAN_READABLE_TYPES = {
"module": "module",
"const": "constant",
"class": "class",
"function": "function",
"method": "method",
"attr": "attribute",
"argument": "argument",
"variable": "variable",
"class_attribute": "class attribute",
"class_const": "class constant",
"inlinevar": "inline iteration",
}
DEFAULT_NAMING_STYLES = {
"module": "snake_case",
"const": "UPPER_CASE",
"class": "PascalCase",
"function": "snake_case",
"method": "snake_case",
"attr": "snake_case",
"argument": "snake_case",
"variable": "snake_case",
"class_attribute": "any",
"class_const": "UPPER_CASE",
"inlinevar": "any",
}
def _create_naming_options():
name_options = []
for name_type in sorted(KNOWN_NAME_TYPES):
human_readable_name = HUMAN_READABLE_TYPES[name_type]
default_style = DEFAULT_NAMING_STYLES[name_type]
name_type = name_type.replace("_", "-")
name_options.append(
(
f"{name_type}-naming-style",
{
"default": default_style,
"type": "choice",
"choices": list(NAMING_STYLES.keys()),
"metavar": "<style>",
"help": "Naming style matching correct %s names."
% (human_readable_name,),
},
)
)
name_options.append(
(
f"{name_type}-rgx",
{
"default": None,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression matching correct %s names. Overrides %s-naming-style."
% (human_readable_name, name_type),
},
)
)
return tuple(name_options)
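# For example, for the name type "class" the loop above yields the option pair
# ("class-naming-style", {... "default": "PascalCase" ...}) followed by
# ("class-rgx", {...}); a custom regex, when given, overrides the naming style.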
class NameChecker(_BasicChecker):
msgs = {
"C0103": (
'%s name "%s" doesn\'t conform to %s',
"invalid-name",
"Used when the name doesn't conform to naming rules "
"associated to its type (constant, variable, class...).",
),
"C0104": (
'Disallowed name "%s"',
"disallowed-name",
"Used when the name matches bad-names or bad-names-rgxs- (unauthorized names).",
{
"old_names": [
("C0102", "blacklisted-name"),
]
},
),
"C0144": (
'%s name "%s" contains a non-ASCII unicode character',
"non-ascii-name",
"Used when the name contains at least one non-ASCII unicode character.",
),
"W0111": (
"Name %s will become a keyword in Python %s",
"assign-to-new-keyword",
"Used when assignment will become invalid in future "
"Python release due to introducing new keyword.",
),
}
options = (
(
"good-names",
{
"default": ("i", "j", "k", "ex", "Run", "_"),
"type": "csv",
"metavar": "<names>",
"help": "Good variable names which should always be accepted,"
" separated by a comma.",
},
),
(
"good-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Good variable names regexes, separated by a comma. If names match any regex,"
" they will always be accepted",
},
),
(
"bad-names",
{
"default": ("foo", "bar", "baz", "toto", "tutu", "tata"),
"type": "csv",
"metavar": "<names>",
"help": "Bad variable names which should always be refused, "
"separated by a comma.",
},
),
(
"bad-names-rgxs",
{
"default": "",
"type": "regexp_csv",
"metavar": "<names>",
"help": "Bad variable names regexes, separated by a comma. If names match any regex,"
" they will always be refused",
},
),
(
"name-group",
{
"default": (),
"type": "csv",
"metavar": "<name1:name2>",
"help": (
"Colon-delimited sets of names that determine each"
" other's naming style when the name regexes"
" allow several styles."
),
},
),
(
"include-naming-hint",
{
"default": False,
"type": "yn",
"metavar": "<y_or_n>",
"help": "Include a hint for the correct naming format with invalid-name.",
},
),
(
"property-classes",
{
"default": ("abc.abstractproperty",),
"type": "csv",
"metavar": "<decorator names>",
"help": "List of decorators that produce properties, such as "
"abc.abstractproperty. Add to this list to register "
"other decorators that produce valid properties. "
"These decorators are taken in consideration only for invalid-name.",
},
),
) + _create_naming_options()
KEYWORD_ONSET = {(3, 7): {"async", "await"}}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self._name_category = {}
self._name_group = {}
self._bad_names = {}
self._name_regexps = {}
self._name_hints = {}
self._good_names_rgxs_compiled = []
self._bad_names_rgxs_compiled = []
self._non_ascii_rgx_compiled = re.compile("[^\u0000-\u007F]")
def open(self):
self.stats = self.linter.add_stats(
badname_module=0,
badname_class=0,
badname_function=0,
badname_method=0,
badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0,
badname_class_attribute=0,
badname_class_const=0,
)
for group in self.config.name_group:
for name_type in group.split(":"):
self._name_group[name_type] = f"group_{group}"
regexps, hints = self._create_naming_rules()
self._name_regexps = regexps
self._name_hints = hints
self._good_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.good_names_rgxs
]
self._bad_names_rgxs_compiled = [
re.compile(rgxp) for rgxp in self.config.bad_names_rgxs
]
def _create_naming_rules(self):
regexps = {}
hints = {}
for name_type in KNOWN_NAME_TYPES:
naming_style_option_name = f"{name_type}_naming_style"
naming_style_name = getattr(self.config, naming_style_option_name)
regexps[name_type] = NAMING_STYLES[naming_style_name].get_regex(name_type)
custom_regex_setting_name = f"{name_type}_rgx"
custom_regex = getattr(self.config, custom_regex_setting_name, None)
if custom_regex is not None:
regexps[name_type] = custom_regex
if custom_regex is not None:
hints[name_type] = "%r pattern" % custom_regex.pattern
else:
hints[name_type] = "%s naming style" % naming_style_name
return regexps, hints
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_module(self, node):
self._check_name("module", node.name.split(".")[-1], node)
self._bad_names = {}
def leave_module(self, node): # pylint: disable=unused-argument
for all_groups in self._bad_names.values():
if len(all_groups) < 2:
continue
groups = collections.defaultdict(list)
min_warnings = sys.maxsize
for group in all_groups.values():
groups[len(group)].append(group)
min_warnings = min(len(group), min_warnings)
if len(groups[min_warnings]) > 1:
by_line = sorted(
groups[min_warnings],
key=lambda group: min(warning[0].lineno for warning in group),
)
warnings = itertools.chain(*by_line[1:])
else:
warnings = groups[min_warnings][0]
for args in warnings:
self._raise_name_warning(*args)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_classdef(self, node):
self._check_assign_to_new_keyword_violation(node.name, node)
self._check_name("class", node.name, node)
for attr, anodes in node.instance_attrs.items():
if not any(node.instance_attr_ancestors(attr)):
self._check_name("attr", attr, anodes[0])
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_functiondef(self, node):
# Do not emit any warnings if the method is just an implementation
# of a base class method.
self._check_assign_to_new_keyword_violation(node.name, node)
confidence = interfaces.HIGH
if node.is_method():
if utils.overrides_a_method(node.parent.frame(), node.name):
return
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
self._check_name(
_determine_function_name_type(node, config=self.config),
node.name,
node,
confidence,
)
# Check argument names
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
visit_asyncfunctiondef = visit_functiondef
@utils.check_messages("disallowed-name", "invalid-name", "non-ascii-name")
def visit_global(self, node):
for name in node.names:
self._check_name("const", name, node)
@utils.check_messages(
"disallowed-name", "invalid-name", "assign-to-new-keyword", "non-ascii-name"
)
def visit_assignname(self, node):
"""check module level assigned names"""
self._check_assign_to_new_keyword_violation(node.name, node)
frame = node.frame()
assign_type = node.assign_type()
if isinstance(assign_type, astroid.Comprehension):
self._check_name("inlinevar", node.name, node)
elif isinstance(frame, astroid.Module):
if isinstance(assign_type, astroid.Assign):
if isinstance(utils.safe_infer(assign_type.value), astroid.ClassDef):
self._check_name("class", node.name, node)
# Don't emit if the name redefines an import
# in an ImportError except handler.
elif not _redefines_import(node) and isinstance(
utils.safe_infer(assign_type.value), astroid.Const
):
self._check_name("const", node.name, node)
elif isinstance(assign_type, astroid.ExceptHandler):
self._check_name("variable", node.name, node)
elif isinstance(
assign_type, astroid.AnnAssign
) and utils.is_assign_name_annotated_with(node, "Final"):
self._check_name("const", node.name, node)
elif isinstance(frame, astroid.FunctionDef):
            # globally introduced variables aren't in the function locals
if node.name in frame and node.name not in frame.argnames():
if not _redefines_import(node):
self._check_name("variable", node.name, node)
elif isinstance(frame, astroid.ClassDef):
if not list(frame.local_attr_ancestors(node.name)):
for ancestor in frame.ancestors():
if (
ancestor.name == "Enum"
and ancestor.root().name == "enum"
or utils.is_assign_name_annotated_with(node, "Final")
):
self._check_name("class_const", node.name, node)
break
else:
self._check_name("class_attribute", node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astroid.AssignName):
self._check_name("argument", arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _find_name_group(self, node_type):
return self._name_group.get(node_type, node_type)
def _raise_name_warning(
self, node, node_type, name, confidence, warning="invalid-name"
):
type_label = HUMAN_READABLE_TYPES[node_type]
hint = self._name_hints[node_type]
if self.config.include_naming_hint:
hint += " (%r pattern)" % self._name_regexps[node_type].pattern
args = (
(type_label.capitalize(), name, hint)
if warning == "invalid-name"
else (type_label.capitalize(), name)
)
self.add_message(warning, node=node, args=args, confidence=confidence)
self.stats["badname_" + node_type] += 1
def _name_allowed_by_regex(self, name: str) -> bool:
return name in self.config.good_names or any(
pattern.match(name) for pattern in self._good_names_rgxs_compiled
)
def _name_disallowed_by_regex(self, name: str) -> bool:
return name in self.config.bad_names or any(
pattern.match(name) for pattern in self._bad_names_rgxs_compiled
)
def _check_name(self, node_type, name, node, confidence=interfaces.HIGH):
"""check for a name using the type's regexp"""
non_ascii_match = self._non_ascii_rgx_compiled.match(name)
if non_ascii_match is not None:
self._raise_name_warning(
node, node_type, name, confidence, warning="non-ascii-name"
)
def _should_exempt_from_invalid_name(node):
if node_type == "variable":
inferred = utils.safe_infer(node)
if isinstance(inferred, astroid.ClassDef):
return True
return False
if utils.is_inside_except(node):
clobbering, _ = utils.clobber_in_except(node)
if clobbering:
return
if self._name_allowed_by_regex(name=name):
return
if self._name_disallowed_by_regex(name=name):
self.stats["badname_" + node_type] += 1
self.add_message("disallowed-name", node=node, args=name)
return
regexp = self._name_regexps[node_type]
match = regexp.match(name)
if _is_multi_naming_match(match, node_type, confidence):
name_group = self._find_name_group(node_type)
bad_name_group = self._bad_names.setdefault(name_group, {})
warnings = bad_name_group.setdefault(match.lastgroup, [])
warnings.append((node, node_type, name, confidence))
if match is None and not _should_exempt_from_invalid_name(node):
self._raise_name_warning(node, node_type, name, confidence)
def _check_assign_to_new_keyword_violation(self, name, node):
keyword_first_version = self._name_became_keyword_in_version(
name, self.KEYWORD_ONSET
)
if keyword_first_version is not None:
self.add_message(
"assign-to-new-keyword",
node=node,
args=(name, keyword_first_version),
confidence=interfaces.HIGH,
)
@staticmethod
def _name_became_keyword_in_version(name, rules):
for version, keywords in rules.items():
if name in keywords and sys.version_info < version:
return ".".join(str(v) for v in version)
return None
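    # For illustration, an assignment such as the hypothetical
    #     async = get_event_loop()
    # linted under an interpreter older than 3.7 would typically be reported as
    # assign-to-new-keyword, since "async" became a keyword in Python 3.7.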
class DocStringChecker(_BasicChecker):
msgs = {
"C0112": (
"Empty %s docstring",
"empty-docstring",
"Used when a module, function, class or method has an empty "
"docstring (it would be too easy ;).",
{"old_names": [("W0132", "old-empty-docstring")]},
),
"C0114": (
"Missing module docstring",
"missing-module-docstring",
"Used when a module has no docstring."
"Empty modules do not require a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0115": (
"Missing class docstring",
"missing-class-docstring",
"Used when a class has no docstring."
"Even an empty class must have a docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
"C0116": (
"Missing function or method docstring",
"missing-function-docstring",
"Used when a function or method has no docstring."
"Some special methods like __init__ do not require a "
"docstring.",
{"old_names": [("C0111", "missing-docstring")]},
),
}
options = (
(
"no-docstring-rgx",
{
"default": NO_REQUIRED_DOC_RGX,
"type": "regexp",
"metavar": "<regexp>",
"help": "Regular expression which should only match "
"function or class names that do not require a "
"docstring.",
},
),
(
"docstring-min-length",
{
"default": -1,
"type": "int",
"metavar": "<int>",
"help": (
"Minimum line length for functions/classes that"
" require docstrings, shorter ones are exempt."
),
},
),
)
def open(self):
self.stats = self.linter.add_stats(
undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0,
)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_module(self, node):
self._check_docstring("module", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_classdef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring("class", node)
@utils.check_messages("missing-docstring", "empty-docstring")
def visit_functiondef(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = "method" if node.is_method() else "function"
if (
is_property_setter(node)
or is_property_deleter(node)
or is_overload_stub(node)
):
return
if isinstance(node.parent.frame(), astroid.ClassDef):
overridden = False
confidence = (
interfaces.INFERENCE
if utils.has_known_bases(node.parent.frame())
else interfaces.INFERENCE_FAILURE
)
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and isinstance(
ancestor[node.name], astroid.FunctionDef
):
overridden = True
break
self._check_docstring(
ftype, node, report_missing=not overridden, confidence=confidence
)
elif isinstance(node.parent.frame(), astroid.Module):
self._check_docstring(ftype, node)
else:
return
visit_asyncfunctiondef = visit_functiondef
def _check_docstring(
self, node_type, node, report_missing=True, confidence=interfaces.HIGH
):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
docstring = _infer_dunder_doc_attribute(node)
if docstring is None:
if not report_missing:
return
lines = utils.get_node_last_lineno(node) - node.lineno
if node_type == "module" and not lines:
# If the module has no body, there's no reason
# to require a docstring.
return
max_lines = self.config.docstring_min_length
if node_type != "module" and max_lines > -1 and lines < max_lines:
return
self.stats["undocumented_" + node_type] += 1
if (
node.body
and isinstance(node.body[0], astroid.Expr)
and isinstance(node.body[0].value, astroid.Call)
):
# Most likely a string with a format call. Let's see.
func = utils.safe_infer(node.body[0].value.func)
if isinstance(func, astroid.BoundMethod) and isinstance(
func.bound, astroid.Instance
):
# Strings.
if func.bound.name in ("str", "unicode", "bytes"):
return
if node_type == "module":
message = "missing-module-docstring"
elif node_type == "class":
message = "missing-class-docstring"
else:
message = "missing-function-docstring"
self.add_message(message, node=node, confidence=confidence)
elif not docstring.strip():
self.stats["undocumented_" + node_type] += 1
self.add_message(
"empty-docstring", node=node, args=(node_type,), confidence=confidence
)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {
"W0107": (
"Unnecessary pass statement",
"unnecessary-pass",
'Used when a "pass" statement that can be avoided is encountered.',
)
}
@utils.check_messages("unnecessary-pass")
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1 or (
isinstance(node.parent, (astroid.ClassDef, astroid.FunctionDef))
and (node.parent.doc is not None)
):
self.add_message("unnecessary-pass", node=node)
def _is_one_arg_pos_call(call):
"""Is this a call with exactly 1 argument,
where that argument is positional?
"""
return isinstance(call, astroid.Call) and len(call.args) == 1 and not call.keywords
def _infer_dunder_doc_attribute(node):
# Try to see if we have a `__doc__` attribute.
try:
docstring = node["__doc__"]
except KeyError:
return None
docstring = utils.safe_infer(docstring)
if not docstring:
return None
if not isinstance(docstring, astroid.Const):
return None
return docstring.value
class ComparisonChecker(_BasicChecker):
"""Checks for comparisons
- singleton comparison: 'expr == True', 'expr == False' and 'expr == None'
- yoda condition: 'const "comp" right' where comp can be '==', '!=', '<',
'<=', '>' or '>=', and right can be a variable, an attribute, a method or
a function
"""
msgs = {
"C0121": (
"Comparison %s should be %s",
"singleton-comparison",
"Used when an expression is compared to singleton "
"values like True, False or None.",
),
"C0122": (
"Comparison should be %s",
"misplaced-comparison-constant",
"Used when the constant is placed on the left side "
"of a comparison. It is usually clearer in intent to "
"place it in the right hand side of the comparison.",
),
"C0123": (
"Use isinstance() rather than type() for a typecheck.",
"unidiomatic-typecheck",
"The idiomatic way to perform an explicit typecheck in "
"Python is to use isinstance(x, Y) rather than "
"type(x) == Y, type(x) is Y. Though there are unusual "
"situations where these give different results.",
{"old_names": [("W0154", "old-unidiomatic-typecheck")]},
),
"R0123": (
"Comparison to literal",
"literal-comparison",
"Used when comparing an object to a literal, which is usually "
"what you do not want to do, since you can compare to a different "
"literal than what was expected altogether.",
),
"R0124": (
"Redundant comparison - %s",
"comparison-with-itself",
"Used when something is compared against itself.",
),
"W0143": (
"Comparing against a callable, did you omit the parenthesis?",
"comparison-with-callable",
"This message is emitted when pylint detects that a comparison with a "
"callable was made, which might suggest that some parenthesis were omitted, "
"resulting in potential unwanted behaviour.",
),
"W0177": (
"Comparison %s should be %s",
"nan-comparison",
"Used when an expression is compared to NaN"
"values like numpy.NaN and float('nan')",
),
}
def _check_singleton_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
"""Check if == or != is being used to compare a singleton value"""
singleton_values = (True, False, None)
def _is_singleton_const(node) -> bool:
return isinstance(node, astroid.Const) and any(
node.value is value for value in singleton_values
)
if _is_singleton_const(left_value):
singleton, other_value = left_value.value, right_value
elif _is_singleton_const(right_value):
singleton, other_value = right_value.value, left_value
else:
return
singleton_comparison_example = {False: "'{} is {}'", True: "'{} is not {}'"}
# True/False singletons have a special-cased message in case the user is
# mistakenly using == or != to check for truthiness
if singleton in (True, False):
suggestion_template = (
"{} if checking for the singleton value {}, or {} if testing for {}"
)
truthiness_example = {False: "not {}", True: "{}"}
truthiness_phrase = {True: "truthiness", False: "falsiness"}
# Looks for comparisons like x == True or x != False
checking_truthiness = singleton is not checking_for_absence
suggestion = suggestion_template.format(
singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
),
singleton,
(
"'bool({})'"
if not utils.is_test_condition(root_node) and checking_truthiness
else "'{}'"
).format(
truthiness_example[checking_truthiness].format(
other_value.as_string()
)
),
truthiness_phrase[checking_truthiness],
)
else:
suggestion = singleton_comparison_example[checking_for_absence].format(
left_value.as_string(), right_value.as_string()
)
self.add_message(
"singleton-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_nan_comparison(
self, left_value, right_value, root_node, checking_for_absence: bool = False
):
def _is_float_nan(node):
try:
if isinstance(node, astroid.Call) and len(node.args) == 1:
if (
node.args[0].value.lower() == "nan"
and node.inferred()[0].pytype() == "builtins.float"
):
return True
return False
except AttributeError:
return False
def _is_numpy_nan(node):
if isinstance(node, astroid.Attribute) and node.attrname == "NaN":
if isinstance(node.expr, astroid.Name):
return node.expr.name in ("numpy", "nmp", "np")
return False
def _is_nan(node) -> bool:
return _is_float_nan(node) or _is_numpy_nan(node)
nan_left = _is_nan(left_value)
if not nan_left and not _is_nan(right_value):
return
absence_text = ""
if checking_for_absence:
absence_text = "not "
if nan_left:
suggestion = f"'{absence_text}math.isnan({right_value.as_string()})'"
else:
suggestion = f"'{absence_text}math.isnan({left_value.as_string()})'"
self.add_message(
"nan-comparison",
node=root_node,
args=(f"'{root_node.as_string()}'", suggestion),
)
def _check_literal_comparison(self, literal, node):
"""Check if we compare to a literal, which is usually what we do not want to do."""
nodes = (astroid.List, astroid.Tuple, astroid.Dict, astroid.Set)
is_other_literal = isinstance(literal, nodes)
is_const = False
if isinstance(literal, astroid.Const):
if isinstance(literal.value, bool) or literal.value is None:
                # Not interested in these values.
return
is_const = isinstance(literal.value, (bytes, str, int, float))
if is_const or is_other_literal:
self.add_message("literal-comparison", node=node)
def _check_misplaced_constant(self, node, left, right, operator):
if isinstance(right, astroid.Const):
return
operator = REVERSED_COMPS.get(operator, operator)
suggestion = f"{right.as_string()} {operator} {left.value!r}"
self.add_message("misplaced-comparison-constant", node=node, args=(suggestion,))
def _check_logical_tautology(self, node):
"""Check if identifier is compared against itself.
:param node: Compare node
:type node: astroid.node_classes.Compare
:Example:
val = 786
if val == val: # [comparison-with-itself]
pass
"""
left_operand = node.left
right_operand = node.ops[0][1]
operator = node.ops[0][0]
if isinstance(left_operand, astroid.Const) and isinstance(
right_operand, astroid.Const
):
left_operand = left_operand.value
right_operand = right_operand.value
elif isinstance(left_operand, astroid.Name) and isinstance(
right_operand, astroid.Name
):
left_operand = left_operand.name
right_operand = right_operand.name
if left_operand == right_operand:
suggestion = f"{left_operand} {operator} {right_operand}"
self.add_message("comparison-with-itself", node=node, args=(suggestion,))
def _check_callable_comparison(self, node):
operator = node.ops[0][0]
if operator not in COMPARISON_OPERATORS:
return
bare_callables = (astroid.FunctionDef, astroid.BoundMethod)
left_operand, right_operand = node.left, node.ops[0][1]
        # this message should be emitted only when there is a comparison of a
        # bare callable with a non-bare callable.
if (
sum(
1
for operand in (left_operand, right_operand)
if isinstance(utils.safe_infer(operand), bare_callables)
)
== 1
):
self.add_message("comparison-with-callable", node=node)
@utils.check_messages(
"singleton-comparison",
"misplaced-comparison-constant",
"unidiomatic-typecheck",
"literal-comparison",
"comparison-with-itself",
"comparison-with-callable",
)
def visit_compare(self, node):
self._check_callable_comparison(node)
self._check_logical_tautology(node)
self._check_unidiomatic_typecheck(node)
# NOTE: this checker only works with binary comparisons like 'x == 42'
# but not 'x == y == 42'
if len(node.ops) != 1:
return
left = node.left
operator, right = node.ops[0]
if operator in COMPARISON_OPERATORS and isinstance(left, astroid.Const):
self._check_misplaced_constant(node, left, right, operator)
if operator in ("==", "!="):
self._check_singleton_comparison(
left, right, node, checking_for_absence=operator == "!="
)
if operator in ("==", "!=", "is", "is not"):
self._check_nan_comparison(
left, right, node, checking_for_absence=operator in ("!=", "is not")
)
if operator in ("is", "is not"):
self._check_literal_comparison(right, node)
def _check_unidiomatic_typecheck(self, node):
operator, right = node.ops[0]
if operator in TYPECHECK_COMPARISON_OPERATORS:
left = node.left
if _is_one_arg_pos_call(left):
self._check_type_x_is_y(node, left, operator, right)
def _check_type_x_is_y(self, node, left, operator, right):
"""Check for expressions like type(x) == Y."""
left_func = utils.safe_infer(left.func)
if not (
isinstance(left_func, astroid.ClassDef) and left_func.qname() == TYPE_QNAME
):
return
if operator in ("is", "is not") and _is_one_arg_pos_call(right):
right_func = utils.safe_infer(right.func)
if (
isinstance(right_func, astroid.ClassDef)
and right_func.qname() == TYPE_QNAME
):
# type(x) == type(a)
right_arg = utils.safe_infer(right.args[0])
if not isinstance(right_arg, LITERAL_NODE_TYPES):
# not e.g. type(x) == type([])
return
self.add_message("unidiomatic-typecheck", node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter))
|
__init__
|
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
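A minimal usage sketch of two of these combinations (the username, password,
and fingerprint values below are placeholders, not part of the library):
    # SRP client authentication plus fingerprint-pinned server authentication
    helper = ClientHelper(username="alice", password="s3cret",
                          x509Fingerprint="da39a3ee5e6b4b0d3255bfef95601890afd80709")
    # no client authentication and no extra server checks
    anonymous = ClientHelper()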
|
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.Checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
# MASKED: __init__ function (lines 12-144)
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.username and self.sharedKey:
tlsConnection.handshakeClientSharedKey(username=self.username,
sharedKey=self.sharedKey,
settings=self.settings)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
self.tlsSession = tlsConnection.session
|
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
| 12
| 144
|
"""
A helper class for using TLS Lite with stdlib clients
(httplib, xmlrpclib, imaplib, poplib).
"""
from tlslite.Checker import Checker
class ClientHelper:
"""This is a helper class used to integrate TLS Lite with various
TLS clients (e.g. poplib, smtplib, httplib, etc.)"""
def __init__(self,
username=None, password=None, sharedKey=None,
certChain=None, privateKey=None,
cryptoID=None, protocol=None,
x509Fingerprint=None,
x509TrustList=None, x509CommonName=None,
settings = None):
"""
For client authentication, use one of these argument
combinations:
- username, password (SRP)
- username, sharedKey (shared-key)
- certChain, privateKey (certificate)
For server authentication, you can either rely on the
implicit mutual authentication performed by SRP or
shared-keys, or you can do certificate-based server
authentication with one of these argument combinations:
- cryptoID[, protocol] (requires cryptoIDlib)
- x509Fingerprint
- x509TrustList[, x509CommonName] (requires cryptlib_py)
Certificate-based server authentication is compatible with
SRP or certificate-based client authentication. It is
not compatible with shared-keys.
The constructor does not perform the TLS handshake itself, but
simply stores these arguments for later. The handshake is
performed only when this class needs to connect with the
server. Then you should be prepared to handle TLS-specific
exceptions. See the client handshake functions in
L{tlslite.TLSConnection.TLSConnection} for details on which
exceptions might be raised.
@type username: str
@param username: SRP or shared-key username. Requires the
'password' or 'sharedKey' argument.
@type password: str
@param password: SRP password for mutual authentication.
Requires the 'username' argument.
@type sharedKey: str
@param sharedKey: Shared key for mutual authentication.
Requires the 'username' argument.
@type certChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@param certChain: Certificate chain for client authentication.
Requires the 'privateKey' argument. Excludes the SRP or
shared-key related arguments.
@type privateKey: L{tlslite.utils.RSAKey.RSAKey}
@param privateKey: Private key for client authentication.
Requires the 'certChain' argument. Excludes the SRP or
shared-key related arguments.
@type cryptoID: str
@param cryptoID: cryptoID for server authentication. Mutually
exclusive with the 'x509...' arguments.
@type protocol: str
@param protocol: cryptoID protocol URI for server
authentication. Requires the 'cryptoID' argument.
@type x509Fingerprint: str
@param x509Fingerprint: Hex-encoded X.509 fingerprint for
server authentication. Mutually exclusive with the 'cryptoID'
and 'x509TrustList' arguments.
@type x509TrustList: list of L{tlslite.X509.X509}
@param x509TrustList: A list of trusted root certificates. The
other party must present a certificate chain which extends to
one of these root certificates. The cryptlib_py module must be
installed to use this parameter. Mutually exclusive with the
'cryptoID' and 'x509Fingerprint' arguments.
@type x509CommonName: str
@param x509CommonName: The end-entity certificate's 'CN' field
must match this value. For a web server, this is typically a
server name such as 'www.amazon.com'. Mutually exclusive with
the 'cryptoID' and 'x509Fingerprint' arguments. Requires the
'x509TrustList' argument.
@type settings: L{tlslite.HandshakeSettings.HandshakeSettings}
@param settings: Various settings which can be used to control
the ciphersuites, certificate types, and SSL/TLS versions
offered by the client.
"""
self.username = None
self.password = None
self.sharedKey = None
self.certChain = None
self.privateKey = None
self.checker = None
#SRP Authentication
if username and password and not \
(sharedKey or certChain or privateKey):
self.username = username
self.password = password
#Shared Key Authentication
elif username and sharedKey and not \
(password or certChain or privateKey):
self.username = username
self.sharedKey = sharedKey
#Certificate Chain Authentication
elif certChain and privateKey and not \
(username or password or sharedKey):
self.certChain = certChain
self.privateKey = privateKey
#No Authentication
elif not password and not username and not \
sharedKey and not certChain and not privateKey:
pass
else:
raise ValueError("Bad parameters")
#Authenticate the server based on its cryptoID or fingerprint
if sharedKey and (cryptoID or protocol or x509Fingerprint):
raise ValueError("Can't use shared keys with other forms of"\
"authentication")
self.checker = Checker(cryptoID, protocol, x509Fingerprint,
x509TrustList, x509CommonName)
self.settings = settings
self.tlsSession = None
def _handshake(self, tlsConnection):
if self.username and self.password:
tlsConnection.handshakeClientSRP(username=self.username,
password=self.password,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
elif self.username and self.sharedKey:
tlsConnection.handshakeClientSharedKey(username=self.username,
sharedKey=self.sharedKey,
settings=self.settings)
else:
tlsConnection.handshakeClientCert(certChain=self.certChain,
privateKey=self.privateKey,
checker=self.checker,
settings=self.settings,
session=self.tlsSession)
self.tlsSession = tlsConnection.session
|
spline_filter1d
|
Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 0 and <= 5;
orders 0 and 1 simply return a copy of the input.
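A short usage sketch (the array values are arbitrary; spline_filter1d is
assumed to be imported from this module):
    import numpy
    a = numpy.arange(12.0).reshape(4, 3)
    # cubic-spline coefficients along axis 0; orders 0 and 1 return a plain copy
    coeffs = spline_filter1d(a, order=3, axis=0)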
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
# MASKED: spline_filter1d function (lines 40-59)
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
        # use floor division so `size` stays an integer for the loop below
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 0 and <= 5;
    orders 0 and 1 simply return a copy of the input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
| 40
| 59
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 0 and <= 5;
    orders 0 and 1 simply return a copy of the input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
        # use floor division so `size` stays an integer for the loop below
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
spline_filter
|
Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
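A small illustration of the note above (a sketch; the input array is arbitrary
and spline_filter is assumed to be imported from this module). Requesting a
float64 output keeps the intermediate one-dimensional passes in full precision:
    import numpy
    a = numpy.arange(12.0).reshape(4, 3)
    # prefilter for later order-3 interpolation; intermediates are stored as float64
    coeffs = spline_filter(a, order=3, output=numpy.float64)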
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
    spline filter. The order of the spline must be >= 0 and <= 5;
    orders 0 and 1 simply return a copy of the input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
# MASKED: spline_filter function (lines 62-85)
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
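# Usage sketch for affine_transform, added for illustration (the helper below
# is hypothetical and is never called). A 1-D `matrix` is taken as the
# diagonal of the transform, so output index i samples the input at
# 0.5*i + offset, i.e. a 2x magnification here.
def _example_affine_transform_diagonal():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    return affine_transform(a, [0.5, 0.5], offset=0.0,
                            output_shape=(8, 6), order=1)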
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
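# Usage sketch for shift, added for illustration (the helper below is
# hypothetical and is never called). Shifting by (1, 0) moves every row down
# by one; the vacated first row is filled with cval (0.0 by default, because
# mode='constant').
def _example_shift():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    return shift(a, (1, 0), order=1)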
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
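# Usage sketch for zoom, added for illustration (the helper below is
# hypothetical and is never called). A scalar zoom factor applies to every
# axis, so a (4, 3) input becomes an (8, 6) output; the endpoints of the input
# grid map onto the endpoints of the output grid.
def _example_zoom():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    return zoom(a, 2.0, order=1)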
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
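# Usage sketch for rotate, added for illustration (the helper below is
# hypothetical and is never called). A 90-degree rotation in the (0, 1) plane
# with reshape=True changes the output shape from (4, 3) to (3, 4) so the
# whole input remains visible.
def _example_rotate():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    return rotate(a, 90.0, axes=(1, 0), order=1)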
|
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
| 62
| 85
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
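# Usage sketch for spline_filter1d, added for illustration (the helper below
# is hypothetical and is never called). Filtering along the last axis replaces
# each row by its cubic B-spline coefficients; orders 0 and 1 just copy the
# input.
def _example_spline_filter1d():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    return spline_filter1d(a, order=3, axis=-1)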
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
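# Usage sketch, added for illustration (the helper below is hypothetical and
# is never called): a pre-computed spline_filter result can be reused with
# prefilter=False, so the same input is not re-filtered for every
# interpolation call.
def _example_spline_filter_reuse():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    coeffs = spline_filter(a, order=3)
    return map_coordinates(coeffs, [[0.5, 1.5], [0.5, 1.5]], order=3,
                           prefilter=False)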
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coordinates):
    ...     return (output_coordinates[0] - 0.5, output_coordinates[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
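# Usage sketch for geometric_transform, added for illustration (the helper
# below is hypothetical and is never called). The mapping callable receives an
# output coordinate tuple and must return the corresponding input coordinate
# tuple; here it applies a half-pixel shift, analogous to the docstring
# example above but with linear interpolation.
def _example_geometric_transform():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    def shift_half(output_coords):
        return (output_coords[0] - 0.5, output_coords[1] - 0.5)
    return geometric_transform(a, shift_half, order=1)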
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].
    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
    is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
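# Usage sketch for the full 2-D matrix form of affine_transform, added for
# illustration (the helper below is hypothetical and is never called). Output
# coordinate o is sampled from the input at matrix.dot(o) + offset, so this
# swap matrix effectively transposes the square array.
def _example_affine_transform_matrix():
    import numpy
    a = numpy.arange(16.).reshape((4, 4))
    swap = numpy.array([[0., 1.], [1., 0.]])
    return affine_transform(a, swap, offset=0.0, order=1)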
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
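# Usage sketch contrasting boundary modes of shift, added for illustration
# (the helper below is hypothetical and is never called). With mode='nearest'
# the row shifted in from outside repeats the edge row instead of being filled
# with cval.
def _example_shift_nearest():
    import numpy
    a = numpy.arange(12.).reshape((4, 3))
    return shift(a, (1, 0), order=1, mode='nearest')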
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
geometric_transform
|
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Examples
--------
>>> import numpy as np
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coordinates):
...     return (output_coordinates[0] - 0.5, output_coordinates[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
# MASKED: geometric_transform function (lines 87-141)
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].
    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
    is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coordinates):
    ...     return (output_coordinates[0] - 0.5, output_coordinates[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
| 87
| 141
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(12.).reshape((4, 3))
    >>> def shift_func(output_coordinates):
    ...     return (output_coordinates[0] - 0.5, output_coordinates[1] - 0.5)
    ...
    >>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
        Value used for points outside the boundaries of the input if
        ``mode='constant'``. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
    >>> import numpy as np
    >>> from scipy import ndimage
    >>> a = np.arange(12.).reshape((4, 3))
    >>> a
    array([[  0.,   1.,   2.],
           [  3.,   4.,   5.],
           [  6.,   7.,   8.],
           [  9.,  10.,  11.]])
    >>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
    Above, the interpolated value of a[0.5, 0.5] gives output[0], while
    a[2, 1] is output[1].
    >>> inds = np.array([[0.5, 2], [0.5, 4]])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
    array([  2. , -33.3])
    >>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
    array([ 2.,  8.])
    >>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
    is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        # number of 2-D slices to rotate (all axes outside the rotation plane)
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
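The rotate() routine above builds a 2x2 rotation matrix and delegates to affine_transform for every 2-D slice in the rotation plane. A minimal usage sketch follows; since the import path of this vendored module is not shown here, it uses scipy.ndimage, which exposes the same routine with a matching signature (an assumption to verify against the actual package).

import numpy as np
from scipy import ndimage

a = np.arange(12, dtype=float).reshape(4, 3)
# reshape=True enlarges the output so the rotated input fits completely;
# reshape=False keeps the input shape and clips the corners instead.
r_grow = ndimage.rotate(a, angle=30, reshape=True, order=1)
r_keep = ndimage.rotate(a, angle=30, reshape=False, order=1)
print(r_grow.shape, r_keep.shape)   # e.g. (5, 5) (4, 3)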
affine_transform
|
Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation; if False, it is assumed
that the input is already filtered.
The matrix must be two-dimensional, or it can be given as a
one-dimensional sequence or array. In the latter case, it is
assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
|
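As a quick illustration of the mapping described above (each output point o is sampled from the input at matrix @ o + offset), the sketch below transposes a small array. It uses scipy.ndimage, whose affine_transform shares the signature of the function defined in this record; swap in the vendored module's own import path if preferred.

import numpy as np
from scipy import ndimage

a = np.arange(12, dtype=float).reshape(4, 3)
# Sampling the input at [[0, 1], [1, 0]] @ (i, j) + 0 reads input[j, i],
# i.e. the transpose, provided output_shape is set accordingly.
matrix = np.array([[0.0, 1.0],
                   [1.0, 0.0]])
out = ndimage.affine_transform(a, matrix, offset=0.0,
                               output_shape=(3, 4), order=1)
print(np.allclose(out, a.T))   # True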
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
# MASKED: affine_transform function (lines 248-305)
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        # number of 2-D slices to rotate (all axes outside the rotation plane)
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
    is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
| 248
| 305
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
    is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        # number of 2-D slices to rotate (all axes outside the rotation plane)
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
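One detail of the affine_transform implementation above deserves a small sketch: when matrix is one-dimensional it is interpreted as the diagonal of the affine matrix, and the separable zoom/shift kernel is used instead of the general transform. Again scipy.ndimage stands in for the vendored module (same signature, an assumption to verify).

import numpy as np
from scipy import ndimage

a = np.arange(12, dtype=float).reshape(4, 3)
# Diagonal [1, 1] with offset [1, 0] samples input[i + 1, j]; the row that
# falls outside the input is filled by the 'nearest' boundary mode.
out = ndimage.affine_transform(a, [1.0, 1.0], offset=[1.0, 0.0],
                               order=1, mode='nearest')
print(np.allclose(out[0], a[1]))   # True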
shift
|
Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation; if False, it is assumed that the
input is already filtered.
|
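A minimal sketch of the behaviour described above: a positive shift moves content toward higher indices, with out-of-range positions filled from cval (internally the implementation negates the shift and reuses the zoom/shift kernel). The scipy.ndimage version of shift, which shares this signature, is used so the snippet runs as-is.

import numpy as np
from scipy import ndimage

a = np.arange(5, dtype=float)                  # [0. 1. 2. 3. 4.]
print(ndimage.shift(a, 2, order=1))            # [0. 0. 0. 1. 2.]
print(ndimage.shift(a, 2, order=1, cval=-1))   # [-1. -1. 0. 1. 2.]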
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
>>> a = arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
>>> print geometric_transform(a,shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> import scipy.ndimage
>>> a = np.arange(12.).reshape((4,3))
>>> print a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
[ 2. 7.]
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
    is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
# MASKED: shift function (lines 308-338)
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
        # number of 2-D slices to rotate (all axes outside the rotation plane)
        size = int(numpy.prod(input.shape))
        size //= input.shape[axes[0]]
        size //= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
| 308
| 338
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
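# Illustrative sketch of the intended use of spline_filter (assumes shift(),
# defined later in this module, is in scope): compute the spline coefficients
# once and reuse them with prefilter=False, which is exactly what the
# prefilter=True path does internally for order > 1.
import numpy

a = numpy.arange(64, dtype=numpy.float64).reshape((8, 8))
coeffs = spline_filter(a, order=3)
s1 = shift(coeffs, [1.5, -0.5], order=3, prefilter=False)
s2 = shift(a, [1.5, -0.5], order=3)  # prefilters internally
assert numpy.allclose(s1, s2)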
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
    >>> import numpy
    >>> a = numpy.arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
    >>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
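# The docstring example above does not exercise extra_arguments; this short,
# illustrative sketch passes a parameter through to the mapping callable.
import numpy

a = numpy.arange(12.).reshape((4, 3))

def shift_by(output_coords, s):
    # Map each output coordinate back to an input coordinate shifted by s.
    return (output_coords[0] - s, output_coords[1] - s)

b = geometric_transform(a, shift_by, extra_arguments=(0.5,))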
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
    >>> import numpy as np
    >>> import scipy as sp
    >>> import scipy.ndimage
    >>> a = np.arange(12.).reshape((4,3))
    >>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
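# Illustrative sketch of the two matrix forms accepted above: a 1-D sequence
# is taken as the diagonal of the matrix and dispatched to the separable
# zoom_shift path, while a 2-D matrix goes through geometric_transform. With
# the default zero offset the two forms should agree (non-zero offsets have
# historically been interpreted differently on the separable path).
import numpy

a = numpy.arange(25.).reshape((5, 5))
b_diag = affine_transform(a, [0.5, 0.5], order=1)              # diagonal form
b_full = affine_transform(a, numpy.diag([0.5, 0.5]), order=1)  # full 2-D matrix
assert numpy.allclose(b_diag, b_full)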
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size /= input.shape[axes[0]]
size /= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
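# Illustrative sketch of the reshape flag of rotate() above: with reshape=True
# the output grows so that the whole rotated input fits, with reshape=False
# the original shape is kept.
import numpy

a = numpy.zeros((20, 10))
r1 = rotate(a, 45, reshape=True, order=1)
r2 = rotate(a, 45, reshape=False, order=1)
print(r1.shape)  # about (21, 21) for a 45-degree rotation of a 20x10 array
print(r2.shape)  # (20, 10)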
|
zoom
|
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
    >>> import numpy
    >>> a = numpy.arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
    >>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
    >>> import numpy as np
    >>> import scipy as sp
    >>> import scipy.ndimage
    >>> a = np.arange(12.).reshape((4,3))
    >>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
# MASKED: zoom function (lines 341-371)
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size /= input.shape[axes[0]]
size /= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
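# Illustrative sketch of zoom() above: each axis length is multiplied by the
# corresponding zoom factor, so a (4, 3) array zoomed by 2 becomes (8, 6).
import numpy

a = numpy.arange(12.).reshape((4, 3))
b = zoom(a, 2, order=1)
print(b.shape)  # (8, 6), i.e. (int(4 * 2), int(3 * 2))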
| 341
| 371
|
# Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
import numpy
from . import _ni_support
from kapteyn import _nd_image
def _extend_mode_to_code(mode):
mode = _ni_support._extend_mode_to_code(mode)
return mode
def spline_filter1d(input, order = 3, axis = -1, output = numpy.float64,
output_type = None):
"""Calculates a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output)
return return_value
def spline_filter(input, order = 3, output = numpy.float64,
output_type = None):
"""Multi-dimensional spline filter.
Note: The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output, return_value = _ni_support._get_output(output, input,
output_type)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output = output)
input = output
else:
output[...] = input[...]
return return_value
def geometric_transform(input, mapping, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True,
extra_arguments = (), extra_keywords = {}):
"""Apply an arbritrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
mapping must be a callable object that accepts a tuple of length
equal to the output array rank and returns the corresponding input
coordinates as a tuple of length equal to the input array
rank. Points outside the boundaries of the input are filled
according to the given mode ('constant', 'nearest', 'reflect' or
'wrap'). The output shape can optionally be given. If not given,
it is equal to the input shape. The parameter prefilter determines
if the input is pre-filtered before interpolation (necessary for
spline interpolation of order > 1). If False it is assumed that
the input is already filtered. The extra_arguments and
extra_keywords arguments can be used to provide extra arguments
and keywords that are passed to the mapping function at each call.
Example
-------
    >>> import numpy
    >>> a = numpy.arange(12.).reshape((4,3))
>>> def shift_func(output_coordinates):
... return (output_coordinates[0]-0.5, output_coordinates[1]-0.5)
...
    >>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.3625, 2.7375],
[ 0. , 4.8125, 6.1875],
[ 0. , 8.2625, 9.6375]])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None,
output, order, mode, cval, extra_arguments, extra_keywords)
return return_value
def map_coordinates(input, coordinates, output_type = None, output = None,
order = 3, mode = 'constant', cval = 0.0, prefilter = True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
input : ndarray
The input array
coordinates : array_like
The coordinates at which `input` is evaluated.
output_type : deprecated
Use `output` instead.
output : dtype, optional
If the output has to have a certain type, specify the dtype.
The default behavior is for the output to have the same type
as `input`.
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
mode : str, optional
Points outside the boundaries of the input are filled according
to the given mode ('constant', 'nearest', 'reflect' or 'wrap').
Default is 'constant'.
cval : scalar, optional
Value used for points outside the boundaries of the input if
        `mode='constant'`. Default is 0.0.
prefilter : bool, optional
The parameter prefilter determines if the input is
pre-filtered with `spline_filter`_ before interpolation
(necessary for spline interpolation of order > 1).
If False, it is assumed that the input is already filtered.
Returns
-------
return_value : ndarray
The result of transforming the input. The shape of the
output is derived from that of `coordinates` by dropping
the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
    >>> import numpy as np
    >>> import scipy as sp
    >>> import scipy.ndimage
    >>> a = np.arange(12.).reshape((4,3))
    >>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> sp.ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
    array([ 2.,  7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> sp.ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> sp.ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
    array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return return_value
def affine_transform(input, matrix, offset = 0.0, output_shape = None,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Apply an affine transformation.
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode. The output shape can optionally be given. If not given it is
equal to the input shape. The parameter prefilter determines if the
input is pre-filtered before interpolation, if False it is assumed
that the input is already filtered.
The matrix must be two-dimensional or can also be given as a
one-dimensional sequence or array. In the latter case, it is
    assumed that the matrix is diagonal. A more efficient algorithm
is then applied that exploits the separability of the problem.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
matrix = numpy.asarray(matrix, dtype = numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype = numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
_nd_image.zoom_shift(filtered, matrix, offset, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return return_value
def shift(input, shift, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Shift an array.
The array is shifted using spline interpolation of the requested
order. Points outside the boundaries of the input are filled according
to the given mode. The parameter prefilter determines if the input is
pre-filtered before interpolation, if False it is assumed that the
input is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
output, return_value = _ni_support._get_output(output, input,
output_type)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype = numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return return_value
def zoom(input, zoom, output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output = numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple([int(ii * jj) for ii, jj in zip(input.shape, zoom)])
zoom = (numpy.array(input.shape)-1)/(numpy.array(output_shape,float)-1)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
zoom = numpy.asarray(zoom, dtype = numpy.float64)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return return_value
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
if coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
if coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(input, angle, axes = (1, 0), reshape = True,
output_type = None, output = None, order = 3,
mode = 'constant', cval = 0.0, prefilter = True):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
axes parameter using spline interpolation of the requested order. The
angle is given in degrees. Points outside the boundaries of the input
are filled according to the given mode. If reshape is true, the output
shape is adapted so that the input array is contained completely in
the output. The parameter prefilter determines if the input is pre-
filtered before interpolation, if False it is assumed that the input
is already filtered.
"""
input = numpy.asarray(input)
axes = list(axes)
rank = input.ndim
if axes[0] < 0:
axes[0] += rank
if axes[1] < 0:
axes[1] += rank
if axes[0] < 0 or axes[1] < 0 or axes[0] > rank or axes[1] > rank:
raise RuntimeError('invalid rotation plane specified')
if axes[0] > axes[1]:
axes = axes[1], axes[0]
angle = numpy.pi / 180 * angle
m11 = math.cos(angle)
m12 = math.sin(angle)
m21 = -math.sin(angle)
m22 = math.cos(angle)
matrix = numpy.array([[m11, m12],
[m21, m22]], dtype = numpy.float64)
iy = input.shape[axes[0]]
ix = input.shape[axes[1]]
if reshape:
mtrx = numpy.array([[ m11, -m21],
[-m12, m22]], dtype = numpy.float64)
minc = [0, 0]
maxc = [0, 0]
coor = numpy.dot(mtrx, [0, ix])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, 0])
minc, maxc = _minmax(coor, minc, maxc)
coor = numpy.dot(mtrx, [iy, ix])
minc, maxc = _minmax(coor, minc, maxc)
oy = int(maxc[0] - minc[0] + 0.5)
ox = int(maxc[1] - minc[1] + 0.5)
else:
oy = input.shape[axes[0]]
ox = input.shape[axes[1]]
offset = numpy.zeros((2,), dtype = numpy.float64)
offset[0] = float(oy) / 2.0 - 0.5
offset[1] = float(ox) / 2.0 - 0.5
offset = numpy.dot(matrix, offset)
tmp = numpy.zeros((2,), dtype = numpy.float64)
tmp[0] = float(iy) / 2.0 - 0.5
tmp[1] = float(ix) / 2.0 - 0.5
offset = tmp - offset
output_shape = list(input.shape)
output_shape[axes[0]] = oy
output_shape[axes[1]] = ox
output_shape = tuple(output_shape)
output, return_value = _ni_support._get_output(output, input,
output_type, shape = output_shape)
if input.ndim <= 2:
affine_transform(input, matrix, offset, output_shape, None, output,
order, mode, cval, prefilter)
else:
coordinates = []
size = numpy.product(input.shape,axis=0)
size /= input.shape[axes[0]]
size /= input.shape[axes[1]]
for ii in range(input.ndim):
if ii not in axes:
coordinates.append(0)
else:
coordinates.append(slice(None, None, None))
iter_axes = list(range(input.ndim))
iter_axes.reverse()
iter_axes.remove(axes[0])
iter_axes.remove(axes[1])
os = (output_shape[axes[0]], output_shape[axes[1]])
for ii in range(size):
ia = input[tuple(coordinates)]
oa = output[tuple(coordinates)]
affine_transform(ia, matrix, offset, os, None, oa, order, mode,
cval, prefilter)
for jj in iter_axes:
if coordinates[jj] < input.shape[jj] - 1:
coordinates[jj] += 1
break
else:
coordinates[jj] = 0
return return_value
|
make_request
|
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
Returns:
A synapse.http.site.SynapseRequest.
|
import json
from io import BytesIO
from six import text_type
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import HostResolution
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorPluggableNameResolver
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
class TimedOutException(Exception):
"""
A web query timed out.
"""
@attr.s
class FakeChannel(object):
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
_reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@property
def json_body(self):
if not self.result:
raise Exception("No result yet.")
return json.loads(self.result["body"].decode('utf8'))
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@property
def transport(self):
return self
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
@property
def access_logger(self):
class FakeLogger:
def info(self, *args, **kwargs):
pass
return FakeLogger()
# MASKED: make_request function (lines 119-175)
def wait_until_result(clock, request, timeout=100):
"""
Wait until the request is finished.
"""
clock.run()
x = 0
while not request.finished:
# If there's a producer, tell it to resume producing so we get content
if request._channel._producer:
request._channel._producer.resumeProducing()
x += 1
if x > timeout:
raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
def render(request, resource, clock):
request.render(resource)
wait_until_result(clock, request)
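# Illustrative sketch of what FakeChannel records once a request has been
# rendered; in real tests make_request()/render() above drive this. The
# reactor argument is only used when a producer is registered, so None is
# enough for a plain inspection.
channel = FakeChannel(None)
channel.writeHeaders(b"HTTP/1.1", 200, b"OK", None)
channel.write(b'{"ok": true}')
channel.requestDone(None)
print(channel.code)       # 200
print(channel.json_body)  # {'ok': True}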
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self._udp = []
self.lookups = {}
class Resolver(object):
def resolveHostName(
_self,
resolutionReceiver,
hostName,
portNumber=0,
addressTypes=None,
transportSemantics='TCP',
):
resolution = HostResolution(hostName)
resolutionReceiver.resolutionBegan(resolution)
if hostName not in self.lookups:
raise DNSLookupError("OH NO")
resolutionReceiver.addressResolved(
IPv4Address('TCP', self.lookups[hostName], portNumber)
)
resolutionReceiver.resolutionComplete()
return resolution
self.nameResolver = Resolver()
super(ThreadedMemoryReactorClock, self).__init__()
def listenUDP(self, port, protocol, interface='', maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
d = Deferred()
d.addCallback(lambda x: callback(*args, **kwargs))
self.callLater(0, d.callback, True)
return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
"""
Set up a synchronous test server, driven by the reactor used by
the homeserver.
"""
d = _sth(cleanup_func, *args, **kwargs).result
if isinstance(d, Failure):
d.raiseException()
# Make the thread pool synchronous.
clock = d.get_clock()
pool = d.get_db_pool()
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
class ThreadPool:
"""
Threadless thread pool.
"""
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
clock._reactor.callLater(0, d.callback, True)
return d
clock.threadpool = ThreadPool()
pool.threadpool = ThreadPool()
pool.running = True
return d
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return (clock, hs_clock)
@attr.s
class FakeTransport(object):
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
disconnecting = False
buffer = attr.ib(default=b'')
producer = attr.ib(default=None)
def getPeer(self):
return None
def getHost(self):
return None
def loseConnection(self):
self.disconnecting = True
def abortConnection(self):
self.disconnecting = True
def pauseProducing(self):
self.producer.pauseProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
d = self.producer.resumeProducing()
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
self.buffer = self.buffer + byt
def _write():
if getattr(self.other, "transport") is not None:
self.other.dataReceived(self.buffer)
self.buffer = b""
return
self._reactor.callLater(0.0, _write)
_write()
def writeSequence(self, seq):
for x in seq:
self.write(x)
|
def make_request(
reactor,
method,
path,
content=b"",
access_token=None,
request=SynapseRequest,
shorthand=True,
):
"""
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
    Returns:
        A tuple of (synapse.http.site.SynapseRequest, FakeChannel).
"""
if not isinstance(method, bytes):
method = method.encode('ascii')
if not isinstance(path, bytes):
path = path.encode('ascii')
# Decorate it to be the full path, if we're using shorthand
if shorthand and not path.startswith(b"/_matrix"):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
if isinstance(content, text_type):
content = content.encode('utf8')
site = FakeSite()
channel = FakeChannel(reactor)
req = request(site, channel)
req.process = lambda: b""
req.content = BytesIO(content)
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode('ascii')
)
if content:
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
req.requestReceived(method, path, b"1.1")
return req, channel
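Illustrative only: a minimal sketch of how make_request, render and the fakes above are typically wired together in a test. SomeResource is a placeholder for whatever Twisted Resource is under test; everything else is defined in this module.
# Sketch: drive a resource through the fake reactor/channel stack.
# `SomeResource` is a placeholder; the helpers come from this module.
def example_request_roundtrip(SomeResource):
    reactor = ThreadedMemoryReactorClock()
    request, channel = make_request(reactor, b"GET", b"/account/whoami")
    render(request, SomeResource(), reactor)   # spins the clock until the request finishes
    assert channel.code == 200                 # status line captured by FakeChannel
    return channel.json_body                   # decoded JSON body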
| 119
| 175
|
import json
from io import BytesIO
from six import text_type
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import HostResolution
from twisted.internet.address import IPv4Address
from twisted.internet.defer import Deferred
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import IReactorPluggableNameResolver
from twisted.python.failure import Failure
from twisted.test.proto_helpers import MemoryReactorClock
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
class TimedOutException(Exception):
"""
A web query timed out.
"""
@attr.s
class FakeChannel(object):
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
_reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@property
def json_body(self):
if not self.result:
raise Exception("No result yet.")
return json.loads(self.result["body"].decode('utf8'))
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@property
def transport(self):
return self
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
@property
def access_logger(self):
class FakeLogger:
def info(self, *args, **kwargs):
pass
return FakeLogger()
def make_request(
reactor,
method,
path,
content=b"",
access_token=None,
request=SynapseRequest,
shorthand=True,
):
"""
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
    Returns:
        A tuple of (synapse.http.site.SynapseRequest, FakeChannel).
"""
if not isinstance(method, bytes):
method = method.encode('ascii')
if not isinstance(path, bytes):
path = path.encode('ascii')
# Decorate it to be the full path, if we're using shorthand
if shorthand and not path.startswith(b"/_matrix"):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
if isinstance(content, text_type):
content = content.encode('utf8')
site = FakeSite()
channel = FakeChannel(reactor)
req = request(site, channel)
req.process = lambda: b""
req.content = BytesIO(content)
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode('ascii')
)
if content:
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
req.requestReceived(method, path, b"1.1")
return req, channel
def wait_until_result(clock, request, timeout=100):
"""
Wait until the request is finished.
"""
clock.run()
x = 0
while not request.finished:
# If there's a producer, tell it to resume producing so we get content
if request._channel._producer:
request._channel._producer.resumeProducing()
x += 1
if x > timeout:
raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
def render(request, resource, clock):
request.render(resource)
wait_until_result(clock, request)
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self._udp = []
self.lookups = {}
class Resolver(object):
def resolveHostName(
_self,
resolutionReceiver,
hostName,
portNumber=0,
addressTypes=None,
transportSemantics='TCP',
):
resolution = HostResolution(hostName)
resolutionReceiver.resolutionBegan(resolution)
if hostName not in self.lookups:
raise DNSLookupError("OH NO")
resolutionReceiver.addressResolved(
IPv4Address('TCP', self.lookups[hostName], portNumber)
)
resolutionReceiver.resolutionComplete()
return resolution
self.nameResolver = Resolver()
super(ThreadedMemoryReactorClock, self).__init__()
def listenUDP(self, port, protocol, interface='', maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
d = Deferred()
d.addCallback(lambda x: callback(*args, **kwargs))
self.callLater(0, d.callback, True)
return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
"""
Set up a synchronous test server, driven by the reactor used by
the homeserver.
"""
d = _sth(cleanup_func, *args, **kwargs).result
if isinstance(d, Failure):
d.raiseException()
# Make the thread pool synchronous.
clock = d.get_clock()
pool = d.get_db_pool()
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
class ThreadPool:
"""
Threadless thread pool.
"""
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
clock._reactor.callLater(0, d.callback, True)
return d
clock.threadpool = ThreadPool()
pool.threadpool = ThreadPool()
pool.running = True
return d
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return (clock, hs_clock)
@attr.s
class FakeTransport(object):
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
disconnecting = False
buffer = attr.ib(default=b'')
producer = attr.ib(default=None)
def getPeer(self):
return None
def getHost(self):
return None
def loseConnection(self):
self.disconnecting = True
def abortConnection(self):
self.disconnecting = True
def pauseProducing(self):
self.producer.pauseProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
d = self.producer.resumeProducing()
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
self.buffer = self.buffer + byt
def _write():
if getattr(self.other, "transport") is not None:
self.other.dataReceived(self.buffer)
self.buffer = b""
return
self._reactor.callLater(0.0, _write)
_write()
def writeSequence(self, seq):
for x in seq:
self.write(x)
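Illustrative only: the two-instance wiring mentioned in the FakeTransport docstring, assuming client_protocol and server_protocol are two IProtocol implementations created elsewhere.
# Sketch: bidirectional connection between two protocols via two FakeTransports.
def connect_fake_transports(client_protocol, server_protocol, reactor):
    client_protocol.makeConnection(FakeTransport(server_protocol, reactor))
    server_protocol.makeConnection(FakeTransport(client_protocol, reactor))
    # Anything either side writes is delivered to the other on the next reactor tick.
    return client_protocol, server_protocol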
|
get_all_concentrations
|
Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
|
from datanator_query_python.util import mongo_util
from pymongo.collation import Collation, CollationStrength
class QueryXmdb:
def __init__(self, username=None, password=None, server=None, authSource='admin',
database='datanator', max_entries=float('inf'), verbose=True, collection_str='ecmdb',
readPreference='nearest', replicaSet=None):
self.mongo_manager = mongo_util.MongoUtil(MongoDB=server, username=username,
password=password, authSource=authSource, db=database,
readPreference=readPreference, replicaSet=replicaSet)
self.collation = Collation(locale='en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
self.client, self.db, self.collection = self.mongo_manager.con_db(collection_str)
self.collection_str = collection_str
# MASKED: get_all_concentrations function (lines 19-34)
def get_name_by_inchikey(self, inchikey):
"""Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
"""
query = {'inchikey': inchikey}
projection = {'_id': 0, 'name': 1}
doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation)
if doc is None:
return 'No metabolite found.'
else:
return doc['name']
def get_standard_ids_by_id(self, _id):
"""Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
"""
if self.collection_str == 'ecmdb':
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
# projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0}
doc = self.collection.find_one(filter=query)
if doc is None:
return {}
else:
return doc
|
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1,
'inchikey': 1, 'smiles': 1, 'name': 1}):
"""Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
"""
result = []
query = {'concentrations': {'$ne': None} }
docs = self.collection.find(filter=query, projection=projection)
for doc in docs:
result.append(doc)
return result
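Illustrative only: a small sketch of calling the method above; the connection parameters are placeholders, not a real MongoDB endpoint.
# Sketch (placeholder credentials/host; QueryXmdb is the class defined above).
q = QueryXmdb(username="user", password="secret",
              server="mongodb.example.org", collection_str="ecmdb")
docs = q.get_all_concentrations()            # uses the default projection
names = [doc.get("name") for doc in docs]    # e.g. collect metabolite names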
| 19
| 34
|
from datanator_query_python.util import mongo_util
from pymongo.collation import Collation, CollationStrength
class QueryXmdb:
def __init__(self, username=None, password=None, server=None, authSource='admin',
database='datanator', max_entries=float('inf'), verbose=True, collection_str='ecmdb',
readPreference='nearest', replicaSet=None):
self.mongo_manager = mongo_util.MongoUtil(MongoDB=server, username=username,
password=password, authSource=authSource, db=database,
readPreference=readPreference, replicaSet=replicaSet)
self.collation = Collation(locale='en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
self.client, self.db, self.collection = self.mongo_manager.con_db(collection_str)
self.collection_str = collection_str
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1,
'inchikey': 1, 'smiles': 1, 'name': 1}):
"""Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
"""
result = []
query = {'concentrations': {'$ne': None} }
docs = self.collection.find(filter=query, projection=projection)
for doc in docs:
result.append(doc)
return result
def get_name_by_inchikey(self, inchikey):
"""Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
"""
query = {'inchikey': inchikey}
projection = {'_id': 0, 'name': 1}
doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation)
if doc is None:
return 'No metabolite found.'
else:
return doc['name']
def get_standard_ids_by_id(self, _id):
"""Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
"""
if self.collection_str == 'ecmdb':
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
# projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0}
doc = self.collection.find_one(filter=query)
if doc is None:
return {}
else:
return doc
|
get_name_by_inchikey
|
Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
|
from datanator_query_python.util import mongo_util
from pymongo.collation import Collation, CollationStrength
class QueryXmdb:
def __init__(self, username=None, password=None, server=None, authSource='admin',
database='datanator', max_entries=float('inf'), verbose=True, collection_str='ecmdb',
readPreference='nearest', replicaSet=None):
self.mongo_manager = mongo_util.MongoUtil(MongoDB=server, username=username,
password=password, authSource=authSource, db=database,
readPreference=readPreference, replicaSet=replicaSet)
self.collation = Collation(locale='en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
self.client, self.db, self.collection = self.mongo_manager.con_db(collection_str)
self.collection_str = collection_str
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1,
'inchikey': 1, 'smiles': 1, 'name': 1}):
"""Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
"""
result = []
query = {'concentrations': {'$ne': None} }
docs = self.collection.find(filter=query, projection=projection)
for doc in docs:
result.append(doc)
return result
# MASKED: get_name_by_inchikey function (lines 36-51)
def get_standard_ids_by_id(self, _id):
"""Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
"""
if self.collection_str == 'ecmdb':
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
# projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0}
doc = self.collection.find_one(filter=query)
if doc is None:
return {}
else:
return doc
|
def get_name_by_inchikey(self, inchikey):
"""Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
"""
query = {'inchikey': inchikey}
projection = {'_id': 0, 'name': 1}
doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation)
if doc is None:
return 'No metabolite found.'
else:
return doc['name']
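Illustrative only: a short lookup sketch, assuming q is a QueryXmdb instance configured as above; the InChIKey is just an example value.
# Sketch: resolve a metabolite name from an InChIKey (example key).
name = q.get_name_by_inchikey("WQZGKKKJIJFFOK-GASJEMHNSA-N")
if name == 'No metabolite found.':
    name = None   # the method returns a sentinel string instead of raising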
| 36
| 51
|
from datanator_query_python.util import mongo_util
from pymongo.collation import Collation, CollationStrength
class QueryXmdb:
def __init__(self, username=None, password=None, server=None, authSource='admin',
database='datanator', max_entries=float('inf'), verbose=True, collection_str='ecmdb',
readPreference='nearest', replicaSet=None):
self.mongo_manager = mongo_util.MongoUtil(MongoDB=server, username=username,
password=password, authSource=authSource, db=database,
readPreference=readPreference, replicaSet=replicaSet)
self.collation = Collation(locale='en', strength=CollationStrength.SECONDARY)
self.max_entries = max_entries
self.verbose = verbose
self.client, self.db, self.collection = self.mongo_manager.con_db(collection_str)
self.collection_str = collection_str
def get_all_concentrations(self, projection={'_id': 0, 'inchi': 1,
'inchikey': 1, 'smiles': 1, 'name': 1}):
"""Get all entries that have concentration values
Args:
projection (dict, optional): mongodb query projection. Defaults to {'_id': 0, 'inchi': 1,'inchikey': 1, 'smiles': 1, 'name': 1}.
Returns:
(list): all results that meet the constraint.
"""
result = []
query = {'concentrations': {'$ne': None} }
docs = self.collection.find(filter=query, projection=projection)
for doc in docs:
result.append(doc)
return result
def get_name_by_inchikey(self, inchikey):
"""Get metabolite's name by its inchikey
Args:
inchikey (:obj:`str`): inchi key of metabolite
Return:
(:obj:`str`): name of metabolite
"""
query = {'inchikey': inchikey}
projection = {'_id': 0, 'name': 1}
doc = self.collection.find_one(filter=query, projection=projection, collation=self.collation)
if doc is None:
return 'No metabolite found.'
else:
return doc['name']
def get_standard_ids_by_id(self, _id):
"""Get chebi_id, pubmed_id, and kegg_id from
database specific id.
Args:
_id (:obj:`str`): Database specific ID.
Return:
(:obj:`dict`): Dictionary containing the information.
"""
if self.collection_str == 'ecmdb':
db_id = 'm2m_id'
else:
db_id = 'ymdb_id'
query = {db_id: _id}
# projection = {'hmdb_id': 1, 'chebi_id': 1, 'kegg_id': 1, '_id': 0}
doc = self.collection.find_one(filter=query)
if doc is None:
return {}
else:
return doc
|
__call__
|
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
from typing import List, Optional, Union
import torch
from detectron2.config import configurable
from . import detection_utils as utils
from . import transforms as T
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
class DatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
    1. Reads the image from "file_name"
    2. Applies cropping/geometric transforms to the image and annotations
    3. Prepares data and annotations into Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
recompute_boxes = cfg.MODEL.MASK_ON
else:
recompute_boxes = False
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"instance_mask_format": cfg.INPUT.MASK_FORMAT,
"use_keypoint": cfg.MODEL.KEYPOINT_ON,
"recompute_boxes": recompute_boxes,
}
if cfg.MODEL.KEYPOINT_ON:
ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
if cfg.MODEL.LOAD_PROPOSALS:
ret["precomputed_proposal_topk"] = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
return ret
# MASKED: __call__ function (lines 115-187)
|
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk is not None:
utils.transform_proposals(
dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
)
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
# dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
# the intersection of original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
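Illustrative only: a sketch of typical usage, assuming cfg is a detectron2 config node and dataset_dict a single Detectron2-format dict obtained elsewhere (e.g. from a registered dataset).
# Sketch: the @configurable decorator lets the mapper be built straight from a cfg.
mapper = DatasetMapper(cfg, is_train=True)
example = mapper(dataset_dict)                 # one Detectron2-format dict in
image_tensor = example["image"]                # CHW torch.Tensor
instances = example.get("instances")           # gt Instances when annotations are present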
| 115
| 187
|
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import logging
import numpy as np
from typing import List, Optional, Union
import torch
from detectron2.config import configurable
from . import detection_utils as utils
from . import transforms as T
"""
This file contains the default mapping that's applied to "dataset dicts".
"""
__all__ = ["DatasetMapper"]
class DatasetMapper:
"""
A callable which takes a dataset dict in Detectron2 Dataset format,
    and maps it into a format used by the model.
This is the default callable to be used to map your dataset dict into training data.
You may need to follow it to implement your own one for customized logic,
such as a different way to read or transform images.
See :doc:`/tutorials/data_loading` for details.
The callable currently does the following:
    1. Reads the image from "file_name"
    2. Applies cropping/geometric transforms to the image and annotations
    3. Prepares data and annotations into Tensor and :class:`Instances`
"""
@configurable
def __init__(
self,
is_train: bool,
*,
augmentations: List[Union[T.Augmentation, T.Transform]],
image_format: str,
use_instance_mask: bool = False,
use_keypoint: bool = False,
instance_mask_format: str = "polygon",
keypoint_hflip_indices: Optional[np.ndarray] = None,
precomputed_proposal_topk: Optional[int] = None,
recompute_boxes: bool = False,
):
"""
NOTE: this interface is experimental.
Args:
is_train: whether it's used in training or inference
augmentations: a list of augmentations or deterministic transforms to apply
image_format: an image format supported by :func:`detection_utils.read_image`.
use_instance_mask: whether to process instance segmentation annotations, if available
use_keypoint: whether to process keypoint annotations if available
instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
masks into this format.
keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
precomputed_proposal_topk: if given, will load pre-computed
proposals from dataset_dict and keep the top k proposals for each image.
recompute_boxes: whether to overwrite bounding box annotations
by computing tight bounding boxes from instance mask annotations.
"""
if recompute_boxes:
assert use_instance_mask, "recompute_boxes requires instance masks"
# fmt: off
self.is_train = is_train
self.augmentations = T.AugmentationList(augmentations)
self.image_format = image_format
self.use_instance_mask = use_instance_mask
self.instance_mask_format = instance_mask_format
self.use_keypoint = use_keypoint
self.keypoint_hflip_indices = keypoint_hflip_indices
self.proposal_topk = precomputed_proposal_topk
self.recompute_boxes = recompute_boxes
# fmt: on
logger = logging.getLogger(__name__)
mode = "training" if is_train else "inference"
logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
@classmethod
def from_config(cls, cfg, is_train: bool = True):
augs = utils.build_augmentation(cfg, is_train)
if cfg.INPUT.CROP.ENABLED and is_train:
augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
recompute_boxes = cfg.MODEL.MASK_ON
else:
recompute_boxes = False
ret = {
"is_train": is_train,
"augmentations": augs,
"image_format": cfg.INPUT.FORMAT,
"use_instance_mask": cfg.MODEL.MASK_ON,
"instance_mask_format": cfg.INPUT.MASK_FORMAT,
"use_keypoint": cfg.MODEL.KEYPOINT_ON,
"recompute_boxes": recompute_boxes,
}
if cfg.MODEL.KEYPOINT_ON:
ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
if cfg.MODEL.LOAD_PROPOSALS:
ret["precomputed_proposal_topk"] = (
cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
if is_train
else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
)
return ret
def __call__(self, dataset_dict):
"""
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a format that builtin models in detectron2 accept
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
# USER: Write your own image loading if it's not from a file
image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
utils.check_image_size(dataset_dict, image)
# USER: Remove if you don't do semantic/panoptic segmentation.
if "sem_seg_file_name" in dataset_dict:
sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
else:
sem_seg_gt = None
aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
transforms = self.augmentations(aug_input)
image, sem_seg_gt = aug_input.image, aug_input.sem_seg
image_shape = image.shape[:2] # h, w
# Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
# but not efficient on large generic data structures due to the use of pickle & mp.Queue.
# Therefore it's important to use torch.Tensor.
dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
if sem_seg_gt is not None:
dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
# USER: Remove if you don't use pre-computed proposals.
# Most users would not need this feature.
if self.proposal_topk is not None:
utils.transform_proposals(
dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
)
if not self.is_train:
# USER: Modify this if you want to keep them for some reason.
# dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
# USER: Modify this if you want to keep them for some reason.
for anno in dataset_dict["annotations"]:
if not self.use_instance_mask:
anno.pop("segmentation", None)
if not self.use_keypoint:
anno.pop("keypoints", None)
# USER: Implement additional transformations if you have other types of data
annos = [
utils.transform_instance_annotations(
obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
)
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
instances = utils.annotations_to_instances(
annos, image_shape, mask_format=self.instance_mask_format
)
# After transforms such as cropping are applied, the bounding box may no longer
# tightly bound the object. As an example, imagine a triangle object
# [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
# bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
# the intersection of original bounding box and the cropping box.
if self.recompute_boxes:
instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
dataset_dict["instances"] = utils.filter_empty_instances(instances)
return dataset_dict
|
shift_decoder
|
Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
# MASKED: shift_decoder function (lines 24-42)
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be used to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
            BinaryCodeError: in case of a decoder/encoder size mismatch, or if
                the decoder does not index all qubits
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
                    'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
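Illustrative only: a tiny example of the shift, using the BinaryPolynomial string syntax ('w0', 'w1', ...) already imported in this file.
# Sketch: shift a two-component decoder up by two qubit indices.
decoder = [BinaryPolynomial('w0'), BinaryPolynomial('w0 w1 + 1')]
shifted = shift_decoder(decoder, 2)
# shifted is equivalent to [w2, w2 w3 + 1]: every qubit index is raised by the constant.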
| 24
| 42
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be used to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
            BinaryCodeError: in case of a decoder/encoder size mismatch, or if
                the decoder does not index all qubits
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
                    'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
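Illustrative only: a minimal construction showing the appendage (+) and repetition (integer *) semantics described in the BinaryCode docstring; the 2-mode code below is just the trivial identity mapping.
# Sketch: trivial 2-mode identity code, e(v) = v and d(w) = w.
code = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])
assert (code.n_qubits, code.n_modes) == (2, 2)
appended = code + code    # two independent copies: 4 modes on 4 qubits
tripled = 3 * code        # integer factor appends the same code three times: 6 modes on 6 qubits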
|
double_decoding
|
Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
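Illustrative only: a tiny composition example for the function documented above, written with openfermion's BinaryPolynomial string syntax; the expected results are spelled out in comments.
# Sketch: compose an outer decoder with an inner one, w -> outer(inner(w)).
outer = [BinaryPolynomial('w0 w1'), BinaryPolynomial('w1 + 1')]
inner = [BinaryPolynomial('w0 + w1'), BinaryPolynomial('w1')]
composed = double_decoding(outer, inner)
# composed[0] == (w0 + w1) * w1 == w0 w1 + w1
# composed[1] == w1 + 1   (constants pass through unchanged)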
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
# MASKED: double_decoding function (lines 45-67)
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be used to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
                raise TypeError(
                    'decoder component provided '
                    'is not suitable for BinaryPolynomial',
                    symbolic_binary)
        if len(decoder_qubits) != self.n_qubits:
            raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
        if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
                                  'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
                            'BinaryCode. Was given {} of '
                            'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
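# A minimal usage sketch of double_decoding (editorial addition; the concrete
# polynomials are illustrative assumptions, not taken from the original file).
from openfermion.ops import BinaryPolynomial

inner_decoder = [BinaryPolynomial('w0'), BinaryPolynomial('w0 + w1')]  # d'
outer_decoder = [BinaryPolynomial('w0 w1')]                            # d
composed = double_decoding(outer_decoder, inner_decoder)
# composed[0] represents w0 * (w0 + w1) = w0 + w0*w1 over binary arithmetic,
# i.e. the decoding w -> d(d'(w)) described in the docstring.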
| 45
| 67
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. Got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
                raise TypeError(
                    'decoder component provided '
                    'is not suitable for BinaryPolynomial',
                    symbolic_binary)
        if len(decoder_qubits) != self.n_qubits:
            raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
        if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
                                  'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
                            'BinaryCode. Was given {} of '
                            'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
__init__
|
Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. Got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
# MASKED: __init__ function (lines 129-179)
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
                            'BinaryCode. Was given {} of '
                            'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
                raise TypeError(
                    'decoder component provided '
                    'is not suitable for BinaryPolynomial',
                    symbolic_binary)
        if len(decoder_qubits) != self.n_qubits:
            raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
        if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
                                  'indexed are: {}'.format(decoder_qubits))
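# Editorial sketch of constructing a code with the __init__ above (the concrete
# encoder and decoder are illustrative assumptions; BinaryCode refers to the
# class this method belongs to): one qubit storing the two-mode,
# single-particle configurations |01> and |10>.
code = BinaryCode([[1, 0]], ['w0', 'w0 + 1'])
# encoder is a 1 x 2 matrix, decoder has one BinaryPolynomial per mode:
# code.n_qubits == 1 and code.n_modes == 2.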
| 129
| 179
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. Got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
                raise TypeError(
                    'decoder component provided '
                    'is not suitable for BinaryPolynomial',
                    symbolic_binary)
        if len(decoder_qubits) != self.n_qubits:
            raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
        if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
                                  'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
                            'BinaryCode. Was given {} of '
                            'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
__iadd__
|
In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. Got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
                raise TypeError(
                    'decoder component provided '
                    'is not suitable for BinaryPolynomial',
                    symbolic_binary)
        if len(decoder_qubits) != self.n_qubits:
            raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
        if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
                                  'indexed are: {}'.format(decoder_qubits))
# MASKED: __iadd__ function (lines 181-202)
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
                            'BinaryCode. Was given {} of '
                            'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
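# Editorial sketch (illustrative assumption; BinaryCode refers to the class
# this method belongs to): appending a code to itself with the overloaded +,
# which calls the __iadd__ above, adds both mode and qubit counts, matching
# N" = N + N' and n" = n + n' from the class docstring.
small = BinaryCode([[1, 0]], ['w0', 'w0 + 1'])   # 1 qubit, 2 modes
big = small + small
# big.n_qubits == 2 and big.n_modes == 4; the second copy's decoder is
# re-indexed by shift_decoder, giving ['w0', 'w0 + 1', 'w1', 'w1 + 1'].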
| 181
| 202
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. Got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
    a length-N vector (v) of binary numbers can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
                raise TypeError(
                    'decoder component provided '
                    'is not suitable for BinaryPolynomial',
                    symbolic_binary)
        if len(decoder_qubits) != self.n_qubits:
            raise BinaryCodeError(
                'decoder and encoder provided have different numbers of qubits')
        if max(decoder_qubits) + 1 > self.n_qubits:
            raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
                                  'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
            raise TypeError('the left multiplier must be an integer to a '
                            'BinaryCode. Was given {} of '
                            'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
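# Editorial sketch (illustrative assumption, using the BinaryCode class defined
# above): the overloaded * covers both behaviours documented in the class
# docstring. An integer factor appends the code to itself; a BinaryCode factor
# concatenates the two layers, which requires n_qubits of the inner code to
# equal n_modes of the outer code.
unit = BinaryCode([[1, 0]], ['w0', 'w0 + 1'])          # 1 qubit, 2 modes
doubled = 2 * unit                                     # appended twice: 2 qubits, 4 modes
outer = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])     # identity layer: 2 qubits, 2 modes
stacked = doubled * outer                              # concatenation: 2 qubits, 4 modes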
|
__add__
|
Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
        raise TypeError('the shift to the decoder must be an integer. Got {} '
                        'of type {}'.format(shift_constant,
                                            type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
# MASKED: __add__ function (lines 204-214)
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
| 204
| 214
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
__imul__
|
In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
|
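A short sketch of in-place *=, covering both the integer (appendage) and BinaryCode (concatenation) branches; the import path and the tiny parity-style outer code are assumptions for illustration only:
from openfermion.ops import BinaryCode
code = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])   # 2 qubits, 2 modes
code *= 2                                           # integer: append the code to itself
print(code.n_qubits, code.n_modes)                  # expected output: 4 4
inner = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])  # produces 2 qubits
outer = BinaryCode([[1, 1]], ['w0', 'w0 + 1'])      # consumes 2 modes
inner *= outer                                      # concatenation requires inner n_qubits == outer n_modes
print(inner.n_qubits, inner.n_modes)                # expected output: 1 2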
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
# MASKED: __imul__ function (lines 216-261)
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
| 216
| 261
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
__mul__
|
Concatenation of two codes, or appendage of the same code factor times
in case of an integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
|
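A small sketch of the non-in-place * operator, which deep-copies the code before delegating to *=; the import path is an assumption and the numbers are illustrative:
from openfermion.ops import BinaryCode
code = BinaryCode([[1, 0], [0, 1]], ['w0', 'w1'])
tripled = code * 3                          # append the code to itself three times
print(tripled.n_qubits, tripled.n_modes)    # expected output: 6 6
print((2 * code).n_modes)                   # __rmul__ delegates to __mul__: 4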
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
# MASKED: __mul__ function (lines 263-275)
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
| 263
| 275
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Binary code class for Fermion-qubit mappings (arXiv:1712.07067) """
import copy
import numpy
import scipy
import scipy.sparse
from openfermion.ops import BinaryPolynomial
def shift_decoder(decoder, shift_constant):
""" Shifts the indices of a decoder by a constant.
Args:
decoder (iterable): list of BinaryPolynomial; the decoder
shift_constant (int): the qubit index that corresponds to the offset.
Returns (list): list of BinaryPolynomial shifted decoder
"""
decode_shifted = []
if not isinstance(shift_constant, (numpy.int64, numpy.int32, int)):
raise TypeError('the shift to the decoder must be an integer. got {} '
'of type {}'.format(shift_constant,
type(shift_constant)))
for entry in decoder:
tmp_entry = copy.deepcopy(entry)
tmp_entry.shift(shift_constant)
decode_shifted.append(tmp_entry)
return decode_shifted
def double_decoding(decoder_1, decoder_2):
""" Concatenates two decodings
Args:
decoder_1 (iterable): list of BinaryPolynomial
decoding of the outer code layer
decoder_2 (iterable): list of BinaryPolynomial
decoding of the inner code layer
Returns (list): list of BinaryPolynomial the decoding defined by
w -> decoder_1( decoder_2(w) )
"""
doubled_decoder = []
for entry in decoder_1:
tmp_sum = 0
for summand in entry.terms:
tmp_term = BinaryPolynomial('1')
for factor in summand:
if isinstance(factor, (numpy.int32, numpy.int64, int)):
tmp_term *= decoder_2[factor]
tmp_sum = tmp_term + tmp_sum
doubled_decoder += [tmp_sum]
return doubled_decoder
class BinaryCodeError(Exception):
pass
class BinaryCode(object):
"""The BinaryCode class provides a representation of an encoding-decoding
pair for binary vectors of different lengths, where the decoding is allowed
to be non-linear.
As the occupation number of fermionic mode is effectively binary,
a length-N vector (v) of binary number can be utilized to describe
a configuration of a many-body fermionic state on N modes.
An n-qubit product state configuration \|w0> \|w1> \|w2> ... \|wn-1>,
on the other hand is described by a length-n binary vector
w=(w0, w1, ..., wn-1). To map a subset of N-Orbital Fermion states
to n-qubit states we define a binary code, which consists of a
(here: linear) encoding (e) and a (non-linear) decoding (d), such
that for every v from that subset, w = e(v) is a length-n binary
vector with d(w) = v. This can be used to save qubits given a
Hamiltonian that dictates such a subset, otherwise n=N.
Two binary codes (e,d) and (e',d') can construct a third code (e",d")
by two possible operations:
Concatenation: (e",d") = (e,d) * (e',d')
which means e": v" -> e'( e(v") ) and d": w" -> d( d'(w") )
where n" = n' and N" = N, with n = N' as necessary condition.
Appendage: (e",d") = (e,d) + (e',d')
which means e": (v + v') -> e(v) + e'(v') and d": (w + w') -> d(w) + d'(
w')
where the addition is to be understood as appending two vectors together,
so N" = N' + N and n" = n + n'.
Appending codes is particularly useful when considering segment codes or
segmented transforms.
A BinaryCode-instance is initialized by BinaryCode(A,d),
given the encoding (e) as n x N array or matrix-like nested lists A,
such that e(v) = (A v) mod 2. The decoding d is an array or a list
input of length N, which has entries either of type BinaryPolynomial, or of
valid type for an input of the BinaryPolynomial-constructor.
The signs + and \*, += and \*= are overloaded to implement concatenation
and appendage on BinaryCode-objects.
NOTE: multiplication of a BinaryCode with an integer yields a
multiple appending of the same code, the multiplication with another
BinaryCode their concatenation.
Attributes:
decoder (list): list of BinaryPolynomial: Outputs the decoding
functions as components.
encoder (scipy.sparse.csc_matrix): Outputs A, the linear matrix that
implements the encoding function.
n_modes (int): Outputs the number of modes.
n_qubits (int): Outputs the number of qubits.
"""
def __init__(self, encoding, decoding):
""" Initialization of a binary code.
Args:
encoding (np.ndarray or list): nested lists or binary 2D-array
decoding (array or list): list of BinaryPolynomial (list or str).
Raises:
TypeError: non-list, array like encoding or decoding, unsuitable
BinaryPolynomial generators,
BinaryCodeError: in case of decoder/encoder size mismatch or
decoder size, qubits indexed mismatch
"""
if not isinstance(encoding, (numpy.ndarray, list)):
raise TypeError('encoding must be a list or array.')
if not isinstance(decoding, (numpy.ndarray, list)):
raise TypeError('decoding must be a list or array.')
self.encoder = scipy.sparse.csc_matrix(encoding)
self.n_qubits, self.n_modes = numpy.shape(encoding)
if self.n_modes != len(decoding):
raise BinaryCodeError(
'size mismatch, decoder and encoder should have the same'
' first dimension')
decoder_qubits = set()
self.decoder = []
for symbolic_binary in decoding:
if isinstance(symbolic_binary, (tuple, list, str, int,
numpy.int32, numpy.int64)):
symbolic_binary = BinaryPolynomial(symbolic_binary)
if isinstance(symbolic_binary, BinaryPolynomial):
self.decoder.append(symbolic_binary)
decoder_qubits = decoder_qubits | set(
symbolic_binary.enumerate_qubits())
else:
raise TypeError(
'decoder component provided '
'is not suitable for BinaryPolynomial',
symbolic_binary)
if len(decoder_qubits) != self.n_qubits:
raise BinaryCodeError(
'decoder and encoder provided have different numbers of qubits')
if max(decoder_qubits) + 1 > self.n_qubits:
raise BinaryCodeError('decoder is not indexing some qubits. Qubits '
'indexed are: {}'.format(decoder_qubits))
def __iadd__(self, appendix):
""" In-place appending a binary code with +=.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): A global binary code with size
(n_modes1 + n_modes2), (n_qubits1,n_qubits2)
Raises:
TypeError: Appendix must be a BinaryCode.
"""
if not isinstance(appendix, BinaryCode):
raise TypeError('argument must be a BinaryCode.')
self.decoder = numpy.append(self.decoder,
shift_decoder(appendix.decoder,
self.n_qubits)).tolist()
self.encoder = scipy.sparse.bmat([[self.encoder, None],
[None, appendix.encoder]])
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
def __add__(self, appendix):
"""Appends two binary codes via addition +.
Args:
appendix (BinaryCode): The code to append to the present one.
Returns (BinaryCode): global binary code
"""
twin = copy.deepcopy(self)
twin += appendix
return twin
def __imul__(self, factor):
"""In-place code concatenation or appendage via *= .
Multiplication with integer will yield appendage, otherwise
concatenation.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
Raises:
TypeError: factor must be an integer or a BinaryCode
BinaryCodeError: size mismatch between self and factor
ValueError: in case of an integer factor that is < 1
"""
if not isinstance(factor, (BinaryCode, numpy.int32, numpy.int64, int)):
raise TypeError('argument must be a BinaryCode or integer')
if isinstance(factor, BinaryCode):
if self.n_qubits != factor.n_modes:
raise BinaryCodeError(
'size mismatch between inner and outer code layer')
self.decoder = double_decoding(self.decoder, factor.decoder)
self.encoder = factor.encoder.dot(self.encoder)
self.n_qubits, self.n_modes = numpy.shape(self.encoder)
return self
elif isinstance(factor, (numpy.int32, numpy.int64, int)):
if factor < 1:
raise ValueError('integer factor has to be positive, '
'non-zero ')
self.encoder = scipy.sparse.kron(
scipy.sparse.identity(factor, format='csc', dtype=int),
self.encoder, 'csc')
tmp_decoder = self.decoder
for index in numpy.arange(1, factor):
self.decoder = numpy.append(self.decoder,
shift_decoder(tmp_decoder,
index *
self.n_qubits))
self.n_qubits *= factor
self.n_modes *= factor
return self
def __mul__(self, factor):
""" Concatenation of two codes or appendage the same code factor times
in case of integer factor.
Args:
factor (int or BinaryCode): the BinaryCode to concatenate. In case
of int, it will append the code to itself factor times.
Returns (BinaryCode): segmented or concatenated code
"""
twin = copy.deepcopy(self)
twin *= factor
return twin
def __rmul__(self, factor):
""" Appending the same code factor times.
Args:
factor (int): integer defining number of appendages.
Returns (BinaryCode): Segmented code.
Raises:
TypeError: factor must be an integer
"""
if isinstance(factor, (numpy.int32, numpy.int64, int)):
return self * factor
else:
raise TypeError('the left multiplier must be an integer to a '
'BinaryCode. Was given {} of '
'type {}'.format(factor, type(factor)))
def __str__(self):
""" Return an easy-to-read string representation."""
string_return = [list(map(list, self.encoder.toarray()))]
dec_str = '['
for term in self.decoder:
dec_str += term.__str__() + ','
dec_str = dec_str[:-1]
string_return.append(dec_str + ']')
return str(string_return)
def __repr__(self):
return str(self)
|
feedback
|
Stores the feedback for a recommended post. Returns an information object on success and an empty object on failure.
Consider returning 409 Conflict on failure instead, because the empty object can cause issues in the engine service.
|
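The masked handler is not reproduced here; the following is only a hypothetical sketch of such an endpoint in this Sanic app, illustrating the 409 Conflict alternative suggested above. The '/feedback' route and the store_feedback call are placeholder names, not confirmed parts of this service or the kiwi package:
# Hypothetical sketch only: '/feedback' and 'store_feedback' are placeholders.
@app.post('/feedback')
async def feedback(request: Request):
    recommender = Recommender(
        app.predictor, request['accessor'], read_config())
    result = await recommender.store_feedback(request.json)  # placeholder call
    if not result:
        # alternative to returning an empty object, as suggested above
        return json({}, status=409)
    return json(result)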
from concurrent.futures import ThreadPoolExecutor, CancelledError
from aiomysql import create_pool
from asyncio import ensure_future, gather, sleep
from pymysql.err import OperationalError
from logging import getLogger
from sanic import Sanic
from sanic.request import Request
from sanic.response import json
from sanic.exceptions import abort
from kiwi.database.DataAccessor import DataAccessor
from kiwi.Recommender import Recommender
from kiwi.config import read_mysql_config, read_config
from kiwi.TransferTypes import create_vote
from kiwi.AsyncContentWrapper import AsyncContentWrapper
from kiwi.ContentEngine import ContentEngine
from kiwi.ActivationCalculator import ActivationCalculator
import time
app = Sanic(__name__)
def create_accessor(context):
return DataAccessor(conn=context)
async def repeated_pool(loop, sleeper, tries):
n = 1
while n <= tries:
try:
return await create_pool(**read_mysql_config()._asdict(),
autocommit=True,
loop=loop,
pool_recycle=600)
except OperationalError as e:
getLogger('root').warn(e)
getLogger('root').warn("Waiting {}s before retry".format(sleeper))
await sleep(sleeper)
n += 1
return await create_pool(**read_mysql_config()._asdict(),
autocommit=True,
loop=loop,
pool_recycle=600)
async def retrain(context, loop):
print("Start training...")
start = time.time()
async with context.pool.acquire() as conn:
accessor = create_accessor(conn)
content_frame = await accessor.get_content_frame()
rating_frame = await accessor.get_vote_frame()
print("Collected data in {}".format(time.time() - start))
algorithm = ContentEngine(
content_frame,
rating_frame)
predictor = AsyncContentWrapper(
loop, context.executor, algorithm)
await predictor.fit()
print("Completed training in {}s".format(time.time() - start))
context.algorithm = algorithm
context.predictor = predictor
@app.listener("before_server_start")
async def setup(context, loop):
context.executor = ThreadPoolExecutor()
context.pool = await repeated_pool(loop, 5, 10)
await retrain(context, loop)
@app.middleware("request")
async def generate_accessor(request):
request['conn'] = await app.pool.acquire()
request['accessor'] = create_accessor(request['conn'])
@app.middleware("response")
async def teardown_accessor(request, response):
await request['conn'].ensure_closed()
app.pool.release(request['conn'])
@app.listener("before_server_stop")
async def teardown(context, loop):
context.run_retrain = False
context.executor.shutdown()
context.pool.close()
await context.pool.wait_closed()
@app.get('/recommendation')
async def recommend(request):
'''
Gets recommendations for a user.
Expects args in query string form -> user=x&count=n
Returns a json object {posts, unvoted, user, meta}
'''
args = request.raw_args
recommender = Recommender(
app.predictor, request['accessor'], read_config())
posts = await recommender.recommend_for(args['user'],
int(args.get('count', 10)))
return json(posts)
# MASKED: feedback function (lines 106-119)
@app.post('/content')
async def content(request: Request):
'''
Inserts posts into the database. The request needs the format
{ "posts": [{"id": string, "tags": string}]}.
Returns the amount of inserted items and 200 OK.
'''
filtered_posts = [(post['id'], post['tags'])
for post in request.json['posts']]
inserted = await request['accessor'].add_content(filtered_posts)
if inserted > 0:
ensure_future(retrain(app, app.loop))
return json({"inserted_count": inserted})
@app.get('/predict')
async def predict(request: Request):
recommender = Recommender(
app.predictor, request['accessor'], read_config())
user = request.raw_args['user']
item = request.raw_args['item']
result = await recommender.predict(user, item)
return json(result)
@app.get('/activation')
async def activation(request: Request):
'''
Returns the activation value for the given set of heuristics
'''
heuristics = request.json['heuristics']
try:
utv = await app.predictor.get_user_taste_vector(heuristics["user"])
except KeyError:
utv = None
ac = ActivationCalculator(heuristics, request['accessor'])
a = await ac.get_activation(utv)
return json({"activation": a, 'received_heuristics': heuristics})
@app.post('/training')
async def training(request: Request):
votes = request.json['votes']
config = read_config()
do_retrain = request.json.get('retrain', False)
inserted_user = await request['accessor'].batch_register_users(
{str(vote[0]) for vote in votes})
inserted = await request['accessor'].insert_votes(
(str(vote[0]), str(vote[1]), 1 if float(vote[2]) > config['positive_cutoff'] else -1) for vote in votes)
if do_retrain:
ensure_future(retrain(app, app.loop))
return json({
'inserted_users': inserted_user,
'inserted_votes': inserted})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000)
|
@app.post('/feedback')
async def feedback(request: Request):
'''Stores the feedback for a recommended post. Returns an information object on success and an empty object on failure.
Consider returning 409 Conflict on failure instead, because the empty object can cause an issue in the engine service.'''
vote = request.json['vote']
config = read_config()
recommender = Recommender(
app.predictor, request['accessor'], config)
try:
vote_result = await recommender.store_feedback(
create_vote(vote, config['positive_cutoff']))
return json(vote_result)
except KeyError:
abort(400, "Unknown user")
| 106
| 119
|
from concurrent.futures import ThreadPoolExecutor, CancelledError
from aiomysql import create_pool
from asyncio import ensure_future, gather, sleep
from pymysql.err import OperationalError
from logging import getLogger
from sanic import Sanic
from sanic.request import Request
from sanic.response import json
from sanic.exceptions import abort
from kiwi.database.DataAccessor import DataAccessor
from kiwi.Recommender import Recommender
from kiwi.config import read_mysql_config, read_config
from kiwi.TransferTypes import create_vote
from kiwi.AsyncContentWrapper import AsyncContentWrapper
from kiwi.ContentEngine import ContentEngine
from kiwi.ActivationCalculator import ActivationCalculator
import time
app = Sanic(__name__)
def create_accessor(context):
return DataAccessor(conn=context)
async def repeated_pool(loop, sleeper, tries):
n = 1
while n <= tries:
try:
return await create_pool(**read_mysql_config()._asdict(),
autocommit=True,
loop=loop,
pool_recycle=600)
except OperationalError as e:
getLogger('root').warn(e)
getLogger('root').warn("Waiting {}s before retry".format(sleeper))
await sleep(sleeper)
n += 1
return await create_pool(**read_mysql_config()._asdict(),
autocommit=True,
loop=loop,
pool_recycle=600)
async def retrain(context, loop):
print("Start training...")
start = time.time()
async with context.pool.acquire() as conn:
accessor = create_accessor(conn)
content_frame = await accessor.get_content_frame()
rating_frame = await accessor.get_vote_frame()
print("Collected data in {}".format(time.time() - start))
algorithm = ContentEngine(
content_frame,
rating_frame)
predictor = AsyncContentWrapper(
loop, context.executor, algorithm)
await predictor.fit()
print("Completed training in {}s".format(time.time() - start))
context.algorithm = algorithm
context.predictor = predictor
@app.listener("before_server_start")
async def setup(context, loop):
context.executor = ThreadPoolExecutor()
context.pool = await repeated_pool(loop, 5, 10)
await retrain(context, loop)
@app.middleware("request")
async def generate_accessor(request):
request['conn'] = await app.pool.acquire()
request['accessor'] = create_accessor(request['conn'])
@app.middleware("response")
async def teardown_accessor(request, response):
await request['conn'].ensure_closed()
app.pool.release(request['conn'])
@app.listener("before_server_stop")
async def teardown(context, loop):
context.run_retrain = False
context.executor.shutdown()
context.pool.close()
await context.pool.wait_closed()
@app.get('/recommendation')
async def recommend(request):
'''
Gets recommendations for a user.
Expects args in query string form -> user=x&count=n
Returns a json object {posts, unvoted, user, meta}
'''
args = request.raw_args
recommender = Recommender(
app.predictor, request['accessor'], read_config())
posts = await recommender.recommend_for(args['user'],
int(args.get('count', 10)))
return json(posts)
@app.post('/feedback')
async def feedback(request: Request):
'''Stores the feedback for a recommended post. Returns an information object on success and an empty object on failure.
Consider returning 409 Conflict on failure instead, because the empty object can cause an issue in the engine service.'''
vote = request.json['vote']
config = read_config()
recommender = Recommender(
app.predictor, request['accessor'], config)
try:
vote_result = await recommender.store_feedback(
create_vote(vote, config['positive_cutoff']))
return json(vote_result)
except KeyError:
abort(400, "Unknown user")
@app.post('/content')
async def content(request: Request):
'''
Inserts posts into the database. The request needs the format
{ "posts": [{"id": string, "tags": string}]}.
Returns the amount of inserted items and 200 OK.
'''
filtered_posts = [(post['id'], post['tags'])
for post in request.json['posts']]
inserted = await request['accessor'].add_content(filtered_posts)
if inserted > 0:
ensure_future(retrain(app, app.loop))
return json({"inserted_count": inserted})
@app.get('/predict')
async def predict(request: Request):
recommender = Recommender(
app.predictor, request['accessor'], read_config())
user = request.raw_args['user']
item = request.raw_args['item']
result = await recommender.predict(user, item)
return json(result)
@app.get('/activation')
async def activation(request: Request):
'''
Returns the activation value for the given set of heuristics
'''
heuristics = request.json['heuristics']
try:
utv = await app.predictor.get_user_taste_vector(heuristics["user"])
except KeyError:
utv = None
ac = ActivationCalculator(heuristics, request['accessor'])
a = await ac.get_activation(utv)
return json({"activation": a, 'received_heuristics': heuristics})
@app.post('/training')
async def training(request: Request):
votes = request.json['votes']
config = read_config()
do_retrain = request.json.get('retrain', False)
inserted_user = await request['accessor'].batch_register_users(
{str(vote[0]) for vote in votes})
inserted = await request['accessor'].insert_votes(
(str(vote[0]), str(vote[1]), 1 if float(vote[2]) > config['positive_cutoff'] else -1) for vote in votes)
if do_retrain:
ensure_future(retrain(app, app.loop))
return json({
'inserted_users': inserted_user,
'inserted_votes': inserted})
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8000)
|
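To exercise the service above from outside, the endpoints are reachable over plain HTTP on port 8000. The sketch below uses the requests library; the exact shape of the "vote" payload consumed by create_vote() is not spelled out in the source, so the (user, item, rating) triple is an assumption.
import requests

BASE = "http://localhost:8000"

# fetch five recommendations for user "42"
recs = requests.get(f"{BASE}/recommendation", params={"user": "42", "count": 5})
print(recs.json())

# report feedback on a recommended post (payload shape is assumed)
fb = requests.post(f"{BASE}/feedback", json={"vote": ["42", "post-7", 1.0]})
print(fb.status_code, fb.json())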
allocate_buffers
|
Allocates host and device buffer for TRT engine inference.
This function is similar to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to the Python buffer. This is needed, since
TensorRT plugins don't support output type descriptions, and
in our particular case, we use the NMS plugin as the network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for building/saving/loading TensorRT Engine
import sys
import os
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from utils.modeldata import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
from common import HostDeviceMem
# MASKED: allocate_buffers function (lines 39-81)
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1, silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser, trt.Runtime(trt_logger) as runtime:
config.max_workspace_size = 1 << 30
if trt_engine_datatype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
builder.max_batch_size = batch_size
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
def save_engine(engine, engine_dest_path):
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
|
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
This function is similar to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to the Python buffer. This is needed, since
TensorRT plugins don't support output type descriptions, and
in our particular case, we use the NMS plugin as the network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# Current NMS implementation in TRT only supports DataType.FLOAT but
# it may change in the future, which could break this sample here
# when using lower precision [e.g. NMS output would not be np.float32
# anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
| 39
| 81
|
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility functions for building/saving/loading TensorRT Engine
import sys
import os
import tensorrt as trt
import pycuda.driver as cuda
import numpy as np
from utils.modeldata import ModelData
# ../../common.py
sys.path.insert(1,
os.path.join(
os.path.dirname(os.path.realpath(__file__)),
os.pardir,
os.pardir
)
)
from common import HostDeviceMem
def allocate_buffers(engine):
"""Allocates host and device buffer for TRT engine inference.
This function is similar to the one in ../../common.py, but
converts network outputs (which are np.float32) appropriately
before writing them to the Python buffer. This is needed, since
TensorRT plugins don't support output type descriptions, and
in our particular case, we use the NMS plugin as the network output.
Args:
engine (trt.ICudaEngine): TensorRT engine
Returns:
inputs [HostDeviceMem]: engine input memory
outputs [HostDeviceMem]: engine output memory
bindings [int]: buffer to device bindings
stream (cuda.Stream): cuda stream for engine inference synchronization
"""
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
# Current NMS implementation in TRT only supports DataType.FLOAT but
# it may change in the future, which could break this sample here
# when using lower precision [e.g. NMS output would not be np.float32
# anymore, even though this is assumed in binding_to_type]
binding_to_type = {"Input": np.float32, "NMS": np.float32, "NMS_1": np.int32}
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
dtype = binding_to_type[str(binding)]
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
def build_engine(uff_model_path, trt_logger, trt_engine_datatype=trt.DataType.FLOAT, batch_size=1, silent=False):
with trt.Builder(trt_logger) as builder, builder.create_network() as network, builder.create_builder_config() as config, trt.UffParser() as parser, trt.Runtime(trt_logger) as runtime:
config.max_workspace_size = 1 << 30
if trt_engine_datatype == trt.DataType.HALF:
config.set_flag(trt.BuilderFlag.FP16)
builder.max_batch_size = batch_size
parser.register_input(ModelData.INPUT_NAME, ModelData.INPUT_SHAPE)
parser.register_output("MarkOutput_0")
parser.parse(uff_model_path, network)
if not silent:
print("Building TensorRT engine. This may take few minutes.")
plan = builder.build_serialized_network(network, config)
return runtime.deserialize_cuda_engine(plan)
def save_engine(engine, engine_dest_path):
buf = engine.serialize()
with open(engine_dest_path, 'wb') as f:
f.write(buf)
def load_engine(trt_runtime, engine_path):
with open(engine_path, 'rb') as f:
engine_data = f.read()
engine = trt_runtime.deserialize_cuda_engine(engine_data)
return engine
|
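The buffers returned by allocate_buffers() are normally consumed together with an execution context. The sketch below mirrors the do_inference() helper from TensorRT's common.py sample (the same module this file imports HostDeviceMem from); the .host/.device attribute names are taken from that sample rather than from this file.
import pycuda.driver as cuda

def do_inference(context, bindings, inputs, outputs, stream, batch_size=1):
    # copy inputs host -> device
    for inp in inputs:
        cuda.memcpy_htod_async(inp.device, inp.host, stream)
    # run inference with the legacy implicit-batch API used by build_engine()
    context.execute_async(batch_size=batch_size, bindings=bindings,
                          stream_handle=stream.handle)
    # copy outputs device -> host and wait for the stream to finish
    for out in outputs:
        cuda.memcpy_dtoh_async(out.host, out.device, stream)
    stream.synchronize()
    return [out.host for out in outputs]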
rainbow_to_vector
|
Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
time (BJD_TDB, hours) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths]
|
from .imports import *
# MASKED: rainbow_to_vector function (lines 4-51)
def rainbow_to_df(r, timeformat='h'):
""" Convert Rainbow object to pandas dataframe
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into pandas df format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
pd.DataFrame
"""
rflux, rfluxe, rtime, rwavel = rainbow_to_vector(r, timeformat)
x, y = np.meshgrid(rtime.to_value(), rwavel.to_value())
rainbow_dict = {f"Time ({timeformat})": x.ravel(), "Wavelength (microns)": y.ravel(), "Flux": rflux.ravel(),
"Flux Error": rfluxe.ravel()}
df = pd.DataFrame(rainbow_dict)
return df
def bin_data(jd, y, mins_jd):
t = np.array(jd)
split = []
sorted_t = t
sorted_y = y
start = sorted_t[0]
nextbin = sorted_t[np.absolute(sorted_t - start) > mins_jd]
while nextbin != []:
start = start + mins_jd
ind_st = np.argmax(sorted_t > start)
if len(split) > 0:
if ind_st != split[-1]:
split.append(ind_st)
time = sorted_t[ind_st:]
# need to add defn for time here?
else:
split.append(ind_st)
time = sorted_t[ind_st:]
nextbin = time[np.absolute(time - start) > mins_jd]
times = np.split(sorted_t, split)
ys = np.split(sorted_y, split)
bins = np.zeros(len(times))
binned_y = np.zeros(len(times))
binned_err = np.zeros(len(times))
for i in range(len(times)):
if len(ys[i]) > 0:
try:
bins[i] = np.nanmean(times[i])
binned_y[i] = np.nanmean(ys[i])
n = len(times[i])
# standard error in the median:
# binned_err[i] = 1.253 * np.nanstd(ys[i]) / np.sqrt(n)
binned_err[i] = np.nanstd(ys[i]) / np.sqrt(n)
except Exception as e:
print(e)
pass
bin_t = bins[binned_y != 0]
bin_e = binned_err[binned_y != 0]
bin_y = binned_y[binned_y != 0]
return bin_t, bin_y, bin_e
def find_nearest(array, value):
# array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def remove_nans(arr_with_nans,*otherarrs):
nanfree_arrs = []
for arr in otherarrs:
nanfree_arrs.append(arr[~np.isnan(arr_with_nans)])
arr_without_nans = arr_with_nans[~np.isnan(arr_with_nans)]
return arr_without_nans, nanfree_arrs
|
def rainbow_to_vector(r, timeformat='h'):
""" Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
time (BJD_TDB, hours) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths]
"""
secondformat = ['second', 'seconds', 'sec', 's']
minuteformat = ['minute', 'minutes', 'min', 'm']
hourformat = ['hour', 'hours', 'h']
dayformat = ['day', 'days', 'd']
yearformat = ['year', 'years', 'y']
rflux = r.fluxlike['flux'] # flux (MJy/sr) : [n_wavelengths x n_integrations]
rfluxe = r.fluxlike['uncertainty'] # flux error (MJy/sr) : [n_wavelengths x n_integrations]
rtime = r.timelike['time'] # time (BJD_TDB, hours) : [n_integrations]
rwavel = r.wavelike['wavelength'] # wavelength (microns) : [n_wavelengths]
# change the time array into the requested format (e.g. seconds, minutes, days etc.)
if timeformat in secondformat:
rtime = rtime * 3600
elif timeformat in minuteformat:
rtime = rtime * 60
elif timeformat in hourformat:
# hours is the default time setting
pass
elif timeformat in dayformat:
rtime = rtime / 24.
elif timeformat in yearformat:
rtime = rtime / (24 * 365.)
else:
warnings.warn("Unrecognised Time Format!")
return
return rflux, rfluxe, rtime, rwavel
| 4
| 51
|
from .imports import *
def rainbow_to_vector(r, timeformat='h'):
""" Convert Rainbow object to np.arrays
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into array format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
rflux : np.array
flux (MJy/sr) [n_wavelengths x n_integrations]
rfluxe : np.array
flux error (MJy/sr) [n_wavelengths x n_integrations]
rtime : np.array
time (BJD_TDB, hours) [n_integrations]
rwavel : np.array
wavelength (microns) [n_wavelengths]
"""
secondformat = ['second', 'seconds', 'sec', 's']
minuteformat = ['minute', 'minutes', 'min', 'm']
hourformat = ['hour', 'hours', 'h']
dayformat = ['day', 'days', 'd']
yearformat = ['year', 'years', 'y']
rflux = r.fluxlike['flux'] # flux (MJy/sr) : [n_wavelengths x n_integrations]
rfluxe = r.fluxlike['uncertainty'] # flux error (MJy/sr) : [n_wavelengths x n_integrations]
rtime = r.timelike['time'] # time (BJD_TDB, hours) : [n_integrations]
rwavel = r.wavelike['wavelength'] # wavelength (microns) : [n_wavelengths]
# change the time array into the requested format (e.g. seconds, minutes, days etc.)
if timeformat in secondformat:
rtime = rtime * 3600
elif timeformat in minuteformat:
rtime = rtime * 60
elif timeformat in hourformat:
# hours is the default time setting
pass
elif timeformat in dayformat:
rtime = rtime / 24.
elif timeformat in yearformat:
rtime = rtime / (24 * 365.)
else:
warnings.warn("Unrecognised Time Format!")
return
return rflux, rfluxe, rtime, rwavel
def rainbow_to_df(r, timeformat='h'):
""" Convert Rainbow object to pandas dataframe
Parameters
----------
r : Rainbow object
chromatic Rainbow object to convert into pandas df format
timeformat : str
(optional, default='hours')
The time format to use (seconds, minutes, hours, days etc.)
Returns
----------
pd.DataFrame
"""
rflux, rfluxe, rtime, rwavel = rainbow_to_vector(r, timeformat)
x, y = np.meshgrid(rtime.to_value(), rwavel.to_value())
rainbow_dict = {f"Time ({timeformat})": x.ravel(), "Wavelength (microns)": y.ravel(), "Flux": rflux.ravel(),
"Flux Error": rfluxe.ravel()}
df = pd.DataFrame(rainbow_dict)
return df
def bin_data(jd, y, mins_jd):
t = np.array(jd)
split = []
sorted_t = t
sorted_y = y
start = sorted_t[0]
nextbin = sorted_t[np.absolute(sorted_t - start) > mins_jd]
while nextbin != []:
start = start + mins_jd
ind_st = np.argmax(sorted_t > start)
if len(split) > 0:
if ind_st != split[-1]:
split.append(ind_st)
time = sorted_t[ind_st:]
# need to add defn for time here?
else:
split.append(ind_st)
time = sorted_t[ind_st:]
nextbin = time[np.absolute(time - start) > mins_jd]
times = np.split(sorted_t, split)
ys = np.split(sorted_y, split)
bins = np.zeros(len(times))
binned_y = np.zeros(len(times))
binned_err = np.zeros(len(times))
for i in range(len(times)):
if len(ys[i]) > 0:
try:
bins[i] = np.nanmean(times[i])
binned_y[i] = np.nanmean(ys[i])
n = len(times[i])
# standard error in the median:
# binned_err[i] = 1.253 * np.nanstd(ys[i]) / np.sqrt(n)
binned_err[i] = np.nanstd(ys[i]) / np.sqrt(n)
except Exception as e:
print(e)
pass
bin_t = bins[binned_y != 0]
bin_e = binned_err[binned_y != 0]
bin_y = binned_y[binned_y != 0]
return bin_t, bin_y, bin_e
def find_nearest(array, value):
# array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def remove_nans(arr_with_nans,*otherarrs):
nanfree_arrs = []
for arr in otherarrs:
nanfree_arrs.append(arr[~np.isnan(arr_with_nans)])
arr_without_nans = arr_with_nans[~np.isnan(arr_with_nans)]
return arr_without_nans, nanfree_arrs
|
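A quick way to try the helpers above is with a synthetic Rainbow. The sketch assumes chromatic's SimulatedRainbow class and its inject_noise() method, which may differ between chromatic versions; any real Rainbow object can be substituted.
from chromatic import SimulatedRainbow   # assumed chromatic API

r = SimulatedRainbow().inject_noise()    # toy Rainbow with random scatter
rflux, rfluxe, rtime, rwavel = rainbow_to_vector(r, timeformat='h')
print(rflux.shape, rtime.shape, rwavel.shape)

df = rainbow_to_df(r, timeformat='h')    # same data flattened into a dataframe
print(df.head())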
train_step
|
train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
_find_tensors)
from mmcv import print_log
from mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(DistributedDataParallel):
"""The DDP module that supports DataContainer.
MMDDP has two main differences from PyTorch DDP:
- It supports a custom type :class:`DataContainer` which allows more
flexible control of input data.
- It implements two APIs, ``train_step()`` and ``val_step()``.
"""
def to_kwargs(self, inputs, kwargs, device_id):
# Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
# to move all tensors to device_id
return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
# MASKED: train_step function (lines 29-83)
def val_step(self, *inputs, **kwargs):
"""val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
|
def train_step(self, *inputs, **kwargs):
"""train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
| 29
| 83
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
_find_tensors)
from mmcv import print_log
from mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(DistributedDataParallel):
"""The DDP module that supports DataContainer.
MMDDP has two main differences from PyTorch DDP:
- It supports a custom type :class:`DataContainer` which allows more
flexible control of input data.
- It implements two APIs, ``train_step()`` and ``val_step()``.
"""
def to_kwargs(self, inputs, kwargs, device_id):
# Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
# to move all tensors to device_id
return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def train_step(self, *inputs, **kwargs):
"""train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
def val_step(self, *inputs, **kwargs):
"""val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
|
val_step
|
val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
_find_tensors)
from mmcv import print_log
from mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(DistributedDataParallel):
"""The DDP module that supports DataContainer.
MMDDP has two main differences from PyTorch DDP:
- It supports a custom type :class:`DataContainer` which allows more
flexible control of input data.
- It implements two APIs, ``train_step()`` and ``val_step()``.
"""
def to_kwargs(self, inputs, kwargs, device_id):
# Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
# to move all tensors to device_id
return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def train_step(self, *inputs, **kwargs):
"""train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
# MASKED: val_step function (lines 85-138)
|
def val_step(self, *inputs, **kwargs):
"""val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
| 85
| 138
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.parallel.distributed import (DistributedDataParallel,
_find_tensors)
from mmcv import print_log
from mmcv.utils import TORCH_VERSION, digit_version
from .scatter_gather import scatter_kwargs
class MMDistributedDataParallel(DistributedDataParallel):
"""The DDP module that supports DataContainer.
MMDDP has two main differences from PyTorch DDP:
- It supports a custom type :class:`DataContainer` which allows more
flexible control of input data.
- It implements two APIs, ``train_step()`` and ``val_step()``.
"""
def to_kwargs(self, inputs, kwargs, device_id):
# Use `self.to_kwargs` instead of `self.scatter` in pytorch1.8
# to move all tensors to device_id
return scatter_kwargs(inputs, kwargs, [device_id], dim=self.dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def train_step(self, *inputs, **kwargs):
"""train_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.train_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.train_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.train_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
def val_step(self, *inputs, **kwargs):
"""val_step() API for module wrapped by DistributedDataParallel.
This method is basically the same as
``DistributedDataParallel.forward()``, while replacing
``self.module.forward()`` with ``self.module.val_step()``.
It is compatible with PyTorch 1.1 - 1.5.
"""
# In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
# end of backward to the beginning of forward.
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.7')
and self.reducer._rebuild_buckets()):
print_log(
'Reducer buckets have been rebuilt in this iteration.',
logger='mmcv')
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_pre_fwd():
self._sync_buffers()
else:
if (getattr(self, 'require_forward_param_sync', False)
and self.require_forward_param_sync):
self._sync_params()
if self.device_ids:
inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
if len(self.device_ids) == 1:
output = self.module.val_step(*inputs[0], **kwargs[0])
else:
outputs = self.parallel_apply(
self._module_copies[:len(inputs)], inputs, kwargs)
output = self.gather(outputs, self.output_device)
else:
output = self.module.val_step(*inputs, **kwargs)
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
if self._check_sync_bufs_post_fwd():
self._sync_buffers()
if (torch.is_grad_enabled()
and getattr(self, 'require_backward_grad_sync', False)
and self.require_backward_grad_sync):
if self.find_unused_parameters:
self.reducer.prepare_for_backward(list(_find_tensors(output)))
else:
self.reducer.prepare_for_backward([])
else:
if ('parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) > digit_version('1.2')):
self.require_forward_param_sync = False
return output
|
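For context, the wrapper above is normally driven by an mmcv runner together with an optimizer hook. The hand-rolled loop below sketches that flow; build_toy_model() and data_loader are placeholders, and the assumption that train_step() returns a dict with a 'loss' tensor follows the mmcv/mmdet model convention rather than anything defined in this file.
import torch
from mmcv.parallel import MMDistributedDataParallel

model = MMDistributedDataParallel(
    build_toy_model().cuda(),                      # placeholder model defining train_step()/val_step()
    device_ids=[torch.cuda.current_device()],
    broadcast_buffers=False)
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

for data_batch in data_loader:                     # placeholder iterable of batches
    outputs = model.train_step(data_batch, optimizer)
    optimizer.zero_grad()
    outputs['loss'].backward()                     # assumed 'loss' key, per mmcv convention
    optimizer.step()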
request
|
Send an HTTP request
:param method: HTTP request method, as an all caps string
:type method: str
:param path: Path for the request, with or without leading slash
:type path: str
:param query_params: Parameters to be encoded as a query string
:type query_params: dict, optional
:param headers: HTTP headers to add to the request
:type headers: dict
:param data: Data to send as the request body. May pass through encoding.
:type data: dict or str
:param encoding: A way to encode request data. "json", "form", and "text"
are all valid values. Custom encodings can be used only if they are
registered with the transport. By default, strings get "text" behavior and
all other objects get "json".
:type encoding: str
:return: :class:`GlobusHTTPResponse <globus_sdk.response.GlobusHTTPResponse>` object
|
import logging
import urllib.parse
from typing import Any, Dict, Optional, Type, Union
from globus_sdk import config, exc, utils
from globus_sdk.authorizers import GlobusAuthorizer
from globus_sdk.paging import PaginatorTable
from globus_sdk.response import GlobusHTTPResponse
from globus_sdk.scopes import ScopeBuilder
from globus_sdk.transport import RequestsTransport
log = logging.getLogger(__name__)
class BaseClient:
r"""
Abstract base class for clients with error handling for Globus APIs.
:param authorizer: A ``GlobusAuthorizer`` which will generate Authorization headers
:type authorizer: :class:`GlobusAuthorizer\
<globus_sdk.authorizers.base.GlobusAuthorizer>`
:param app_name: Optional "nice name" for the application. Has no bearing on the
semantics of client actions. It is just passed as part of the User-Agent
string, and may be useful when debugging issues with the Globus Team
:type app_name: str
:param transport_params: Options to pass to the transport for this client
:type transport_params: dict
All other parameters are for internal use and should be ignored.
"""
# service name is used to lookup a service URL from config
service_name: str = "_base"
# path under the client base URL
base_path: str = "/"
#: the class for errors raised by this client on HTTP 4xx and 5xx errors
#: this can be set in subclasses, but must always be a subclass of GlobusError
error_class: Type[exc.GlobusAPIError] = exc.GlobusAPIError
#: the type of Transport which will be used, defaults to ``RequestsTransport``
transport_class: Type[RequestsTransport] = RequestsTransport
#: the scopes for this client may be present as a ``ScopeBuilder``
scopes: Optional[ScopeBuilder] = None
def __init__(
self,
*,
environment: Optional[str] = None,
base_url: Optional[str] = None,
authorizer: Optional[GlobusAuthorizer] = None,
app_name: Optional[str] = None,
transport_params: Optional[Dict[str, Any]] = None,
):
# explicitly check the `service_name` to ensure that it was set
#
# unfortunately, we can't rely on declaring BaseClient as an ABC because it
# doesn't have any abstract methods
#
# if we declare `service_name` without a value, we get AttributeError on access
# instead of the (desired) TypeError when instantiating a BaseClient because
# it's abstract
if self.service_name == "_base":
raise NotImplementedError(
"Cannot instantiate clients which do not set a 'service_name'"
)
log.info(
f'Creating client of type {type(self)} for service "{self.service_name}"'
)
# if an environment was passed, it will be used, but otherwise lookup
# the env var -- and in the special case of `production` translate to
# `default`, regardless of the source of that value
# logs the environment when it isn't `default`
self.environment = config.get_environment_name(environment)
self.transport = self.transport_class(**(transport_params or {}))
log.debug(f"initialized transport of type {type(self.transport)}")
if not self.service_name and not base_url:
raise ValueError("Either service_name or base_url must be set")
self.base_url = utils.slash_join(
config.get_service_url(self.service_name, environment=self.environment)
if base_url is None
else base_url,
self.base_path,
)
self.authorizer = authorizer
# set application name if given
self._app_name = None
if app_name is not None:
self.app_name = app_name
# setup paginated methods
self.paginated = PaginatorTable(self)
@property
def app_name(self) -> Optional[str]:
return self._app_name
@app_name.setter
def app_name(self, value: str) -> None:
self._app_name = self.transport.user_agent = value
@utils.classproperty
def resource_server(cls) -> Optional[str]:
"""
The resource_server name for the API and scopes associated with this client.
This information is pulled from the ``scopes`` attribute of the client class.
If the client does not have associated scopes, this value will be ``None``.
"""
if cls.scopes is None:
return None
return cls.scopes.resource_server
def get(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> GlobusHTTPResponse:
"""
Make a GET request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"GET to {path} with query_params {query_params}")
return self.request("GET", path, query_params=query_params, headers=headers)
def post(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a POST request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"POST to {path} with query_params {query_params}")
return self.request(
"POST",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def delete(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> GlobusHTTPResponse:
"""
Make a DELETE request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"DELETE to {path} with query_params {query_params}")
return self.request("DELETE", path, query_params=query_params, headers=headers)
def put(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a PUT request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"PUT to {path} with query_params {query_params}")
return self.request(
"PUT",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def patch(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a PATCH request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"PATCH to {path} with query_params {query_params}")
return self.request(
"PATCH",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
# MASKED: request function (lines 237-298)
|
def request(
self,
method: str,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Send an HTTP request
:param method: HTTP request method, as an all caps string
:type method: str
:param path: Path for the request, with or without leading slash
:type path: str
:param query_params: Parameters to be encoded as a query string
:type query_params: dict, optional
:param headers: HTTP headers to add to the request
:type headers: dict
:param data: Data to send as the request body. May pass through encoding.
:type data: dict or str
:param encoding: A way to encode request data. "json", "form", and "text"
are all valid values. Custom encodings can be used only if they are
registered with the transport. By default, strings get "text" behavior and
all other objects get "json".
:type encoding: str
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
# prepare data...
# copy headers if present
rheaders = {**headers} if headers else {}
# if a client is asked to make a request against a full URL, not just the path
# component, then do not resolve the path, simply pass it through as the URL
if path.startswith("https://") or path.startswith("http://"):
url = path
else:
url = utils.slash_join(self.base_url, urllib.parse.quote(path))
# make the request
log.debug("request will hit URL: %s", url)
r = self.transport.request(
method=method,
url=url,
data=data.data if isinstance(data, utils.PayloadWrapper) else data,
query_params=query_params,
headers=rheaders,
encoding=encoding,
authorizer=self.authorizer,
)
log.debug("request made to URL: %s", r.url)
if 200 <= r.status_code < 400:
log.debug(f"request completed with response code: {r.status_code}")
return GlobusHTTPResponse(r, self)
log.debug(f"request completed with (error) response code: {r.status_code}")
raise self.error_class(r)
| 237
| 298
|
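For context on how request() above is reached in practice: clients subclass BaseClient, set a service_name, and call the verb helpers (get, post, put, patch, delete), each of which dispatches through request(). In the sketch below the service name, base_url, and path are placeholders rather than real Globus endpoints, and the top-level BaseClient import assumes a recent globus-sdk layout.
from globus_sdk import BaseClient   # assumed top-level export

class ExampleClient(BaseClient):
    service_name = "example"        # any value other than "_base" passes the __init__ check

client = ExampleClient(base_url="https://api.example.org/")   # placeholder URL; skips the config lookup
resp = client.get("/v1/widgets", query_params={"limit": 10})  # dispatches through request()
print(resp.http_status, resp.data)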
import logging
import urllib.parse
from typing import Any, Dict, Optional, Type, Union
from globus_sdk import config, exc, utils
from globus_sdk.authorizers import GlobusAuthorizer
from globus_sdk.paging import PaginatorTable
from globus_sdk.response import GlobusHTTPResponse
from globus_sdk.scopes import ScopeBuilder
from globus_sdk.transport import RequestsTransport
log = logging.getLogger(__name__)
class BaseClient:
r"""
Abstract base class for clients with error handling for Globus APIs.
:param authorizer: A ``GlobusAuthorizer`` which will generate Authorization headers
:type authorizer: :class:`GlobusAuthorizer\
<globus_sdk.authorizers.base.GlobusAuthorizer>`
:param app_name: Optional "nice name" for the application. Has no bearing on the
semantics of client actions. It is just passed as part of the User-Agent
string, and may be useful when debugging issues with the Globus Team
:type app_name: str
:param transport_params: Options to pass to the transport for this client
:type transport_params: dict
All other parameters are for internal use and should be ignored.
"""
# service name is used to lookup a service URL from config
service_name: str = "_base"
# path under the client base URL
base_path: str = "/"
#: the class for errors raised by this client on HTTP 4xx and 5xx errors
#: this can be set in subclasses, but must always be a subclass of GlobusError
error_class: Type[exc.GlobusAPIError] = exc.GlobusAPIError
#: the type of Transport which will be used, defaults to ``RequestsTransport``
transport_class: Type[RequestsTransport] = RequestsTransport
#: the scopes for this client may be present as a ``ScopeBuilder``
scopes: Optional[ScopeBuilder] = None
def __init__(
self,
*,
environment: Optional[str] = None,
base_url: Optional[str] = None,
authorizer: Optional[GlobusAuthorizer] = None,
app_name: Optional[str] = None,
transport_params: Optional[Dict[str, Any]] = None,
):
# explicitly check the `service_name` to ensure that it was set
#
# unfortunately, we can't rely on declaring BaseClient as an ABC because it
# doesn't have any abstract methods
#
# if we declare `service_name` without a value, we get AttributeError on access
# instead of the (desired) TypeError when instantiating a BaseClient because
# it's abstract
if self.service_name == "_base":
raise NotImplementedError(
"Cannot instantiate clients which do not set a 'service_name'"
)
log.info(
f'Creating client of type {type(self)} for service "{self.service_name}"'
)
        # if an environment was passed, it will be used; otherwise, look up
        # the env var -- and in the special case of `production`, translate to
        # `default`, regardless of the source of that value
        # (the environment is logged when it isn't `default`)
self.environment = config.get_environment_name(environment)
self.transport = self.transport_class(**(transport_params or {}))
log.debug(f"initialized transport of type {type(self.transport)}")
if not self.service_name and not base_url:
raise ValueError("Either service_name or base_url must be set")
self.base_url = utils.slash_join(
config.get_service_url(self.service_name, environment=self.environment)
if base_url is None
else base_url,
self.base_path,
)
self.authorizer = authorizer
# set application name if given
self._app_name = None
if app_name is not None:
self.app_name = app_name
# setup paginated methods
self.paginated = PaginatorTable(self)
@property
def app_name(self) -> Optional[str]:
return self._app_name
@app_name.setter
def app_name(self, value: str) -> None:
self._app_name = self.transport.user_agent = value
@utils.classproperty
def resource_server(cls) -> Optional[str]:
"""
The resource_server name for the API and scopes associated with this client.
This information is pulled from the ``scopes`` attribute of the client class.
If the client does not have associated scopes, this value will be ``None``.
"""
if cls.scopes is None:
return None
return cls.scopes.resource_server
def get(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> GlobusHTTPResponse:
"""
Make a GET request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"GET to {path} with query_params {query_params}")
return self.request("GET", path, query_params=query_params, headers=headers)
def post(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a POST request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"POST to {path} with query_params {query_params}")
return self.request(
"POST",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def delete(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
) -> GlobusHTTPResponse:
"""
Make a DELETE request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"DELETE to {path} with query_params {query_params}")
return self.request("DELETE", path, query_params=query_params, headers=headers)
def put(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a PUT request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"PUT to {path} with query_params {query_params}")
return self.request(
"PUT",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def patch(
self,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Make a PATCH request to the specified path.
See :py:meth:`~.BaseClient.request` for details on the various parameters.
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
log.debug(f"PATCH to {path} with query_params {query_params}")
return self.request(
"PATCH",
path,
query_params=query_params,
data=data,
headers=headers,
encoding=encoding,
)
def request(
self,
method: str,
path: str,
*,
query_params: Optional[Dict[str, Any]] = None,
data: Union[None, Dict[str, Any], utils.PayloadWrapper] = None,
headers: Optional[Dict[str, str]] = None,
encoding: Optional[str] = None,
) -> GlobusHTTPResponse:
"""
Send an HTTP request
:param method: HTTP request method, as an all caps string
:type method: str
:param path: Path for the request, with or without leading slash
:type path: str
:param query_params: Parameters to be encoded as a query string
:type query_params: dict, optional
:param headers: HTTP headers to add to the request
:type headers: dict
:param data: Data to send as the request body. May pass through encoding.
:type data: dict or str
:param encoding: A way to encode request data. "json", "form", and "text"
are all valid values. Custom encodings can be used only if they are
registered with the transport. By default, strings get "text" behavior and
all other objects get "json".
:type encoding: str
:return: :class:`GlobusHTTPResponse \
<globus_sdk.response.GlobusHTTPResponse>` object
"""
# prepare data...
# copy headers if present
rheaders = {**headers} if headers else {}
# if a client is asked to make a request against a full URL, not just the path
# component, then do not resolve the path, simply pass it through as the URL
if path.startswith("https://") or path.startswith("http://"):
url = path
else:
url = utils.slash_join(self.base_url, urllib.parse.quote(path))
# make the request
log.debug("request will hit URL: %s", url)
r = self.transport.request(
method=method,
url=url,
data=data.data if isinstance(data, utils.PayloadWrapper) else data,
query_params=query_params,
headers=rheaders,
encoding=encoding,
authorizer=self.authorizer,
)
log.debug("request made to URL: %s", r.url)
if 200 <= r.status_code < 400:
log.debug(f"request completed with response code: {r.status_code}")
return GlobusHTTPResponse(r, self)
log.debug(f"request completed with (error) response code: {r.status_code}")
raise self.error_class(r)
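# Illustrative sketch (not part of the SDK): a minimal concrete client built on
# BaseClient above. The service_name "transfer", the base_path, and the request
# path are assumptions made only for this example.
class _ExampleClient(BaseClient):
    service_name = "transfer"
    base_path = "/v0.10/"


def _search_endpoints(client: _ExampleClient) -> GlobusHTTPResponse:
    # Dispatches through BaseClient.get -> BaseClient.request as defined above.
    return client.get("/endpoint_search", query_params={"filter_fulltext": "demo"})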
|
build_stats
|
Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
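Example (hedged sketch; key names follow the implementation below, numeric
  values are illustrative only):
    {'accuracy_top_1': 0.76, 'eval_loss': 0.98, 'loss': 1.02,
     'training_accuracy_top_1': 0.74, 'step_timestamp_log': [...],
     'train_finish_time': 1234567890.0, 'avg_exp_per_second': 2500.0}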
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
import tensorflow_model_optimization as tfmot
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch,
current_batch,
steps_per_epoch,
batch_size):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.
Returns:
Adjusted learning rate.
"""
initial_lr = BASE_LEARNING_RATE * batch_size / 256
epoch = current_epoch + float(current_batch) / steps_per_epoch
warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
if epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
for mult, start_epoch in LR_SCHEDULE:
if epoch >= start_epoch:
learning_rate = initial_lr * mult
else:
break
return learning_rate
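# Worked example (illustrative): with batch_size=1024, initial_lr is
# 0.1 * 1024 / 256 = 0.4. The rate ramps linearly toward 0.4 over the warmup
# epochs (0-5), then per LR_SCHEDULE it is 0.4 from epoch 5, 0.04 from epoch 30,
# 0.004 from epoch 60, and 0.0004 from epoch 80.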
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
  N.B. Only supports Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
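# Usage sketch (hedged; FLAGS are assumed to be parsed elsewhere): this callback
# is attached to keras.fit by get_callbacks() below, roughly as
#   LearningRateBatchScheduler(learning_rate_schedule,
#                              batch_size=FLAGS.batch_size,
#                              steps_per_epoch=steps_per_epoch)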
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * steps_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
    # In an eager function or graph, the current implementation of the optimizer
    # repeatedly calls and thus creates ops for the learning rate schedule. To
# avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
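# Worked example (hedged; assumes an ImageNet-sized epoch of 1,281,167 images):
# with batch_size=1024, steps_per_epoch = 1251 and
# rescaled_lr = 0.1 * 1024 / 256 = 0.4; the rate ramps linearly to 0.4 over
# warmup_epochs * 1251 steps and then follows the piecewise-constant boundaries.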
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(
steps_per_epoch,
learning_rate_schedule_fn=None,
pruning_method=None,
enable_checkpoint_and_export=False,
model_dir=None):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard,
steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = pruning_method is not None
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if model_dir is not None:
callbacks.append(tfmot.sparsity.keras.PruningSummaries(
log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if model_dir is not None:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
# MASKED: build_stats function (lines 230-273)
def define_keras_flags(
dynamic_loss_scale=True,
model=False,
optimizer=False,
pretrained_filepath=False):
"""Define flags for Keras models."""
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
train_epochs=True, epochs_between_evals=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
loss_scale=True,
fp16_implementation=True,
tf_data_experimental_slack=True,
enable_xla=True,
force_v2_in_keras_compile=True,
training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
# TODO(b/135607288): Remove this flag once we understand the root cause of
# slowdown when setting the learning phase in Keras backend.
flags.DEFINE_boolean(
name='set_learning_phase_to_train', default=True,
help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(
name='explicit_gpu_placement', default=False,
help='If not using distribution strategy, explicitly set device scope '
'for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. This flag will be '
'ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of global steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(
name='enable_checkpoint_and_export', default=False,
help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(
name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(
name='steps_per_loop',
default=500,
help='Number of steps per training loop. Only training step happens '
'inside the loop. Callbacks will not be called inside. Will be capped at '
'steps per epoch.')
flags.DEFINE_boolean(
name='use_tf_while_loop',
default=True,
help='Whether to build a tf.while_loop inside the training loop on the '
'host. Setting it to True is critical to have peak performance on '
'TPU.')
flags.DEFINE_boolean(
name='use_tf_keras_layers', default=False,
help='Whether to use tf.keras.layers instead of tf.python.keras.layers.'
'It only changes imagenet resnet model layers for now. This flag is '
      'a temporary flag during the transition to tf.keras.layers. Do not use '
      'this flag for external usage. This will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5',
'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default',
'Name of optimizer preset. '
'(mobilenet_default, resnet50_default)')
# TODO(kimjaehong): Replace as general hyper-params not only for mobilenet.
flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007,
'Initial value of learning rate per sample for '
'mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94,
'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5,
'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '',
'Pretrained file path.')
def get_synth_data(height, width, num_channels, num_classes, dtype):
"""Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
"""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
return inputs, labels
def define_pruning_flags():
"""Define flags for pruning methods."""
flags.DEFINE_string('pruning_method', None,
'Pruning method.'
'None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0,
'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5,
'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0,
'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000,
'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100,
'Frequency for pruning.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
    drop_remainder: A boolean indicating whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
inputs, labels = get_synth_data(height=height,
width=width,
num_channels=num_channels,
num_classes=num_classes,
dtype=dtype)
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
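# Usage sketch (hedged; shapes follow get_synth_data above):
#   input_fn = get_synth_input_fn(224, 224, 3, num_classes=1000)
#   dataset = input_fn(is_training=True, data_dir=None, batch_size=32)
# yields repeated batches of (32, 224, 224, 3) images and (32, 1) float32 labels.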
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
"""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
|
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
| 230
| 273
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
import tensorflow_model_optimization as tfmot
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch,
current_batch,
steps_per_epoch,
batch_size):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.
Returns:
Adjusted learning rate.
"""
initial_lr = BASE_LEARNING_RATE * batch_size / 256
epoch = current_epoch + float(current_batch) / steps_per_epoch
warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
if epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
for mult, start_epoch in LR_SCHEDULE:
if epoch >= start_epoch:
learning_rate = initial_lr * mult
else:
break
return learning_rate
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
  N.B. Only supports Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * steps_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
    # In an eager function or graph, the current implementation of the optimizer
    # repeatedly calls and thus creates ops for the learning rate schedule. To
# avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(
steps_per_epoch,
learning_rate_schedule_fn=None,
pruning_method=None,
enable_checkpoint_and_export=False,
model_dir=None):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard,
steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = pruning_method is not None
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if model_dir is not None:
callbacks.append(tfmot.sparsity.keras.PruningSummaries(
log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if model_dir is not None:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
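# Hedged usage sketch tying the helpers in this module together; `model` and the
# datasets are assumed to exist, FLAGS are assumed to be parsed, and the epoch
# count is arbitrary.
def _run_and_report(model, train_ds, eval_ds, steps_per_epoch):
  callbacks = get_callbacks(steps_per_epoch, learning_rate_schedule)
  history = model.fit(train_ds, epochs=90, callbacks=callbacks)
  eval_output = model.evaluate(eval_ds)
  return build_stats(history, eval_output, callbacks)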
def define_keras_flags(
dynamic_loss_scale=True,
model=False,
optimizer=False,
pretrained_filepath=False):
"""Define flags for Keras models."""
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
train_epochs=True, epochs_between_evals=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
loss_scale=True,
fp16_implementation=True,
tf_data_experimental_slack=True,
enable_xla=True,
force_v2_in_keras_compile=True,
training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
# TODO(b/135607288): Remove this flag once we understand the root cause of
# slowdown when setting the learning phase in Keras backend.
flags.DEFINE_boolean(
name='set_learning_phase_to_train', default=True,
help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(
name='explicit_gpu_placement', default=False,
help='If not using distribution strategy, explicitly set device scope '
'for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. This flag will be '
'ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of global steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(
name='enable_checkpoint_and_export', default=False,
help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(
name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(
name='steps_per_loop',
default=500,
help='Number of steps per training loop. Only training step happens '
'inside the loop. Callbacks will not be called inside. Will be capped at '
'steps per epoch.')
flags.DEFINE_boolean(
name='use_tf_while_loop',
default=True,
help='Whether to build a tf.while_loop inside the training loop on the '
'host. Setting it to True is critical to have peak performance on '
'TPU.')
flags.DEFINE_boolean(
name='use_tf_keras_layers', default=False,
help='Whether to use tf.keras.layers instead of tf.python.keras.layers.'
'It only changes imagenet resnet model layers for now. This flag is '
      'a temporary flag during the transition to tf.keras.layers. Do not use '
      'this flag for external usage. This will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5',
'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default',
'Name of optimizer preset. '
'(mobilenet_default, resnet50_default)')
# TODO(kimjaehong): Replace as general hyper-params not only for mobilenet.
flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007,
'Initial value of learning rate per sample for '
'mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94,
'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5,
'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '',
'Pretrained file path.')
def get_synth_data(height, width, num_channels, num_classes, dtype):
"""Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
"""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
return inputs, labels
def define_pruning_flags():
"""Define flags for pruning methods."""
flags.DEFINE_string('pruning_method', None,
'Pruning method.'
'None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0,
'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5,
'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0,
'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000,
'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100,
'Frequency for pruning.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
    drop_remainder: A boolean indicating whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
inputs, labels = get_synth_data(height=height,
width=width,
num_channels=num_channels,
num_classes=num_classes,
dtype=dtype)
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
"""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
|
get_synth_data
|
Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
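Example (hedged sketch; argument values chosen arbitrarily):
    inputs, labels = get_synth_data(224, 224, 3, num_classes=1000,
                                    dtype=tf.float32)
    # inputs: float32 tensor of shape (224, 224, 3), values roughly in [0, 255]
    # labels: int32 tensor of shape (1,), values in [0, num_classes - 1)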
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
import tensorflow_model_optimization as tfmot
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch,
current_batch,
steps_per_epoch,
batch_size):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.
Returns:
Adjusted learning rate.
"""
initial_lr = BASE_LEARNING_RATE * batch_size / 256
epoch = current_epoch + float(current_batch) / steps_per_epoch
warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
if epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
for mult, start_epoch in LR_SCHEDULE:
if epoch >= start_epoch:
learning_rate = initial_lr * mult
else:
break
return learning_rate
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
  N.B. Only supports Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * steps_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
    # In an eager function or graph, the current implementation of the optimizer
    # repeatedly calls and thus creates ops for the learning rate schedule. To
# avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(
steps_per_epoch,
learning_rate_schedule_fn=None,
pruning_method=None,
enable_checkpoint_and_export=False,
model_dir=None):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard,
steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = pruning_method is not None
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if model_dir is not None:
callbacks.append(tfmot.sparsity.keras.PruningSummaries(
log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if model_dir is not None:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
def define_keras_flags(
dynamic_loss_scale=True,
model=False,
optimizer=False,
pretrained_filepath=False):
"""Define flags for Keras models."""
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
train_epochs=True, epochs_between_evals=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
loss_scale=True,
fp16_implementation=True,
tf_data_experimental_slack=True,
enable_xla=True,
force_v2_in_keras_compile=True,
training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
# TODO(b/135607288): Remove this flag once we understand the root cause of
# slowdown when setting the learning phase in Keras backend.
flags.DEFINE_boolean(
name='set_learning_phase_to_train', default=True,
help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(
name='explicit_gpu_placement', default=False,
help='If not using distribution strategy, explicitly set device scope '
'for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. This flag will be '
'ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of global steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(
name='enable_checkpoint_and_export', default=False,
help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(
name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(
name='steps_per_loop',
default=500,
help='Number of steps per training loop. Only training step happens '
'inside the loop. Callbacks will not be called inside. Will be capped at '
'steps per epoch.')
flags.DEFINE_boolean(
name='use_tf_while_loop',
default=True,
help='Whether to build a tf.while_loop inside the training loop on the '
'host. Setting it to True is critical to have peak performance on '
'TPU.')
flags.DEFINE_boolean(
name='use_tf_keras_layers', default=False,
help='Whether to use tf.keras.layers instead of tf.python.keras.layers.'
'It only changes imagenet resnet model layers for now. This flag is '
      'a temporary flag during the transition to tf.keras.layers. Do not use '
      'this flag for external usage. This will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5',
'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default',
'Name of optimizer preset. '
'(mobilenet_default, resnet50_default)')
# TODO(kimjaehong): Replace as general hyper-params not only for mobilenet.
flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007,
'Initial value of learning rate per sample for '
'mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94,
'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5,
'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '',
'Pretrained file path.')
# MASKED: get_synth_data function (lines 387-413)
def define_pruning_flags():
"""Define flags for pruning methods."""
flags.DEFINE_string('pruning_method', None,
'Pruning method.'
'None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0,
'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5,
'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0,
'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000,
'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100,
'Frequency for pruning.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
inputs, labels = get_synth_data(height=height,
width=width,
num_channels=num_channels,
num_classes=num_classes,
dtype=dtype)
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
"""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
|
def get_synth_data(height, width, num_channels, num_classes, dtype):
"""Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
"""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
return inputs, labels
| 387
| 413
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
import tensorflow_model_optimization as tfmot
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils
FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1 # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [ # (multiplier, epoch to start) tuples
(1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]
def learning_rate_schedule(current_epoch,
current_batch,
steps_per_epoch,
batch_size):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
provided scaling factor.
Args:
current_epoch: integer, current epoch indexed from 0.
current_batch: integer, current batch in the current epoch, indexed from 0.
steps_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.
Returns:
Adjusted learning rate.
"""
initial_lr = BASE_LEARNING_RATE * batch_size / 256
epoch = current_epoch + float(current_batch) / steps_per_epoch
warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
if epoch < warmup_end_epoch:
# Learning rate increases linearly per step.
return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
for mult, start_epoch in LR_SCHEDULE:
if epoch >= start_epoch:
learning_rate = initial_lr * mult
else:
break
return learning_rate
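# Illustrative sketch (not part of the original module): assuming the default
# LR_SCHEDULE above, batch_size=256 (so the scaled base LR equals
# BASE_LEARNING_RATE) and a hypothetical steps_per_epoch of 100:
#
#   learning_rate_schedule(current_epoch=2, current_batch=0,
#                          steps_per_epoch=100, batch_size=256)
#       -> 0.04   (warmup: 0.1 * 1.0 * 2 / 5)
#   learning_rate_schedule(current_epoch=35, current_batch=0,
#                          steps_per_epoch=100, batch_size=256)
#       -> 0.01   (past the epoch-30 boundary: 0.1 * 0.1)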
class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
"""Callback to update learning rate on every batch (not epoch boundaries).
  N.B. Only supports Keras optimizers, not TF optimizers.
Attributes:
schedule: a function that takes an epoch index and a batch index as input
(both integer, indexed from 0) and returns a new learning rate as
output (float).
"""
def __init__(self, schedule, batch_size, steps_per_epoch):
super(LearningRateBatchScheduler, self).__init__()
self.schedule = schedule
self.steps_per_epoch = steps_per_epoch
self.batch_size = batch_size
self.epochs = -1
self.prev_lr = -1
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'learning_rate'):
raise ValueError('Optimizer must have a "learning_rate" attribute.')
self.epochs += 1
def on_batch_begin(self, batch, logs=None):
"""Executes before step begins."""
lr = self.schedule(self.epochs,
batch,
self.steps_per_epoch,
self.batch_size)
if not isinstance(lr, (float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function should be float.')
if lr != self.prev_lr:
self.model.optimizer.learning_rate = lr # lr should be a float here
self.prev_lr = lr
tf.compat.v1.logging.debug(
'Epoch %05d Batch %05d: LearningRateBatchScheduler '
'change learning rate to %s.', self.epochs, batch, lr)
class PiecewiseConstantDecayWithWarmup(
tf.keras.optimizers.schedules.LearningRateSchedule):
"""Piecewise constant decay with warmup schedule."""
def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
multipliers, compute_lr_on_cpu=True, name=None):
super(PiecewiseConstantDecayWithWarmup, self).__init__()
if len(boundaries) != len(multipliers) - 1:
raise ValueError('The length of boundaries must be 1 less than the '
'length of multipliers')
base_lr_batch_size = 256
steps_per_epoch = epoch_size // batch_size
self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
self.lr_values = [self.rescaled_lr * m for m in multipliers]
self.warmup_steps = warmup_epochs * steps_per_epoch
self.compute_lr_on_cpu = compute_lr_on_cpu
self.name = name
self.learning_rate_ops_cache = {}
def __call__(self, step):
if tf.executing_eagerly():
return self._get_learning_rate(step)
    # In an eager function or graph, the current implementation of the
    # optimizer repeatedly calls and thus creates ops for the learning rate
    # schedule. To avoid this, we cache the ops if not executing eagerly.
graph = tf.compat.v1.get_default_graph()
if graph not in self.learning_rate_ops_cache:
if self.compute_lr_on_cpu:
with tf.device('/device:CPU:0'):
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
else:
self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
return self.learning_rate_ops_cache[graph]
def _get_learning_rate(self, step):
"""Compute learning rate at given step."""
with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
[self.rescaled_lr, self.step_boundaries,
self.lr_values, self.warmup_steps,
self.compute_lr_on_cpu]):
def warmup_lr(step):
return self.rescaled_lr * (
tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
def piecewise_lr(step):
return tf.compat.v1.train.piecewise_constant(
step, self.step_boundaries, self.lr_values)
return tf.cond(step < self.warmup_steps,
lambda: warmup_lr(step),
lambda: piecewise_lr(step))
def get_config(self):
return {
'rescaled_lr': self.rescaled_lr,
'step_boundaries': self.step_boundaries,
'lr_values': self.lr_values,
'warmup_steps': self.warmup_steps,
'compute_lr_on_cpu': self.compute_lr_on_cpu,
'name': self.name
}
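# Illustrative sketch (not part of the original module): the epoch_size,
# boundaries and multipliers below are hypothetical and only show how the
# schedule converts epoch boundaries into step boundaries and rescales the
# base learning rate.
#
#   lr_schedule = PiecewiseConstantDecayWithWarmup(
#       batch_size=256, epoch_size=256000, warmup_epochs=5,
#       boundaries=[30, 60, 80], multipliers=[1.0, 0.1, 0.01, 0.001])
#   lr_schedule.rescaled_lr      -> 0.1            (0.1 * 256 / 256)
#   lr_schedule.step_boundaries  -> [30000.0, 60000.0, 80000.0]
#   lr_schedule.warmup_steps     -> 5000           (5 epochs * 1000 steps)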
def get_optimizer(learning_rate=0.1):
"""Returns optimizer to use."""
# The learning_rate is overwritten at the beginning of each step by callback.
return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)
# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(
steps_per_epoch,
learning_rate_schedule_fn=None,
pruning_method=None,
enable_checkpoint_and_export=False,
model_dir=None):
"""Returns common callbacks."""
time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
callbacks = [time_callback]
if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
lr_callback = LearningRateBatchScheduler(
learning_rate_schedule_fn,
batch_size=FLAGS.batch_size,
steps_per_epoch=steps_per_epoch)
callbacks.append(lr_callback)
if FLAGS.enable_tensorboard:
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=FLAGS.model_dir)
callbacks.append(tensorboard_callback)
if FLAGS.profile_steps:
profiler_callback = keras_utils.get_profiler_callback(
FLAGS.model_dir,
FLAGS.profile_steps,
FLAGS.enable_tensorboard,
steps_per_epoch)
callbacks.append(profiler_callback)
is_pruning_enabled = pruning_method is not None
if is_pruning_enabled:
callbacks.append(tfmot.sparsity.keras.UpdatePruningStep())
if model_dir is not None:
callbacks.append(tfmot.sparsity.keras.PruningSummaries(
log_dir=model_dir, profile_batch=0))
if enable_checkpoint_and_export:
if model_dir is not None:
ckpt_full_path = os.path.join(model_dir, 'model.ckpt-{epoch:04d}')
callbacks.append(
tf.keras.callbacks.ModelCheckpoint(ckpt_full_path,
save_weights_only=True))
return callbacks
def build_stats(history, eval_output, callbacks):
"""Normalizes and returns dictionary of stats.
Args:
history: Results of the training step. Supports both categorical_accuracy
and sparse_categorical_accuracy.
eval_output: Output of the eval step. Assumes first value is eval_loss and
second value is accuracy_top_1.
callbacks: a list of callbacks which might include a time history callback
used during keras.fit.
Returns:
Dictionary of normalized results.
"""
stats = {}
if eval_output:
stats['accuracy_top_1'] = eval_output[1].item()
stats['eval_loss'] = eval_output[0].item()
if history and history.history:
train_hist = history.history
# Gets final loss from training.
stats['loss'] = train_hist['loss'][-1].item()
# Gets top_1 training accuracy.
if 'categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
elif 'sparse_categorical_accuracy' in train_hist:
stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()
if not callbacks:
return stats
# Look for the time history callback which was used during keras.fit
for callback in callbacks:
if isinstance(callback, keras_utils.TimeHistory):
timestamp_log = callback.timestamp_log
stats['step_timestamp_log'] = timestamp_log
stats['train_finish_time'] = callback.train_finish_time
if len(timestamp_log) > 1:
stats['avg_exp_per_second'] = (
callback.batch_size * callback.log_steps *
(len(callback.timestamp_log)-1) /
(timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
return stats
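# Illustrative sketch (not part of the original module): with a Keras History
# object, an eval output of the form [loss, top_1_accuracy] and the callbacks
# returned by get_callbacks(), build_stats() yields a flat dictionary whose
# keys (depending on what was actually run) look like:
#
#   {'accuracy_top_1': ..., 'eval_loss': ..., 'loss': ...,
#    'training_accuracy_top_1': ..., 'step_timestamp_log': [...],
#    'train_finish_time': ..., 'avg_exp_per_second': ...}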
def define_keras_flags(
dynamic_loss_scale=True,
model=False,
optimizer=False,
pretrained_filepath=False):
"""Define flags for Keras models."""
flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
train_epochs=True, epochs_between_evals=True,
distribution_strategy=True)
flags_core.define_performance(num_parallel_calls=False,
synthetic_data=True,
dtype=True,
all_reduce_alg=True,
num_packs=True,
tf_gpu_thread_mode=True,
datasets_num_private_threads=True,
dynamic_loss_scale=dynamic_loss_scale,
loss_scale=True,
fp16_implementation=True,
tf_data_experimental_slack=True,
enable_xla=True,
force_v2_in_keras_compile=True,
training_dataset_cache=True)
flags_core.define_image()
flags_core.define_benchmark()
flags_core.define_distribution()
flags.adopt_module_key_flags(flags_core)
flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
# TODO(b/135607288): Remove this flag once we understand the root cause of
# slowdown when setting the learning phase in Keras backend.
flags.DEFINE_boolean(
name='set_learning_phase_to_train', default=True,
help='If skip eval, also set Keras learning phase to 1 (training).')
flags.DEFINE_boolean(
name='explicit_gpu_placement', default=False,
help='If not using distribution strategy, explicitly set device scope '
'for the Keras training loop.')
flags.DEFINE_boolean(name='use_trivial_model', default=False,
help='Whether to use a trivial Keras model.')
flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
help='Report metrics during training and evaluation.')
flags.DEFINE_boolean(name='use_tensor_lr', default=False,
help='Use learning rate tensor instead of a callback.')
flags.DEFINE_boolean(
name='enable_tensorboard', default=False,
help='Whether to enable Tensorboard callback.')
flags.DEFINE_integer(
name='train_steps', default=None,
help='The number of steps to run for training. If it is larger than '
'# batches per epoch, then use # batches per epoch. This flag will be '
'ignored if train_epochs is set to be larger than 1. ')
flags.DEFINE_string(
name='profile_steps', default=None,
help='Save profiling data to model dir at given range of global steps. The '
'value must be a comma separated pair of positive integers, specifying '
'the first and last step to profile. For example, "--profile_steps=2,4" '
'triggers the profiler to process 3 steps, starting from the 2nd step. '
'Note that profiler has a non-trivial performance overhead, and the '
'output file can be gigantic if profiling many steps.')
flags.DEFINE_boolean(
name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
flags.DEFINE_boolean(
name='enable_get_next_as_optional', default=False,
help='Enable get_next_as_optional behavior in DistributedIterator.')
flags.DEFINE_boolean(
name='enable_checkpoint_and_export', default=False,
help='Whether to enable a checkpoint callback and export the savedmodel.')
flags.DEFINE_string(
name='tpu', default='', help='TPU address to connect to.')
flags.DEFINE_integer(
name='steps_per_loop',
default=500,
      help='Number of steps per training loop. Only training steps happen '
'inside the loop. Callbacks will not be called inside. Will be capped at '
'steps per epoch.')
flags.DEFINE_boolean(
name='use_tf_while_loop',
default=True,
help='Whether to build a tf.while_loop inside the training loop on the '
'host. Setting it to True is critical to have peak performance on '
'TPU.')
flags.DEFINE_boolean(
name='use_tf_keras_layers', default=False,
      help='Whether to use tf.keras.layers instead of tf.python.keras.layers. '
      'It only changes the ImageNet ResNet model layers for now. This is '
      'a temporary flag during the transition to tf.keras.layers. Do not use '
      'this flag externally; it will be removed shortly.')
if model:
flags.DEFINE_string('model', 'resnet50_v1.5',
'Name of model preset. (mobilenet, resnet50_v1.5)')
if optimizer:
flags.DEFINE_string('optimizer', 'resnet50_default',
'Name of optimizer preset. '
'(mobilenet_default, resnet50_default)')
# TODO(kimjaehong): Replace as general hyper-params not only for mobilenet.
flags.DEFINE_float('initial_learning_rate_per_sample', 0.00007,
'Initial value of learning rate per sample for '
'mobilenet_default.')
flags.DEFINE_float('lr_decay_factor', 0.94,
'Learning rate decay factor for mobilenet_default.')
flags.DEFINE_float('num_epochs_per_decay', 2.5,
'Number of epochs per decay for mobilenet_default.')
if pretrained_filepath:
flags.DEFINE_string('pretrained_filepath', '',
'Pretrained file path.')
def get_synth_data(height, width, num_channels, num_classes, dtype):
"""Creates a set of synthetic random data.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
Returns:
A tuple of tensors representing the inputs and labels.
"""
# Synthetic input should be within [0, 255].
inputs = tf.random.truncated_normal([height, width, num_channels],
dtype=dtype,
mean=127,
stddev=60,
name='synthetic_inputs')
labels = tf.random.uniform([1],
minval=0,
maxval=num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
return inputs, labels
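# Illustrative sketch (not part of the original module): for hypothetical
# ImageNet-like dimensions, get_synth_data() returns a single unbatched image
# and label; batching happens later in get_synth_input_fn().
#
#   images, labels = get_synth_data(height=224, width=224, num_channels=3,
#                                   num_classes=1000, dtype=tf.float32)
#   images.shape -> (224, 224, 3)
#   labels.shape -> (1,)          # int32 class index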
def define_pruning_flags():
"""Define flags for pruning methods."""
flags.DEFINE_string('pruning_method', None,
'Pruning method.'
'None (no pruning) or polynomial_decay.')
flags.DEFINE_float('pruning_initial_sparsity', 0.0,
'Initial sparsity for pruning.')
flags.DEFINE_float('pruning_final_sparsity', 0.5,
'Final sparsity for pruning.')
flags.DEFINE_integer('pruning_begin_step', 0,
'Begin step for pruning.')
flags.DEFINE_integer('pruning_end_step', 100000,
'End step for pruning.')
flags.DEFINE_integer('pruning_frequency', 100,
'Frequency for pruning.')
def get_synth_input_fn(height, width, num_channels, num_classes,
dtype=tf.float32, drop_remainder=True):
"""Returns an input function that returns a dataset with random data.
This input_fn returns a data set that iterates over a set of random data and
bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
tuning the full input pipeline.
Args:
height: Integer height that will be used to create a fake image tensor.
width: Integer width that will be used to create a fake image tensor.
num_channels: Integer depth that will be used to create a fake image tensor.
num_classes: Number of classes that should be represented in the fake labels
tensor
dtype: Data type for features/images.
drop_remainder: A boolean indicates whether to drop the remainder of the
batches. If True, the batch dimension will be static.
Returns:
An input_fn that can be used in place of a real one to return a dataset
that can be used for iteration.
"""
# pylint: disable=unused-argument
def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
"""Returns dataset filled with random data."""
inputs, labels = get_synth_data(height=height,
width=width,
num_channels=num_channels,
num_classes=num_classes,
dtype=dtype)
# Cast to float32 for Keras model.
labels = tf.cast(labels, dtype=tf.float32)
data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
# `drop_remainder` will make dataset produce outputs with known shapes.
data = data.batch(batch_size, drop_remainder=drop_remainder)
data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return data
return input_fn
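# Illustrative sketch (not part of the original module): the returned input_fn
# mirrors the signature of the real input functions, so it can be swapped in
# directly; data_dir is ignored. Shapes below assume a batch size of 32.
#
#   input_fn = get_synth_input_fn(height=224, width=224, num_channels=3,
#                                 num_classes=1000)
#   dataset = input_fn(is_training=True, data_dir=None, batch_size=32)
#   images, labels = next(iter(dataset))
#   images.shape -> (32, 224, 224, 3)
#   labels.shape -> (32, 1)       # float32, cast for the Keras model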
def set_cudnn_batchnorm_mode():
"""Set CuDNN batchnorm mode for better performance.
Note: Spatial Persistent mode may lead to accuracy losses for certain
models.
"""
if FLAGS.batchnorm_spatial_persistent:
os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
else:
os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)
|
get_gkey
|
Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
        Initializes the ConfigList object by transforming
        a list of keywords into a structured list including
        beam descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
        # beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
        The method loads a configuration file and
        extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
        # initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
        keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
        The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
                # evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
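    # Illustrative sketch (not part of the original module): a typical
    # configuration line with a trailing comment is split into keyword,
    # keyvalue and comment (the keyword and value below are hypothetical).
    #
    #   key = ConfigList([])._key_from_line('FFNAME None ; flat-field cube')
    #   key.keyword  -> 'FFNAME'
    #   key.keyvalue -> 'None'
    #   key.comment  -> 'flat-field cube'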
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
        The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
        # keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
                if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
# MASKED: get_gkey function (lines 328-358)
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case that the keyword just exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
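    # Illustrative sketch (not part of the original module): for a hypothetical
    # configuration with PSFCOEFFS set and PSFRANGE '8000. 11000.', the method
    # above returns the mean of the range, 0.5 * (8000.0 + 11000.0) = 9500.0;
    # without the PSF keywords it returns None.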
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
            # find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
        # check whether the telescope area is known
if self['TELAREA'] is None:
            # set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
        # the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
        # return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
        A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
            # which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
        The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
        # list of the root of all global
        # beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
        # append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
        # keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
                # enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
            # raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
        keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
        If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
        # check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
        # beam keywords indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
                if ((bkey.keyword == full_keyword) and
                        (bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
        class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
        elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
        copied and deleted from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
            # check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
        # check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
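    # Illustrative sketch (not part of the original module): the length check
    # above accepts only value counts of the form n = m*(m+1)/2, e.g.
    #
    #   n = 6:  m = (-1 + sqrt(1 + 48)) / 2 = 3.0    -> returns 1
    #   n = 5:  m = (-1 + sqrt(1 + 40)) / 2 ~ 2.70   -> returns 0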
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
        description: str
            description of the object content
        Returns
        -------
        the header string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
            # initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
                    if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
            fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
    This keyword class is a light yet versatile
    and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
        Parameters
        ----------
        keyword: str
            the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
        The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
    keyword class. The keyword list class has as an
additional attribute the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
            the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
        class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
        elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
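# Illustrative sketch (not part of the original module): the keyword name and
# values below are hypothetical and only show how ConfListKey parses a
# list-valued keyword; str(key) re-formats each value with '%12.6g' and
# appends the comment.
#
#   key = ConfListKey('DYDX_A_1', '0.0 1.5 -2.0', 'trace coefficients')
#   key.kvallist -> [0.0, 1.5, -2.0]
#   key.keyvalue -> '0.0 1.5 -2.0'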
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
        If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
| 328
| 358
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
        Initializes the ConfigList object by transforming
        a list of keywords into a structured list including
        beam descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
        # beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
        The method loads a configuration file and
        extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
        # initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: str
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
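# Illustrative sketch of the parsing above (hypothetical configuration lines):
#   "FFNAME WFC3.IR.G102.flat.fits ; flat-field cube"
# yields keyword='FFNAME', keyvalue='WFC3.IR.G102.flat.fits' and
# comment='flat-field cube', while
#   "EXPTIME 400.0"
# yields keyword='EXPTIME', keyvalue='400.0' and comment=None.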
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a matching keyword. If the requested
keyword exists, it is returned;
otherwise 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: ConfKey or None
the requested keyword object or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. If the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
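# Illustrative usage sketch (editor addition; assumes a ConfigList/ConfigFile
# instance named 'conf', with hypothetical keyword values):
#   conf.add_gkey('DRZROOT', 'aXedrizzle', 'root name for drizzled products')
# A second call with the same keyword overwrites the stored value and comment
# rather than appending a duplicate entry.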
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested name.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
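# Worked example for the mean-value computation above (hypothetical values):
# with PSFRANGE = "9000 17000" the method returns
#   lambda_psf = 0.5 * (17000.0 + 9000.0) = 13000.0
# whereas a missing 'PSFCOEFFS' or 'PSFRANGE' keyword gives lambda_psf = None.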
def axesim_prep(self):
"""Removes or modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check whether an ident and a keyword list are given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the root of all global
# beamword keys
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill a list
# of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a matching keyword. If the requested
keyword exists, it is returned;
otherwise 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: ConfKey or None
the requested keyword object or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested name.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
value: str or None
the keyword value or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The method searches for a particular keyword
in a keyword list. If found, the keyword is
copied and removed from the input list.
If not found, an exception is raised.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of a field-dependent keyword
Field-dependent keywords such as the polynomial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
Parameters
----------
inkey: ConfListKey
the keyword to check
Returns
-------
int
1 if the length is correct, 0 otherwise
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
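# Worked example for the length check above: a field dependence of order m = 3
# requires n = m**2/2 + m/2 = 6 coefficient values; for n = 6 the inverse
# relation gives m = (-1 + sqrt(1 + 8*6)) / 2 = 3.0, an integer, so the length
# is accepted (return value 1). A keyword with n = 5 values gives m ~ 2.7 and
# is rejected (return value 0).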
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
description: str
description of the object content
Returns
-------
rstring: str
the header string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a lightweight yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
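# Illustrative sketch of the string representation above (hypothetical values):
#   str(ConfKey('EXPTIME', '400.0', 'exposure time'))
# evaluates to "EXPTIME 400.0 ; exposure time\n", and without a comment
#   str(ConfKey('EXPTIME', '400.0'))
# evaluates to "EXPTIME 400.0\n".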
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed into a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
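# Illustrative sketch of the list conversion above (hypothetical values):
#   lkey = ConfListKey('DYDX_A_0', '1.0 2.5 -0.3')
# stores keyvalue='1.0 2.5 -0.3' as a string and kvallist=[1.0, 2.5, -0.3]
# as floats, so lkey[1] returns 2.5.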
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: float
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong length in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
add_gkey
|
Add global keyword
The method adds a keyword to the list of global
keywords. If the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method load a configuration file and
extract all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the liust
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the konfiguration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evalute keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds the all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping and index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a matching keyword. If the requested
keyword exists, it is returned;
otherwise 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
# MASKED: add_gkey function (lines 360-387)
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes or modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the root of all globale
# beamword keys
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# appen the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping and index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the nuber of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exeption if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# ckeck whehter the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
copied and destroied in the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# checke whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# chech whether the 'order' is integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# intialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: float
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
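# Hedged usage sketch (assumes the ConfListKey class defined above; the keyword
# name and values are illustrative): a whitespace-separated value string is
# parsed into a list of floats.
example_key = ConfListKey('DYDX_A_1', '0.5 1.2 3.4', 'trace coefficients')
assert example_key.kvallist == [0.5, 1.2, 3.4]
assert example_key[0] == 0.5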
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
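# Hedged usage sketch for the implementation above (conf is assumed to be an
# existing ConfigFile/ConfigList instance; the comment text is illustrative):
#     conf.add_gkey('DRZROOT', 'aXedrizzle', 'root name for drizzle products')
# A second call with the same keyword overwrites keyvalue and comment in place
# instead of appending a duplicate ConfKey.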
| 360
| 387
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beam descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: str
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
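# Illustrative parsing sketch for _key_from_line (the value and comment text
# are made up): the line
#     "DRZRESOLA 46.5 ; resolution in A/pix"
# yields keyword='DRZRESOLA', keyvalue='46.5', comment='resolution in A/pix';
# a line without ';' keeps the full remainder as keyvalue with comment=None,
# and a line with only one item raises an aXeError.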
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
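# Note on the deletion step above (illustrative sketch): the stored indices are
# removed in descending order so that earlier deletions do not shift the
# positions of entries that still have to be deleted, e.g.
#     items = ['a', 'b', 'c', 'd']
#     for i in sorted([0, 2], reverse=True):
#         del items[i]
#     # items == ['b', 'd']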
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
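# Worked example for the branch above (the range values are illustrative):
# with PSFRANGE = '7000 9000' the method returns
# lambda_psf = 0.5 * (9000.0 + 7000.0) = 8000.0;
# without PSFCOEFFS/PSFRANGE it returns None.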
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
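# Illustrative note (hedged sketch): item access on a beam appends the beam
# identifier to the requested keyword root, so for a beam with ident 'A'
#     beam['XOFF_']         # looks up the keyword 'XOFF_A' via get_bvalue
#     beam['XOFF_'] = 0.0   # updates the keyvalue of 'XOFF_A' if it exists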
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the roots of all global
# beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
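# Illustrative sketch of the lookup above: for ident 'A' the required keywords
# are 'BEAMA', 'MMAG_EXTRACT_A', 'MMAG_MARK_A', 'XOFF_A', 'YOFF_A' and
# 'SENSITIVITY_A'; 'PSF_OFFSET_A' and 'DLD1P_A_PRANGE' are optional. If any
# required keyword is missing, CKeyNotFound('general') is raised and the beam
# is treated as absent.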
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The method searches for a particular keyword
in a keyword list. If found, the keyword is
copied and removed from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the input list
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# intialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: float
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
get_gvalue
|
Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
|
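A minimal sketch consistent with the docstring above, reusing get_gkey as defined in the class (the masked body in the dataset row may differ in detail):

def get_gvalue(self, keyword):
    # set the default return value
    rvalue = None
    # look up the keyword object among the global keys
    key = self.get_gkey(keyword)
    # extract the value if the keyword exists
    if key:
        rvalue = key.keyvalue
    # return the value (or None)
    return rvalue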
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beam descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: str
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
# MASKED: get_gvalue function (lines 454-484)
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the roots of all global
# beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The method searches for a particular keyword
in a keyword list. If found, the keyword is
copied and deleted from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
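# Worked example (editor's note, not part of the original module): for a
# second-order xy-dependence there are m = 3 coefficient columns, so the
# expected length is n = m*m/2 + m/2 = 6; a keyword such as DYDX_A_1 with
# six values passes this check, while five values would make m non-integer
# and the check returns 0.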
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# intialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
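# Illustrative sketch (editor's addition; the numbers are hypothetical):
# ConfListKey('DLDP_A_0', '4800.0 0.01', 'dispersion zero point')
# stores keyvalue '4800.0 0.01' and kvallist [4800.0, 0.01].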
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
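# Minimal usage sketch (editor's addition; 'example.conf' is a hypothetical
# aXe configuration file, not part of the original sources):
# conf = ConfigFile('example.conf')
# science_ext = conf.get_gvalue('SCIENCE_EXT')  # keyword value string or None
# if science_ext is None:
#     _log.info('SCIENCE_EXT is not set in the configuration')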
| 454
| 484
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
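# Parsing sketch (editor's addition; the line content is illustrative):
# the line 'DRZSCALE 1.0 ; drizzle output scale' is split into
# keyword 'DRZSCALE', keyvalue '1.0' and comment 'drizzle output scale';
# a line without ';' keeps its full remainder as keyvalue and comment None.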
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. If the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the roots of all global
# beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The method searches for a particular keyword
in a keyword list. If found, the keyword is
copied and deleted from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# intialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
get_bkey
|
Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
|
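# Usage sketch for get_bkey (editor's addition; the configuration object,
# beam 'A' and the keyword name are hypothetical):
# beam = conf['A']                      # ConfigBeam instance for beam A
# key = beam.get_bkey('SENSITIVITY_A')  # ConfKey instance or None
# if key is not None:
#     _log.info(key.keyvalue)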
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
        The method loads a configuration file and
        extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
        Parameters
        ----------
        filename: str
            name of the configuration file
        Returns
        -------
        keylist: list
            list of ConfKey objects
"""
        # initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
        keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
        The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
        line: str
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
                # evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
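    # Illustrative sketch (added comment, not part of the original class):
    # how _key_from_line would split a typical configuration line, assuming
    # an input such as "INSTRUMENT ACS ; name of the instrument":
    #   keyword  -> 'INSTRUMENT'
    #   keyvalue -> 'ACS'
    #   comment  -> 'name of the instrument'
    # A line without a ';' (e.g. "TELAREA 45238.93") would give the remainder
    # of the line as keyvalue and comment=None.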
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
        The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
        # keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
                if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
        If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
        keywords. In case that the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
            # save the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
            # find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
        # check whether the telescope area is known
        if self['TELAREA'] is None:
            # set the telescope area to the
            # Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
        # the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
        # return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
        A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
        # check whether an ident and a keyword list are given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
            # which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
        The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
        # list of the roots of all global
        # beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
        # append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
        # keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
                # enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
            # raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
        keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
# MASKED: get_bkey function (lines 867-897)
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
                if ((bkey.keyword == full_keyword) and
                        (bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
        class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
        elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
        copied and removed from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
            # check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
        Field dependent keywords such as the polynomial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
        Parameters
        ----------
        inkey: ConfListKey
            the keyword to check
        Returns
        -------
        int
            1 if the number of values is consistent, 0 otherwise
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
        # check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
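    # Worked example (added for clarity, not original code): _check_twodkey
    # accepts a keyword with n values only if n = m*(m+1)/2 for an integer m,
    # i.e. n in {1, 3, 6, 10, ...}:
    #   n = 6 -> m = (-1 + sqrt(1 + 48)) / 2 = 3.0   -> integer  -> returns 1
    #   n = 5 -> m = (-1 + sqrt(1 + 40)) / 2 ~= 2.70 -> no match -> returns 0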
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
        description: str
            description of the object content
        Returns
        -------
        str
            the header string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
            _log.info('Field dependent keyword: {0:s} has wrong length!'
                      .format(e.keyword))
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
            # initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
                    if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
            fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
    This keyword class is a light yet versatile
    and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameter
---------
keyword: str
            the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
        The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
    keyword class. The keyword list class has, as an
    additional attribute, the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
            the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
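    # Example (illustrative comment only; the keyword name and values below
    # are made up): a whitespace separated value string is parsed into floats,
    #   key = ConfListKey('DYDX_A_1', '1.0 2.0e-3 -4.5e-6', 'trace slope')
    #   key.kvallist -> [1.0, 0.002, -4.5e-06]
    # and str(key) writes the keyword, the values in '%12.6g' format and the
    # comment back onto a single line.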
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
        class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
        elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
        If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
        # check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
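    # Usage sketch (illustrative comment; assumes a ConfigBeam instance named
    # `beam` built from a parsed keyword list):
    #   key = beam.get_bkey('SENSITIVITY_A')
    #   if key is not None:
    #       print(key.keyword, key.keyvalue)
    #   else:
    #       print('keyword not present for this beam')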
| 867
| 897
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
        Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
        # beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
        The method loads a configuration file and
        extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
        Parameters
        ----------
        filename: str
            name of the configuration file
        Returns
        -------
        keylist: list
            list of ConfKey objects
"""
        # initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
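    # Minimal usage sketch (comment only; the file name is hypothetical):
    # for a configuration file 'example.conf' containing keyword lines such
    # as "INSTRUMENT ACS" plus '#' comment lines,
    #   keylist = self._load_file('example.conf')
    # returns one ConfKey per non-empty, non-comment line; blank lines and
    # lines starting with '#' are skipped.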
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
        keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
        The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
        line: str
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
                # evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
        The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
        # keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
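    # Small behavioural sketch (added comment, not original code): if the
    # parsed keylist contains ConfKey entries for 'INSTRUMENT', 'CAMERA' and
    # 'DYDX_A_0', _find_gkeys returns new ConfKey objects for 'INSTRUMENT'
    # and 'CAMERA' and deletes those two entries from the input keylist,
    # leaving 'DYDX_A_0' behind for the beam parsing step.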
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
                if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
        If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
        keywords. In case that the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
            # save the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
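    # Worked example (illustrative comment; the numbers are made up): with
    # PSFCOEFFS present and PSFRANGE = '6000.0 9500.0' the method returns
    # 0.5 * (9500.0 + 6000.0) = 7750.0; if either keyword is missing it
    # returns None.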
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
            # find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
        # check whether the telescope area is known
        if self['TELAREA'] is None:
            # set the telescope area to the
            # Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
        # the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
        # return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
        A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
        # check whether an ident and a keyword list are given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
            # which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
        The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
        # list of the roots of all global
        # beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
        # append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
        # keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
                # enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
            # raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
        keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
        If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
        # check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
                if ((bkey.keyword == full_keyword) and
                        (bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
        class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
        elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
        copied and removed from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
            # check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of a field dependent keyword
Field dependent keywords such as the polynomial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
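# Worked example of the length check above (illustrative, not part of the
# original module): a field-dependent keyword with m = 3 xy-terms must carry
# n = m**2/2 + m/2 = 9/2 + 3/2 = 6 values; indeed, for n = 6 the inverse
# relation gives m = (-1 + sqrt(1 + 8*6))/2 = (-1 + 7)/2 = 3, an integer,
# so _check_twodkey() returns 1, whereas a keyword with, say, 5 values
# gives a non-integer m and the method returns 0.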
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a lightweight yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
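# Illustrative sketch (not part of the original module): the __str__ method
# above serializes a keyword back into one configuration-file line, e.g. a
# hypothetical ConfKey('TELAREA', 45238.93, 'HST collecting area') renders as
#   "TELAREA 45238.93 ; HST collecting area\n"
# while a keyword without a comment simply omits the " ; comment" part.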
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed into a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong length in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
get_bvalue
|
Retrieve a requested beam-keyword value
The method returns the value of the beam keyword
whose name matches the requested one.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
value: str or None
the value of the requested keyword, or None
|
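A minimal usage sketch for get_bvalue (illustrative only; the configuration-file
name and keyword names are hypothetical, and the ConfigFile/ConfigBeam classes
are those defined in the code below):
conf = ConfigFile('G141.test.conf')  # load a hypothetical aXe configuration file
beam_a = conf['A']                   # ConfigBeam instance for beam A
xoff = beam_a.get_bvalue('XOFF_A')   # full keyword name including the beam ident
xoff_too = beam_a['XOFF_']           # __getitem__ appends the ident and calls get_bvalue
Both calls return the keyword value as stored in the file, or None if the
keyword is absent.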
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[-1]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
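# Illustrative example of the parsing above (hypothetical input line):
#   "DRZROOT aXedrizzle ; root name for drizzled images"
# yields keyword='DRZROOT', keyvalue='aXedrizzle' and
# comment='root name for drizzled images'; a line without ';', such as
# "SCIENCE_EXT SCI", yields comment=None, and a single-item line
# raises an aXeError.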
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case that the keyword just exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# save the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
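# Illustrative example (hypothetical keyword values): with PSFCOEFFS set and
# PSFRANGE = "6000 9000", the method returns the mean of the range,
# lambda_psf = 0.5 * (9000.0 + 6000.0) = 7500.0; if either keyword is
# missing, lambda_psf stays None.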
def axesim_prep(self):
"""Removes or modifies some keywords for aXeSIM"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the roots of all global
# beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
# MASKED: get_bvalue function (lines 899-929)
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The method searches for a particular keyword
in a keyword list. If found, the keyword is
copied and removed from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of a field dependent keyword
Field dependent keywords such as the polynomial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a lightweight yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed into a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong length in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the beam keyword
whose name matches the requested one.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
value: str or None
the value of the requested keyword, or None
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
| 899
| 929
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by transforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found in the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[-1]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case that the keyword just exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# save the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the sceicne extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telesocpe are is known
if self['TELAREA'] is None:
# set the telescope are to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error=- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the baseic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is intialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is competely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms theconfiguration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the root of all globale
# beamword keys
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# appen the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping and index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the nuber of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exeption if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# ckeck whehter the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword is full_keyword) and
(bkey.keyvalue.upper() is not 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif (not isinstance(type(self[0]), obj)):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
copied and destroied in the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# checke whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# chech whether the 'order' is integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# intialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] is '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light, but yet versatile
and important class to strore a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameter
---------
keyword: str
the keword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creats and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. In the keyword list class has as an
additional attribute the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not (isinstance(type(self[0]), obj)):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
__init__
|
Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keword name
keyvalue: str
the keyword values
comment: str
the keyword comment
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by tranfsforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[iindex]
except BeamNotFound:
# no information on this beam is in the file
pass
# enhance the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method load a configuration file and
extract all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
@param filename: name of the configuration file
@type filename: String
@return: list of ConfKey's
@rtype: [ConfKey]
"""
# initialize the liust
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the konfiguration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: list
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evalute keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds the all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping and index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() is not 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case that the keyword just exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# safe the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the sceicne extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telesocpe are is known
if self['TELAREA'] is None:
# set the telescope are to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error=- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the baseic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is intialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if a filename is given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is competely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms theconfiguration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the root of all globale
# beamword keys
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# appen the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping and index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the nuber of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# enhance the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exeption if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exists,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# ckeck whehter the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword is full_keyword) and
(bkey.keyvalue.upper() is not 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
description of the object content
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif (not isinstance(type(self[0]), obj)):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The methods searches for a particular keyword
in a keyword list. If found, the keyword is
copied and destroied in the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# checke whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# enhance the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# chech whether the 'order' is integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
@param description: description of the object content
@type description: string
@return: the header string
@rtype: string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# intialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] is '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light, but yet versatile
and important class to strore a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameter
---------
keyword: str
the keword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creats and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. In the keyword list class has as an
additional attribute the keyvalues transformed to a list
of floats.
"""
# MASKED: __init__ function (lines 1451-1477)
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instace
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: list
description of the object content
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not (isinstance(type(self[0]), obj)):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
| 1,451
| 1,477
|
import os
import math
import logging
from pyaxe import config as config_util
from pyaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class ConfigList:
"""Configuration File Object"""
def __init__(self, keylist, header=None):
"""
Initializes the ConfigList object by tranfsforming
a list of keywords into a structured list including
beams descriptions
keylist: list
List of configuration keys
header: str
the header string
"""
# beam indices which might be found the file
idents = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K',
'L', 'M', 'N', 'O', 'P', 'Q']
# create the (visible) dictionary
self.beams = {}
# create the hidden beam list
self._beams = []
# store the header
self.header = header
# load the general required keywords
self.gkeys = self._find_gkeys(keylist)
# try to load beams as long as there
# are keywords and as long as there
# are candidate beam numbers
iindex = 0
while (len(keylist) > 0 and iindex < len(idents)):
try:
# try to load a beam
self._beams.append(ConfigBeam(idents[iindex], keylist))
self.beams[idents[iindex]] = self._beams[-1]
except BeamNotFound:
# no information on this beam is in the file
pass
# increment the counter
iindex += 1
# inform about the useless keywords
if len(keylist) > 0:
_log.info('\nDispensable Keywords: ')
for key in keylist:
_log.info(key)
def __str__(self):
"""String method for the class
The method transforms the configuration
file object into its string representation.
Returns
-------
a string representation of the object
"""
# take the string of the header
rstring = str(self.header) + '\n'
# add the strings for the global keys
for key in self.gkeys:
rstring += str(key)
for beam in self._beams:
rstring += str(beam)
# return the total string
return rstring
def __delitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
del self.gkeys[index]
def __getitem__(self, item):
# find the index of the requested item
index = self._find_gkey(item)
# check whether the item was found
if index > -1:
# return the identified item
return self.gkeys[index].keyvalue
else:
if item in self.beams.keys():
return self.beams[item]
else:
# return NULL
return None
def _find_gkey(self, item):
# set the default return value
found = -1
# go over all items
for index in range(len(self.gkeys)):
# check whether it is the right item
if self.gkeys[index].keyword == item:
# set the return value to the index
found = index
# return the result
return found
def _load_file(self, filename):
"""Configuration file --> keyword list
The method loads a configuration file and
extracts all valid keyword-keyvalue-comment information
from it. The keyword-keyvalue pairs are
organized and returned as a list of
configuration key objects.
Parameters
----------
filename: str
name of the configuration file
Returns
-------
keylist: list
list of ConfKey objects
"""
# initialize the list
keylist = []
# open the file and parse through it
fopen = open(filename, 'r')
for line in fopen:
# strip the line
str_line = line.strip()
# check whether the line contains a keyword
if len(str_line) and str_line[0] != '#':
# create and append the keyword
keylist.append(self._key_from_line(str_line))
# close the file
fopen.close()
# return the list
return keylist
def _get_gkey_index(self, keyword):
"""Retrieve the index of a global keyword
The method searches for the index of
a requested keyword in the list of global
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
kindex = -1
# go over all keys
for index in range(len(self.gkeys)):
# check whether the current key matches
if self.gkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return kindex
def _key_from_line(self, line):
"""Creates a keyword from a line
The method extracts the configuration keyword,
the associated value and, if present,
a comment from a line in the configuration file.
A configuration key object representing the extracted
keyword is created and returned.
Parameters
----------
line: str
line to analyze
Returns
-------
configuration key object
"""
# split the line into items
items = line.split()
# for more than one item the
# first item is the keyword
if len(items) > 1:
keyword = items[0].strip()
# check for a comment
cpos = line.rfind(';')
if cpos < 0:
# evaluate the keyvalue
keyvalue = line[line.find(keyword)+len(keyword):].strip()
comment = None
else:
# evaluate keyvalue and comment
tmp_val = line[line.find(keyword)+len(keyword):].strip()
keyvalue = tmp_val.split(';')[0].strip()
comment = tmp_val.split(';')[1].strip()
else:
# something's wrong here
err_msg = 'Only one item in: ' + line + ' !'
raise aXeError(err_msg)
# create and return the keyword
return ConfKey(keyword, keyvalue, comment)
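# Illustrative note (added for clarity, not part of the original code): a
# configuration line such as "MMAG_EXTRACT_A 27.0 ; faint extraction limit"
# is split by _key_from_line into keyword='MMAG_EXTRACT_A', keyvalue='27.0'
# and comment='faint extraction limit'; a line without ';' gets comment=None.
# The example value and comment above are hypothetical.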
def _find_gkeys(self, keylist):
"""Finds and extracts the global keywords
The method finds all predefined global keywords in
a keyword list. The list of global keywords is
returned. Their counterparts in the input keyword list
are deleted.
Parameters
----------
keylist: list
list of keywords
Returns
-------
keys: list
global keywords
"""
gkeywords = ['INSTRUMENT', 'CAMERA', 'TELAREA',
'SCIENCE_EXT', 'ERRORS_EXT',
'DQ_EXT', 'OPTKEY1', 'OPTVAL1', 'FFNAME', 'DQMASK',
'DRZRESOLA', 'DRZSCALE', 'DRZLAMB0', 'DRZXINI',
'DRZROOT', 'EXPTIME', 'WEIGHT_EXT', 'DRZPFRAC',
'DRZPSCALE', 'DRZKERNEL', 'MODEL_EXT', 'VARIANCE_EXT',
'RDNOISE', 'PSFCOEFFS', 'PSFRANGE', 'IPIXFUNCTION',
'POBJSIZE', 'SMFACTOR']
# initialize the global keylist
# and the list with indices to be deleted
gkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in gkeywords:
# store the index
dindex.append(iindex)
# create and append the new keyword
gkeys.append(ConfKey(key.keyword, key.keyvalue, key.comment))
iindex += 1
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for index in dindex:
del keylist[index]
# return the list of global keys
return gkeys
def _check_gfiles(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
# list of the root of all
# global keys indicating a file
fkeys = ['FFNAME']
# go over all file keywords
for key in fkeys:
# identify the keyword in the list
index = self._get_gkey_index(key)
# check for existence
if index > -1:
# extract the keyvalue
kvalue = self.gkeys[index].keyvalue
# if the keyvalue is NOT None but the file does not exist
if ((kvalue.upper() != 'NONE') and
(not os.path.isfile(config_util.getCONF(kvalue)))):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(kvalue)))
raise aXeError(err_msg)
def get_gkey(self, keyword):
"""Retrieve a requested global keyword
The method searches the list of global keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.gkeys[index]
else:
# return the default
return rkey
def add_gkey(self, keyword, keyvalue, comment=None):
"""Add global keyword
The method adds a keyword to the list of global
keywords. In case the keyword already exists,
it is overwritten, otherwise it is appended
to the global keyword list.
Parameters
----------
keyword: str
name of the requested keyword
keyvalue: any
value of the requested keyword
comment: str
comment for the keyword
"""
# search for the index in the keyword list
index = self._get_gkey_index(keyword)
if index > -1:
# if it matches, copy the data
self.gkeys[index].keyvalue = keyvalue
self.gkeys[index].comment = comment
else:
# the keyword does not yet exist, just create and add it
self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# def drizzle_check(self):
# """Check for drizzle keywords
# The method assures that all necessary drizzle keywords
# are present. Nonexisting keywords are added with default
# values. Finally the value for the drizzle kernel is checked
# against all valid values.
# Returns
# -------
# bool: True if the drizzle kernel is valid
# """
# # list with all valid kernels
# kernels = ['square', 'point', 'turbo', 'gaussian', 'tophat',
# 'lanczos2', 'lanczos3']
# # make sure that some important drizzle keywords are there
# pself = self.setdefault('DRZPSCALE', 1.0)
# pfrac = self.setdefault('DRZPFRAC', 1.0)
# dkernel = self.setdefault('DRZKERNEL', 'square')
# droot = self.setdefault('DRZROOT', 'aXedrizzle')
# # check for valid drizzle kernel
# if dkernel not in kernels:
# return False
# return True
# def setdefault(self, keyword, keyvalue, comment=None):
# """Add global keyword
# The method mimics the setdefault method for dictionary
# objects. A keyword is added with the given value and
# comment, but only in case that it does not yet exist.
# If it exists, nothing is done
# Parameters
# ----------
# keyword: str
# name of the requested keyword
# keyvalue: any
# value of the requested keyword
# comment: str
# comment for the keyword
# Returns
# -------
# The keyword value
# """
# # search for the index in the keyword list
# index = self._get_gkey_index(keyword)
# if index < 0:
# # the keyword does not yet exist, just create and add it
# self.gkeys.append(ConfKey(keyword, keyvalue, comment))
# # extract the keyvalue
# value = self.gkeys[-1].keyvalue
# else:
# # extract the keyvalue
# value = self.gkeys[index].keyvalue
# # return the keyvalue
# return value
def get_gvalue(self, keyword):
"""Retrieve a requested global keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
The keyword value
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_gkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def writeto(self, filename):
"""Save the object to a file
The method saves the object to a file
with name specified in the input.
Parameters
----------
filename: str
name of the file
"""
# destroy the old file
if os.path.isfile(filename):
os.unlink(filename)
# open the new file
ofile = open(filename, 'w')
# write the string to the file
ofile.write(str(self))
# close the file
ofile.close()
def flush(self):
"""Save the object back to file
The method saves the object back to a file
with the identical filename it was read from.
"""
# just use the more general method
self.writeto(self.filename)
def check_files(self, check_glob=True):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# check global files if desired
if check_glob:
self._check_gfiles()
# create the (visible) dictionary
for bkey in self.beams.keys():
n_sens += self.beams[bkey].check_files()
# return the number
# of existing sensitivity files
return n_sens
class ConfigFile(ConfigList):
"""Configuration File Object"""
def __init__(self, filename=None):
"""
Initializes the ConfigFile object either
by reading in a configuration file
or by creating a default configuration file
Parameters
----------
filename: str
name of the configuration file
"""
_log.info(f"Initializing configfile with {filename}")
# check if a filename is given
if filename is None:
# load the default
_log.info('No file given, can do nothing!!')
else:
# save the file name
self.filename = filename
# create a keyword list
keylist = self._load_file(filename)
# load the header
header = ConfHeader(filename)
super(ConfigFile, self).__init__(keylist, header)
def _get_simul_name(self):
"""Get the filename used in aXeSIM"""
# just add '.simul' and return the result
return self.filename + '.simul'
def confirm_extrkeys(self):
"""Confirm that all keywords for the extraction exist"""
# default is true!
extr_ready = 1
# check existence of 'POBJSIZE'
if self['POBJSIZE'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['POBJSIZE']) < 0.0:
extr_ready = 0
# check existence of 'SMFACTOR'
if self['SMFACTOR'] is None:
extr_ready = 0
# check for reasonable value
elif float(self['SMFACTOR']) < 0.0:
extr_ready = 0
# return the value
return extr_ready
def confirm_lambda_psf(self):
"""Check whether a 'lambda_psf' value is needed, provide one"""
# check whether 'lambda_psf' is needed
if ((self['PSFCOEFFS'] is not None) and
(self['PSFRANGE'] is not None)):
# split the term
psf_range = self['PSFRANGE'].split()
# extract the defined range as float
lambda_min = float(psf_range[0])
lambda_max = float(psf_range[1])
# make 'lambda_psf' to the mean value
lambda_psf = 0.5 * (lambda_max + lambda_min)
else:
# leave it at None
lambda_psf = None
# return the value
return lambda_psf
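# Illustrative note (added for clarity, hypothetical values): with
# PSFCOEFFS set and PSFRANGE '7000 9000', confirm_lambda_psf returns the
# mean of the range, 8000.0; if either keyword is missing it returns None.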
def axesim_prep(self):
"""Removes modifies some keywords"""
# derive the new configuration file name
new_name = self._get_simul_name()
# check whether the science extension has other
# than the allowed values
if self['SCIENCE_EXT'] != 'SCI' and self['SCIENCE_EXT'] != '2':
# find the index of the science extension
index = self._find_gkey('SCIENCE_EXT')
# check whether the item was found
if index > -1:
# set it to the allowed value
self.gkeys[index].keyvalue = 'SCI'
# check whether the telescope area is known
if self['TELAREA'] is None:
# set the telescope area to the
# Hubble default
self.add_gkey('TELAREA', 45238.93)
index = 1
while self['OPTKEY'+str(index)] is not None:
del self['OPTKEY'+str(index)]
del self['OPTVAL'+str(index)]
index += 1
# just make sure that
# the error- and dq-
# extensions are set
self.add_gkey('ERRORS_EXT', 'ERR')
self.add_gkey('DQ_EXT', 'DQ')
# write the file back
self.writeto(new_name)
# return the basic filename of the
# simulation configuration file
return os.path.basename(new_name)
class ConfigBeam:
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""
A configuration beam object is initialized. This is done
by either extracting the relevant keywords for a certain
beam from a keyword list or creating a default beam.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# check if an ident and a keylist are given
if ident is None or keylist is None:
# load the default
_log.info('No ID or no keywords given, can do nothing!!')
else:
# try to load the beam keywords
try:
# store the ident
self.ident = ident
# load the general beam keywords
self.beamkeys = self._find_beamkeys(ident, keylist)
# load the trace keywords
self.trace = ConfigTrace(ident, keylist)
# load the dispersion keywords
self.disp = ConfigDisp(ident, keylist)
# catch a pure CKeyNotFound exception
# which is raised if a beam is completely
# absent in the keyword list
except CKeyNotFound:
raise BeamNotFound(ident)
def __str__(self):
"""String method for the class
The method transforms the configuration
beam object into its string representation.
"""
# initialize the return string
rstring = ("\n#-----------\n#\n# Beam {0:s}:\n#\n#-----------\n"
.format(str(self.ident)))
# add the strings for the global keys
for key in self.beamkeys:
rstring += str(key)
# add the string for the trace
rstring += str(self.trace)
# add the string for the dispersion
# solution
rstring += str(self.disp)
# return the total string
return rstring
def __getitem__(self, item):
full_item = item + self.ident
rvalue = self.get_bvalue(full_item)
return rvalue
def __setitem__(self, item, value):
full_item = item + self.ident
index = self._get_bkey_index(full_item)
if index > -1:
self.beamkeys[index].keyvalue = value
def _find_beamkeys(self, ident, keylist):
"""Load the global beam keywords
The method extracts all global beam keywords
from a keyword list. The extracted keywords are returned
as a list. They are removed from the input list.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# list of the roots of all global
# beam keywords
bkeys = ['BEAM', 'MMAG_EXTRACT_', 'MMAG_MARK_', 'XOFF_',
'YOFF_', 'SENSITIVITY_']
# list of optional keywords
okeys = ['PSF_OFFSET_']
# append the beam identifier to the
# keyword roots to get a list of keywords
# to search for
id_keys = []
for key in bkeys:
id_keys.append(key + ident)
# initiate and fill
# collect a list of optional keywords
opt_keys = []
for key in okeys:
opt_keys.append(key + ident)
# here is some kind of extra
# keyword
# ekey = 'DLD1P_' + ident + '_PRANGE'
opt_keys.append('DLD1P_' + ident + '_PRANGE')
# initialize the global keylist
# and the list with indices to be deleted
bkeys = []
dindex = []
# go over the keylist read in,
# keeping an index variable
iindex = 0
nfound = 0
for key in keylist:
# identify the current keyword in the
# list of possible ones
if key.keyword in id_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# increment the number of keywords found
nfound += 1
elif key.keyword in opt_keys:
# store the index
dindex.append(iindex)
# create and append the new keyword
bkeys.append(ConfKey(key.keyword,
key.keyvalue, key.comment))
# increment the index
iindex += 1
# check whether all keywords were found
if nfound < len(id_keys):
# raise an exception if not
raise CKeyNotFound('general')
# delete the input keywords which
# have been 'used'
dindex.sort()
dindex.reverse()
for iindex in dindex:
del keylist[iindex]
# return the list of global keys
return bkeys
def _get_bkey_index(self, keyword):
"""Retrieve the index of a beam keyword
The method searches for the index of
a requested keyword in the list of beam
keywords. If the keyword does not exist,
the index -1 is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
index: int
the index of the keyword
"""
# initialize the return value
bindex = -1
# go over all keys
for index in range(len(self.beamkeys)):
# check whether the current key matches
if self.beamkeys[index].keyword == keyword:
# return it if it matches
return index
# return the default
return bindex
def get_bkey(self, keyword):
"""Retrieve a requested beam keyword
The method searches the list of beam keywords
for a fitting keyword. In case that the requested
keyword exists, it is returned.
If not, 'None' is returned
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
key: str or None
the requested keyword or 'None'
"""
# initialize the return value
rkey = None
# search for the index in the keyword list
index = self._get_bkey_index(keyword)
# check whether the keyword exists
if index > -1:
# return the keyword
return self.beamkeys[index]
else:
# return the default
return rkey
def get_bvalue(self, keyword):
"""Retrieve a requested beam-keyword value
The method returns the value of the keyword
which matches the requested value.
If there is no matching keyword, 'None'
is returned.
Parameters
----------
keyword: str
name of the requested keyword
Returns
-------
value: str or None
the requested keyword value or 'None'
"""
# set the default return value
rvalue = None
# search for the keyword
key = self.get_bkey(keyword)
# check whether it is non-NULL
if key:
# extract the value
rvalue = key.keyvalue
# return the value
return rvalue
def check_files(self):
"""Checks whether all files exist
The method checks whether the files whose names
are within the class data do exist or not.
An error is reported in case that the files
do not exist.
"""
n_sens = 0
# list of the root of all
# beamword keys indicating a file
fkeys = ['SENSITIVITY_']
# append the beam identifier to the
# keyword roots to get the full keyname
for key in fkeys:
full_keyword = key + self.ident
# go over all beam keys
for bkey in self.beamkeys:
# check whether the current keyword is right
# and whether the keyvalue is not 'None'
if ((bkey.keyword == full_keyword) and
(bkey.keyvalue.upper() != 'NONE')):
# check for the file
if not os.path.isfile(config_util.getCONF(bkey.keyvalue)):
# report an error
err_msg = ("The file: {0:s} does not exist!"
.format(config_util.getCONF(bkey.keyvalue)))
raise aXeError(err_msg)
else:
n_sens += 1
return n_sens
class TwoDimPolyN:
"""Object for a polynomial with 2D variance"""
def __str__(self):
"""The method transforms the 2D polynomial object into its str
representation.
Returns
-------
object: str
string representation of the object
"""
# initialize the return string
rstring = str(self.norder)
for key in self.twodkeys:
rstring += str(key)
# return the total string
return rstring
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
key : ConfListKey
the indexed object
"""
# check whether the index exists
if index > len(self.twodkeys)-1:
# raise an exception
err_msg = "Index: {0:s} does not exist!".format(str(index))
raise aXeError(err_msg)
# return the indexed object
return self.twodkeys[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: ConfListKey
the keyword object to store at the index
"""
# check whether the index exists
if (index > (len(self.twodkeys))-1):
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.twodkeys[index] = obj
def _find_order(self, prefix, ident, keylist):
"""Find the keyword with the polynomial order
The method finds and extracts the keyword
indicating the polynomial degree from
a keyword list. The keyword is returned.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keyword: str
keyword with number of orders
"""
# create the name of the keyword with the
# polynomial order
order_key = prefix + 'ORDER_' + ident
# extract and return the keyword from the
# keyword list
return self._find_key(order_key, keylist)
def _find_twodkeys(self, prefix, ident, keylist):
"""Find the all 2D polynomial keywords
Given a prefix and a beam identifier the method
extracts all orders of the 2D polynomial which
describes the trace or dispersion. The number
of orders expected is taken from the object data.
Parameters
----------
prefix: str
keyword prefix
ident: char
beam identification
keylist: list
list of keywords
Returns
-------
keys: list
list of keywords
"""
# initialize an empty list
twodkeys = []
# for each expected keyword
for ii in range(int(self.norder.keyvalue)+1):
# form the keyword name
twodkey = prefix + ident + '_' + str(ii)
# extract the new keyword
newkey = self._find_key(twodkey, keylist, 1)
if self._check_twodkey(newkey):
# extract the keyword and append it to the list
twodkeys.append(newkey)
else:
raise CKeyLengthWrong(ident, twodkey)
# return the list
return twodkeys
def _find_key(self, keyword, keylist, lkey=0):
"""Extract a certain keyword from the list
The method searches for a particular keyword
in a keyword list. If found, the keyword is
copied and deleted from the input list.
If not found, an exception is fired.
Parameters
----------
keyword: str
the keyword name
keylist: list
list of keywords
lkey: int
if set to a non-zero value, a ConfListKey is created instead of a ConfKey
Returns
-------
keyword: str
the extracted keyword
"""
# initialize the index
iindex = 0
# set indicator to "not found"
found = -1
# go over all keys in the list
for key in keylist:
# check whether the keyword is the desired one
if key.keyword == keyword:
# create a list keyword if desired
if lkey:
nkey = ConfListKey(key.keyword, key.keyvalue, key.comment)
else:
nkey = ConfKey(key.keyword, key.keyvalue, key.comment)
# store the index
found = iindex
# increment the index
iindex += 1
# fire an exception if nothing was found
if found < 0:
raise CKeyNotFound(keyword)
# delete the keyword from the inlist
else:
del keylist[found]
# return the keyword
return nkey
def _check_twodkey(self, inkey):
"""Check the length of the a field dependent keyword
Field dependent keywords such as the polynimial
coefficients in the trace description and dispersion
solution must have a certain number of values,
which is:
n = m^2/2 + m/2
The method checks whether the number of values
is in agreement with this.
@param inkey: the keyword name
@type inkey: ConfListKey
@return: 1/0
@rtype: int
"""
# determine the length of the list
n = float(len(inkey.kvallist))
# compute the 'order' of the xy-dependence
m = (-1.0 + math.sqrt(1.0+8.0*n))/2.0
# check whether the 'order' is an integer
if math.fabs(m-int(m)) > 1.0e-16:
# no integer -> key length wrong
return 0
# integer -> key length correct
return 1
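# Worked example for the length rule above (added for clarity): valid
# keyword lengths n = m^2/2 + m/2 are the triangular numbers 1 (m=1),
# 3 (m=2), 6 (m=3), 10 (m=4), ...; a keyword with e.g. 4 values yields a
# non-integer m, so _check_twodkey returns 0.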
def str_header(self, description):
"""Create a header string
The method offers to the subclasses the possibility
to have a meaningful string header before the
actual data string.
Parameters
----------
description: str
description of the object content
Returns
-------
str
the header string
"""
# pre-decoration
rstring = '\n#\n# '
# add description
rstring += description
# add post-decoration
rstring += ':\n#\n'
# return the result
return rstring
class ConfigTrace(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration beam
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DYDX_', ident, keylist)
self.twodkeys = self._find_twodkeys('DYDX_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise TraceNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('Field dependent keyword: ' + e.keyword)
def __str__(self):
"""Returns string representation of the object"""
# create the label or description
description = 'Trace description for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigTrace, self).str_header(description)
# get the data string
rstring += super(ConfigTrace, self).__str__()
# return the result
return rstring
class ConfigDisp(TwoDimPolyN):
"""Configuration Beam object"""
def __init__(self, ident=None, keylist=None):
"""The method initializes a configuration dispersion
object for a given beam identifier.
All necessary keywords are extracted from
an input keyword list.
In case of missing keywords an exception
is fired.
Parameters
----------
ident: char
beam identification
keylist: list
list of keywords
"""
# try to read in the keywords
try:
self.ident = ident
self.norder = self._find_order('DISP_', ident, keylist)
self.twodkeys = self._find_twodkeys('DLDP_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
try:
self.twodkeys = self._find_twodkeys('DLD1P_', ident, keylist)
# raise an exception if keywords are missing
except CKeyNotFound as e:
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
except CKeyLengthWrong as e:
_log.info('\nField dependent keyword: {0:s} has wrong length!'
.format(e.keyword))
raise DispNotFound(ident, e.keyword)
def __str__(self):
"""return string representation of the object"""
# create the label or description
description = 'Dispersion solution for Beam ' + str(self.ident)
# get the string header
rstring = super(ConfigDisp, self).str_header(description)
# get the data string
rstring += super(ConfigDisp, self).__str__()
# return the result
return rstring
class DefConfHeader:
"""Default header for a configuration file"""
def __init__(self):
self.header = []
self.header.append("#-----------------------------------------------"
"------------\n# Default configuration file for aXe"
"\n#\n#-------------------------------------------"
"---------------")
def __str__(self):
"""returns string representation of the object"""
rstring = ''
for line in self.header:
rstring += line
return rstring
class ConfHeader(DefConfHeader):
"""Header class for the configuration file"""
def __init__(self, filename=None):
"""Initializes the configuration header class
The method extracts the header from a configuration
file. If no filename is provided, a default
header is created.
Parameters
----------
filename: str
name of the configuration file
"""
# no filename -> default header
if filename is None:
super(ConfHeader, self).__init__()
else:
# initialize the data list
self.header = []
# initialize the start pointer
start = 1
# open and parse through the file
fopen = open(filename, 'r')
for line in fopen:
# check whether the start pointer is still set
if start:
# strip the line
str_line = line.strip()
# check whether the first character
# is a comment, which qualifies
# the line as part of the header
if ((len(str_line) > 0) and (str_line[0] == '#')):
# append the line to the header data
self.header.append(line.strip()+'\n')
else:
# set the starter pointer to 0,
# thus indicating the end of the header
start = 0
# close the file
fopen.close()
class ConfKey:
"""Class for a keyword in a configuration file
This keyword class is a light yet versatile
and important class to store a keyword entry in a
configuration file. All important values are
directly read from the object attributes.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword class
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword value
comment: str
the keyword comment
"""
self.keyword = keyword
self.keyvalue = keyvalue
self.comment = comment
def __str__(self):
"""String method for the class
The method creates and returns
the string representation of the
keyword.
Returns
-------
obj: str
string representation of the object
"""
rstring = self.keyword + ' ' + str(self.keyvalue)
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
rstring += '\n'
return rstring
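# Illustrative note (added for clarity, hypothetical values):
# str(ConfKey('DRZROOT', 'aXedrizzle', 'drizzle root name')) gives
# "DRZROOT aXedrizzle ; drizzle root name\n", and without a comment just
# "DRZROOT aXedrizzle\n".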
class ConfListKey(ConfKey):
"""Class for a keyword list
The keyword list class is a subclass derived from the
keyword class. The keyword list class has, as an
additional attribute, the keyvalues transformed to a list
of floats.
"""
def __init__(self, keyword, keyvalue, comment=None):
"""Constructor for the keyword list class
Initializer for the keyword list class.
The keyword instance is created using
all input values.
Parameters
----------
keyword: str
the keyword name
keyvalue: str
the keyword values
comment: str
the keyword comment
"""
# initialize the keyvalue list
self.kvallist = []
# create a traditional keyword instance
super(ConfListKey, self).__init__(keyword, keyvalue, comment)
# split the string keyvalue
vlist = self.keyvalue.split()
for value in vlist:
# append the floats to the list
self.kvallist.append(float(value))
def __getitem__(self, index):
"""Getindex method for the class
The operator method which is called
when an index is requested on a
class instance
test = kkk[0]
Parameters
----------
index: int
the index to address
Returns
-------
obj: float
the indexed object
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index: ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# return the indexed object
return self.kvallist[index]
def __setitem__(self, index, obj):
"""Setindex method for the class
The operator method which is called
when the index of a class instance is
set to a value.
kkk[0] = test
Parameters
----------
index: int
the index to address
obj: float
the value to store at the index
"""
# check whether the index exists
if index > len(self.kvallist)-1:
# raise an exception
err_msg = 'Index ' + str(index) + ' does not exist!'
raise aXeError(err_msg)
# check whether the input type is correct
elif not isinstance(obj, type(self[0])):
# raise an exception
err_msg = ("Object: {0:s} has wrong type: {1:s}!"
.format(str(obj), str(type(obj))))
raise aXeError(err_msg)
# set the index to the input object
self.kvallist[index] = obj
def __str__(self):
"""returns the string representation of the keyword."""
# first comes the keyword
rstring = self.keyword
# append the keyvalues using a default format
for value in self.kvallist:
rstring = rstring + ' %12.6g' % value
# append the comment
if self.comment is not None:
rstring = rstring + ' ; ' + self.comment
# append a linefeed
rstring += '\n'
# return the complete string
return rstring
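# Illustrative note (added for clarity, hypothetical values):
# ConfListKey('DYDX_A_0', '1.0 2.5') stores kvallist == [1.0, 2.5] and its
# string form prints each value right-aligned via the '%12.6g' format.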
class ConfError(Exception):
"""Base class for exceptions in this module"""
pass
class CKeyNotFound(ConfError):
"""Error for missing keyword"""
def __init__(self, keyword):
self.keyword = keyword
class BeamNotFound(ConfError):
"""Error for unknown beam """
def __init__(self, ident):
self.ident = ident
class TraceNotFound(ConfError):
"""Error for unknown trace"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class DispNotFound(ConfError):
"""Error for unknown dispersion"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
class CKeyLengthWrong(ConfError):
"""Error for wrong lengt in KeywordList"""
def __init__(self, ident, keyword=None):
self.ident = ident
self.keyword = keyword
|
fetch_filenames
|
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
# MASKED: fetch_filenames function (lines 62-100)
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
| 62
| 100
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
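# Illustrative usage (added for clarity, hypothetical subject ID):
# fetch_filenames(['50003'], 'rois_ho') returns
# [os.path.join(root_folder, '50003', <full_ID> + '_rois_ho.1D')] when the
# short ID is listed in subject_IDs.txt, and ['N/A'] otherwise.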
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
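# Illustrative usage (added for clarity, hypothetical inputs): for a
# timeseries array ts of shape (timepoints x regions),
# subject_connectivity(ts, '50003', 'ho', 'correlation', save=False)
# returns a (regions x regions) matrix from nilearn's ConnectivityMeasure,
# while kind='lasso' estimates a covariance matrix with GraphLassoCV.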
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
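# Note (added for clarity): with R regions each subject row contains the
# Fisher-transformed (np.arctanh) upper triangle without the diagonal,
# i.e. R*(R-1)/2 values, so the returned matrix has shape
# (num_subjects, R*(R-1)/2).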
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
fetch_subject_files
|
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
# MASKED: fetch_subject_files function (lines 103-125)
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
    connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
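# Illustrative usage sketch (not part of the original module): how the ID and
# filename helpers above might be chained. Assumes the ABIDE derivatives and the
# subject_IDs.txt / full_IDs.txt lists already exist under `root_folder`.
if __name__ == '__main__':
    demo_ids = get_ids(num_subjects=5, short=True).tolist()   # first five short IDs
    demo_files = fetch_filenames(demo_ids, 'rois_ho')          # mapped derivative type
    for path in demo_files:
        print(path)   # '<root_folder>/<short_id>/<full_id>_rois_ho.1D' or 'N/A'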
|
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
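# Illustrative sketch: listing whatever files are present for one subject.
# Assumes the subject folder exists under `root_folder`; the ID below is a
# placeholder, not a real ABIDE identifier.
for f in fetch_subject_files('50003'):
    print(f)   # absolute paths; an unknown ID simply yields an empty list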
| 103
| 125
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
    filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
    connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
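# Illustrative sketch of `subject_connectivity` on synthetic data, so it runs
# without any ABIDE files on disk. The random array stands in for a real ROI
# timeseries (timepoints x regions); save=False avoids touching the folder layout.
import numpy as np
_demo_ts = np.random.randn(150, 111)                     # 150 timepoints, 111 HO regions
_demo_conn = subject_connectivity(_demo_ts, subject='demo', atlas_name='ho',
                                  kind='correlation', save=False)
print(_demo_conn.shape)                                  # -> (111, 111)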
|
fetch_conn_matrices
|
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
    filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
# MASKED: fetch_conn_matrices function (lines 128-151)
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
    connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
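# Illustrative sketch: reloading matrices previously written by
# `subject_connectivity(..., save=True)`. Expects files named
# '<short_id>_ho_correlation.mat' inside each subject folder; missing files are
# skipped with a printed message rather than raising.
demo_ids = get_ids(num_subjects=3, short=True).tolist()
demo_mats = fetch_conn_matrices(demo_ids, atlas_name='ho', kind='correlation')
print('%d connectivity matrices loaded' % len(demo_mats))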
| 128
| 151
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
    filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
    connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
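# Illustrative sketch of the vectorisation performed in `get_net_vectors`, shown
# on synthetic matrices so it runs without any saved networks. Values are clipped
# away from +/-1 so the Fisher z-transform stays finite on the diagonal as well.
import numpy as np
_fake_nets = [np.clip(np.corrcoef(np.random.randn(110, 60)), -0.999, 0.999)
              for _ in range(4)]
_fisher = [np.arctanh(m) for m in _fake_nets]            # Fisher z-transform
_iu = np.triu_indices_from(_fisher[0], 1)                # strictly upper triangle
_features = np.vstack([m[_iu] for m in _fisher])         # subjects x connections
print(_features.shape)                                   # -> (4, 110*109//2)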
|
get_timeseries
|
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
    filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
# MASKED: get_timeseries function (lines 154-171)
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
    connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
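# Compatibility note (an assumption about newer environments, not taken from the
# original code): scikit-learn later removed GraphLassoCV in favour of
# GraphicalLassoCV, so the import at the top of this module may need a fallback:
try:
    from sklearn.covariance import GraphLassoCV              # older scikit-learn
except ImportError:
    from sklearn.covariance import GraphicalLassoCV as GraphLassoCV  # newer releases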
|
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
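# Illustrative sketch chaining `get_timeseries` with `norm_timeseries`. Assumes
# the '<full_id>_rois_ho.1D' cpac derivatives are present under `root_folder`.
demo_ids = get_ids(num_subjects=2, short=True).tolist()
raw_ts = get_timeseries(demo_ids, atlas_name='ho')
clean_ts = norm_timeseries(raw_ts)        # nilearn.signal.clean without detrending
print([t.shape for t in clean_ts])        # one (timepoints, regions) array per subject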
| 154
| 171
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
    filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
    connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
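# Illustrative sketch: pulling a phenotypic label for a few subjects. Assumes the
# phenotypic CSV sits at the path hard-coded in `get_subject_label` and that its
# ID column really is named 'subject' as the code expects; 'DX_GROUP' is used here
# as a placeholder for one of the diagnostic columns in the ABIDE phenotypic file.
demo_ids = get_ids(num_subjects=10, short=True).tolist()
dx = get_subject_label(demo_ids, label_name='DX_GROUP')
print(dx)                                  # {'<short_id>': '<label>', ...}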
|
norm_timeseries
|
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True of False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
    filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity measure used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
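# Illustrative sketch: read back connectivity matrices previously written as
# <subject>_<atlas>_<kind>.mat by subject_connectivity() or group_connectivity();
# missing files are reported and skipped by fetch_conn_matrices().
def _example_fetch_conn_matrices():
    subject_ids = get_ids(num_subjects=3, short=True).tolist()
    matrices = fetch_conn_matrices(subject_ids, 'ho', 'correlation')
    for mat in matrices:
        print(mat.shape)  # one square (regions x regions) array per found subject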
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
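# Illustrative sketch: load the ROI timeseries for a couple of subjects and inspect
# their shapes before any connectivity estimation.
def _example_get_timeseries():
    subject_ids = get_ids(num_subjects=2, short=True).tolist()
    ts = get_timeseries(subject_ids, 'ho')
    for arr in ts:
        print(arr.shape)  # (timepoints, regions); timepoints differ across sites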
# MASKED: norm_timeseries function (lines 174-187)
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
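# Illustrative sketch: estimate a single subject's correlation matrix in memory only
# (save=False avoids writing a .mat file into the subject folder).
def _example_subject_connectivity():
    subject_ids = get_ids(num_subjects=1, short=True).tolist()
    ts = get_timeseries(subject_ids, 'ho')[0]
    conn = subject_connectivity(ts, subject_ids[0], 'ho', 'correlation', save=False)
    print(conn.shape)  # (regions, regions), symmetric with ones on the diagonal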
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
        connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
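# Illustrative sketch: estimate partial-correlation matrices for a small group in one
# call; nilearn's ConnectivityMeasure is fitted on all subjects' timeseries together.
def _example_group_connectivity():
    subject_ids = get_ids(num_subjects=3, short=True).tolist()
    ts_list = get_timeseries(subject_ids, 'ho')
    mats = group_connectivity(ts_list, subject_ids, 'ho', 'partial correlation', save=False)
    print(len(mats), mats[0].shape)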
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
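# Illustrative sketch: look up one phenotypic label per subject. The column name
# 'DX_GROUP' is only an assumed example of a header in
# Phenotypic_V1_0b_preprocessed1.csv; substitute the column of interest.
def _example_get_subject_label():
    subject_ids = get_ids(num_subjects=5, short=True).tolist()
    labels = get_subject_label(subject_ids, 'DX_GROUP')
    print(labels)  # {short_subject_id: label_string, ...}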
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
    # Get strictly upper-triangular indices (diagonal excluded)
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
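# Illustrative sketch: build the subjects-by-connections feature matrix from previously
# saved correlation matrices; np.arctanh applies the Fisher z-transform and the strict
# upper triangle keeps each undirected edge exactly once.
def _example_get_net_vectors():
    subject_ids = get_ids(num_subjects=4, short=True).tolist()
    features = get_net_vectors(subject_ids, 'correlation', atlas_name='ho')
    print(features.shape)  # (num_subjects, regions * (regions - 1) / 2)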
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
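# Illustrative sketch: load the ROI centroid coordinates shipped alongside the atlas.
# Assumes a '<atlas>_coords.csv' file with one "x,y,z" row per region in root_folder;
# for 'ho' the row of the pruned ROI (index 82) is dropped to match the matrices above.
def _example_get_atlas_coords():
    coords = get_atlas_coords('ho')
    print(coords.shape)  # (num_rois, 3) MNI coordinates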
|
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
| 174
| 187
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
        filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
    subjectID : short subject ID for which the list of available files is fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
        connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
    # Get strictly upper-triangular indices (diagonal excluded)
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
get_subject_label
|
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
        filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
    subjectID : short subject ID for which the list of available files is fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
        connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
# MASKED: get_subject_label function (lines 262-280)
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
    # Get strictly upper-triangular indices (diagonal excluded)
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
| 262
| 280
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
        filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
    subjectID : short subject ID for which the list of available files is fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
        connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
    # Get strictly upper-triangular indices (diagonal excluded)
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
load_all_networks
|
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
        filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
    subjectID : short subject ID for which the list of available files is fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
        connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
# MASKED: load_all_networks function (lines 283-307)
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
    # Get strictly upper-triangular indices (diagonal excluded)
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
| 283
| 307
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
        filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
    subjectID : short subject ID for which the list of available files is fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
    kind : the kind of connectivity used to estimate the matrices, e.g. correlation, partial correlation, tangent
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
        connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
    # Get strictly upper-triangular indices (diagonal excluded)
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
get_net_vectors
|
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
    short : True or False, specifies whether to get short or long subject IDs
    returns:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
        filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
    subjectID : short subject ID for which the list of available files is fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. lasso, partial correlation, correlation
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
# MASKED: get_net_vectors function (lines 310-331)
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
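A minimal usage sketch for get_net_vectors follows (not part of the original module). The subject IDs are hypothetical placeholders, and the call assumes the corresponding <subject>_<atlas>_<kind>.mat connectivity files have already been written under root_folder, e.g. by group_connectivity.
# Hedged usage sketch: the subject IDs below are placeholders, and the .mat
# connectivity files are assumed to already exist under root_folder.
example_subjects = ['50601', '50602', '50603']   # hypothetical short IDs
feature_matrix = get_net_vectors(example_subjects, kind='correlation', atlas_name='ho')
# One row per subject, one column per upper-triangular connection.
print(feature_matrix.shape)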
| 310
| 331
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. lasso, partial correlation, correlation
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
get_atlas_coords
|
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. lasso, partial correlation, correlation
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
# MASKED: get_atlas_coords function (lines 334-348)
|
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
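As an illustration only (assuming ho_coords.csv is present under root_folder), the coordinates returned here can be turned into a pairwise ROI distance matrix, a common ingredient when building spatial graphs; scipy is an extra dependency that this module does not import.
# Illustrative sketch, not part of the original module.
from scipy.spatial.distance import pdist, squareform
coords = get_atlas_coords(atlas_name='ho')   # (num_rois, 3) MNI coordinates
dist = squareform(pdist(coords))             # (num_rois, num_rois) Euclidean distances
print(coords.shape, dist.shape)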
| 334
| 348
|
# Copyright (c) 2017 Sofia Ira Ktena <ira.ktena@imperial.ac.uk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
import os
import csv
import numpy as np
import scipy.io as sio
from sklearn.covariance import GraphLassoCV
import nilearn
from nilearn import connectome
# Output path
save_path = '/vol/dhcp-hcp-data/ABIDE'
# Number of subjects
num_subjects = 1000
# Selected pipeline
pipeline = 'cpac'
# Files to fetch
derivatives = ['rois_ho']
# Get the root folder
root_folder = os.path.join(save_path, 'ABIDE_pcp/cpac/filt_noglobal')
def get_ids(num_subjects=None, short=True):
"""
num_subjects : number of subject IDs to get
short : True or False, specifies whether to get short or long subject IDs
return:
subject_IDs : list of subject IDs (length num_subjects)
"""
if short:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'subject_IDs.txt'), dtype=int)
subject_IDs = subject_IDs.astype(str)
else:
subject_IDs = np.loadtxt(os.path.join(root_folder, 'full_IDs.txt'), dtype=str)
if num_subjects is not None:
subject_IDs = subject_IDs[:num_subjects]
return subject_IDs
def fetch_filenames(subject_list, file_type):
"""
subject_list : list of short subject IDs in string format
file_type : must be one of the available file types
returns:
filenames : list of file paths (same length as subject_list)
"""
# Specify file mappings for the possible file types
filemapping = {'func_preproc': '_func_preproc.nii.gz',
'rois_aal': '_rois_aal.1D',
'rois_cc200': '_rois_cc200.1D',
'rois_ho': '_rois_ho.1D'}
# The list to be filled
filenames = []
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
# Fill list with requested file paths
for s in subject_list:
try:
if file_type in filemapping:
idx = subject_IDs.index(s)
pattern = full_IDs[idx] + filemapping[file_type]
else:
pattern = s + file_type
filenames.append(os.path.join(root_folder, s, pattern))
except ValueError:
# Return N/A if subject ID is not found
filenames.append('N/A')
return filenames
def fetch_subject_files(subjectID):
"""
subjectID : short subject ID for which list of available files are fetched
returns:
onlyfiles : list of absolute paths for available subject files
"""
# Load subject ID lists
subject_IDs = get_ids(short=True)
subject_IDs = subject_IDs.tolist()
full_IDs = get_ids(short=False)
try:
idx = subject_IDs.index(subjectID)
subject_folder = os.path.join(root_folder, subjectID)
onlyfiles = [os.path.join(subject_folder, f) for f in os.listdir(subject_folder)
if os.path.isfile(os.path.join(subject_folder, f))]
except ValueError:
onlyfiles = []
return onlyfiles
def fetch_conn_matrices(subject_list, atlas_name, kind):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
kind : the kind of correlation used to estimate the matrices, e.g. lasso, partial correlation, correlation
returns:
connectivity : list of square connectivity matrices, one for each subject in subject_list
"""
conn_files = fetch_filenames(subject_list,
'_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
conn_matrices = []
for fl in conn_files:
print("Reading connectivity file %s" % fl)
try:
mat = sio.loadmat(fl)['connectivity']
conn_matrices.append(mat)
except IOError:
print("File %s does not exist" % fl)
return conn_matrices
def get_timeseries(subject_list, atlas_name):
"""
subject_list : list of short subject IDs in string format
atlas_name : the atlas based on which the timeseries are generated e.g. aal, cc200
returns:
ts : list of timeseries arrays, each of shape (timepoints x regions)
"""
ts_files = fetch_filenames(subject_list, 'rois_' + atlas_name)
ts = []
for fl in ts_files:
print("Reading timeseries file %s" % fl)
ts.append(np.loadtxt(fl, skiprows=0))
return ts
def norm_timeseries(ts_list):
"""
ts_list : list of timeseries arrays, each of shape (timepoints x regions)
returns:
norm_ts : list of normalised timeseries arrays, same shape as ts_list
"""
norm_ts = []
for ts in ts_list:
norm_ts.append(nilearn.signal.clean(ts, detrend=False))
return norm_ts
def subject_connectivity(timeseries, subject, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : timeseries table for subject (timepoints x regions)
subject : the subject short ID
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity : connectivity matrix (regions x regions)
"""
print("Estimating %s matrix for subject %s" % (kind, subject))
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
covariance_estimator.fit(timeseries)
connectivity = covariance_estimator.covariance_
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity = conn_measure.fit_transform([timeseries])[0]
if save:
subject_file = os.path.join(save_path, subject,
subject + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity})
return connectivity
def group_connectivity(timeseries, subject_list, atlas_name, kind, save=True, save_path=root_folder):
"""
timeseries : list of timeseries tables for subjects (timepoints x regions)
subject_list : the subject short IDs list
atlas_name : name of the atlas used
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
save : save the connectivity matrix to a file
save_path : specify path to save the matrix if different from subject folder
returns:
connectivity_matrices : list of connectivity matrices (regions x regions), one per subject
"""
if kind == 'lasso':
# Graph Lasso estimator
covariance_estimator = GraphLassoCV(verbose=1)
connectivity_matrices = []
for i, ts in enumerate(timeseries):
covariance_estimator.fit(ts)
connectivity = covariance_estimator.covariance_
connectivity_matrices.append(connectivity)
print('Covariance matrix has shape {0}.'.format(connectivity.shape))
elif kind in ['tangent', 'partial correlation', 'correlation']:
conn_measure = connectome.ConnectivityMeasure(kind=kind)
connectivity_matrices = conn_measure.fit_transform(timeseries)
if save:
for i, subject in enumerate(subject_list):
subject_file = os.path.join(save_path, subject_list[i],
subject_list[i] + '_' + atlas_name + '_' + kind.replace(' ', '_') + '.mat')
sio.savemat(subject_file, {'connectivity': connectivity_matrices[i]})
print("Saving connectivity matrix to %s" % subject_file)
return connectivity_matrices
def get_subject_label(subject_list, label_name):
"""
subject_list : the subject short IDs list
label_name : name of the label to be retrieved
returns:
label : dictionary of subject labels
"""
label = {}
with open(os.path.join(save_path, 'ABIDE_pcp/Phenotypic_V1_0b_preprocessed1.csv')) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['subject'] in subject_list:
label[row['subject']] = row[label_name]
return label
def load_all_networks(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
all_networks : list of connectivity matrices (regions x regions)
"""
all_networks = []
for subject in subject_list:
fl = os.path.join(root_folder, subject,
subject + "_" + atlas_name + "_" + kind + ".mat")
matrix = sio.loadmat(fl)['connectivity']
if atlas_name == 'ho':
matrix = np.delete(matrix, 82, axis=0)
matrix = np.delete(matrix, 82, axis=1)
all_networks.append(matrix)
# all_networks=np.array(all_networks)
return all_networks
def get_net_vectors(subject_list, kind, atlas_name="aal"):
"""
subject_list : the subject short IDs list
kind : the kind of connectivity to be used, e.g. lasso, partial correlation, correlation
atlas_name : name of the atlas used
returns:
matrix : matrix of connectivity vectors (num_subjects x num_connections)
"""
# This is an alternative implementation
networks = load_all_networks(subject_list, kind, atlas_name=atlas_name)
# Get Fisher transformed matrices
norm_networks = [np.arctanh(mat) for mat in networks]
# Get upper diagonal indices
idx = np.triu_indices_from(norm_networks[0], 1)
# Get vectorised matrices
vec_networks = [mat[idx] for mat in norm_networks]
# Each subject should be a row of the matrix
matrix = np.vstack(vec_networks)
return matrix
def get_atlas_coords(atlas_name='ho'):
"""
atlas_name : name of the atlas used
returns:
matrix : matrix of roi 3D coordinates in MNI space (num_rois x 3)
"""
coords_file = os.path.join(root_folder, atlas_name + '_coords.csv')
coords = np.loadtxt(coords_file, delimiter=',')
if atlas_name == 'ho':
coords = np.delete(coords, 82, axis=0)
return coords
|
_CheckIamPermissions
|
Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
# MASKED: _CheckIamPermissions function (lines 180-232)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
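# Worked example of the arithmetic above (illustrative numbers only, not from
# the original source):
# --timeout=2h  -> 7200s,  2% = 144s (below the 300s cap), Daisy timeout = 7056s.
# --timeout=10h -> 36000s, 2% = 720s (capped at 300s),     Daisy timeout = 35700s.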
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
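To make the role check easier to follow, here is a self-contained sketch of the binding-pruning loop above, using a namedtuple stand-in for the real IAM policy binding message; the Binding type and the account value are assumptions for illustration only.
# Stand-alone sketch of the binding check; Binding is a hypothetical stand-in
# for the real IAM policy message type.
from collections import namedtuple

Binding = namedtuple('Binding', ['role', 'members'])
account = 'serviceAccount:123456@cloudbuild.gserviceaccount.com'   # placeholder
expected_permissions = {
    'roles/compute.admin': account,
    'roles/iam.serviceAccountUser': account,
}
bindings = [Binding('roles/compute.admin', [account])]   # policy already grants this role
for binding in bindings:
    if expected_permissions.get(binding.role) in binding.members:
        del expected_permissions[binding.role]
# Only the missing role remains; it is what the user would be prompted to add.
print(expected_permissions)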
| 180
| 232
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
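# Hedged, hand-computed examples of the sanitization above (illustration only;
# _example_safe_bucket_names is a hypothetical helper, not used by the module).
def _example_safe_bucket_names():
  # 'google' is broken up anywhere in the name.
  assert _GetSafeBucketName('my-google-com-daisy-bkt') == 'my-go-ogle-com-daisy-bkt'
  # A leading 'goog' is broken up as well.
  assert _GetSafeBucketName('goog-proj-daisy-bkt') == 'go-og-proj-daisy-bkt'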
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
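# Hedged, hand-worked examples of the trim above (illustration only;
# _example_daisy_timeouts is a hypothetical helper). args.timeout is the
# number of seconds produced by the --timeout Duration flag.
def _example_daisy_timeouts():
  import argparse
  # 2h: 2% of 7200s is 144s (below the 300s cap), so Daisy gets 7056s.
  assert GetDaisyTimeout(argparse.Namespace(timeout=7200)) == 7056
  # 24h: 2% of 86400s is 1728s, capped at 300s, so Daisy gets 86100s.
  assert GetDaisyTimeout(argparse.Namespace(timeout=86400)) == 86100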
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
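# Hedged sketch (illustration only): the sleep cadence that a backoff like the
# one used for OVF imports below (2s while under 30s elapsed, then 15s)
# produces inside CloudBuildClientWithFiltering.StreamWithFilter.
# _example_poll_schedule is a hypothetical helper, not part of the SDK.
def _example_poll_schedule(total_seconds=60):
  backoff = lambda elapsed: 2 if elapsed < 30 else 15
  elapsed, sleeps = 0, []
  while elapsed < total_seconds:
    sleep = backoff(elapsed)
    sleeps.append(sleep)
    elapsed += sleep
  return sleeps  # fifteen 2s polls, then 15s polls: [2]*15 + [15, 15]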
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
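# Hedged example of the flag value built above, using a plain stand-in for the
# scheduling node-affinity message (illustration only; _FakeNodeAffinity and
# _example_node_affinity_flag are hypothetical names). With a real message,
# six.text_type(operator) is assumed to yield the enum's name, e.g. 'IN'.
import collections
_FakeNodeAffinity = collections.namedtuple('NodeAffinity',
                                           ['key', 'operator', 'values'])
def _example_node_affinity_flag():
  affinity = _FakeNodeAffinity(key='workload', operator='IN',
                               values=['prod', 'frontend'])
  return _BuildOvfImporterNodeAffinityFlagValue(affinity)
  # -> 'workload,IN,prod,frontend'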
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
_CreateCloudBuild
|
Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
# MASKED: _CreateCloudBuild function (lines 235-266)
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
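# Hedged note (illustration only): ZoneNameToRegionName is assumed to drop the
# final zone suffix, e.g. 'us-central1-b' -> 'us-central1'. A minimal local
# sketch of that rule; _example_zone_to_region is a hypothetical helper and
# not the SDK implementation.
def _example_zone_to_region(zone):
  return zone.rsplit('-', 1)[0]  # 'europe-west1-d' -> 'europe-west1'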
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
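# Hedged note on the pattern above (illustration only): Create() returns its
# operation metadata as a generic message, so it is serialized to JSON and
# re-parsed as the typed BuildOperationMetadata to reach the nested Build:
#
#   json_str = encoding.MessageToJson(op.metadata)
#   metadata = encoding.JsonToMessage(messages.BuildOperationMetadata, json_str)
#   build = metadata.build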
| 235
| 266
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
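# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Spells out the polling cadence implied by the backoff above: poll every 2s for
# the first 30s of the build, then every 15s thereafter.
def _example_ovf_backoff_schedule():  # hypothetical helper, for illustration only
  backoff = lambda elapsed: 2 if elapsed < 30 else 15
  elapsed, intervals = 0, []
  while elapsed < 60:
    step = backoff(elapsed)
    intervals.append(step)
    elapsed += step
  return intervals  # -> fifteen 2s intervals, then 15s intervals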
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
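# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The affinity object below is a hypothetical stand-in exposing the same
# attributes as the compute scheduling message; it shows the comma-joined
# key,operator,value... form the importer expects.
def _example_node_affinity_flag():  # hypothetical helper, for illustration only
  class _FakeAffinity(object):
    key = 'workload'
    operator = 'IN'
    values = ['prod', 'batch']
  return _BuildOvfImporterNodeAffinityFlagValue(_FakeAffinity())  # -> 'workload,IN,prod,batch'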
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
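# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Shows the flag strings AppendArg/AppendBoolArg produce; the values are
# hypothetical. Falsy values are silently skipped.
def _example_append_args():  # hypothetical helper, for illustration only
  flags = []
  AppendArg(flags, 'zone', 'us-central1-b')       # -> '-zone=us-central1-b'
  AppendArg(flags, 'timeout', 7056, '-{0}={1}s')  # -> '-timeout=7056s'
  AppendBoolArg(flags, 'no-guest-environment')    # -> '-no-guest-environment'
  AppendArg(flags, 'network', None)               # falsy: nothing appended
  return flags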
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
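# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Both helpers normalize a Cloud Storage reference to a gs:// URI; the sample
# path is hypothetical.
def _example_gcs_uris():  # hypothetical helper, for illustration only
  uri = MakeGcsUri('https://storage.googleapis.com/my-bucket/images/disk.vmdk')
  # uri == 'gs://my-bucket/images/disk.vmdk'
  # MakeGcsObjectOrPathUri accepts the same inputs but raises
  # storage_util.InvalidObjectNameError for bucket-only paths such as 'gs://my-bucket'.
  return MakeGcsObjectOrPathUri(uri)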
|
GetDaisyBucketName
|
Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
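# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Reproduces the whitelist rule above with plain strings; the log lines are
# hypothetical. Only lines starting with a filter entry survive.
def _example_output_filter():  # hypothetical helper, for illustration only
  output_filter = ['[import-image]', '[Daisy]']
  text = '[import-image] step 1\n[debug] noisy internals\n[Daisy] workflow done'
  return [line for line in text.splitlines()
          if any(line.startswith(match) for match in output_filter)]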
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
# MASKED: GetDaisyBucketName function (lines 269-287)
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
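# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Demonstrates the naming workarounds above on hypothetical project-derived names.
def _example_safe_bucket_names():  # hypothetical helper, for illustration only
  return [
      _GetSafeBucketName('my-google-project-daisy-bkt'),  # -> 'my-go-ogle-project-daisy-bkt'
      _GetSafeBucketName('goog-project-daisy-bkt'),       # -> 'go-og-project-daisy-bkt'
  ]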
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
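# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The zone name is hypothetical; it shows the zone -> region inference that
# GetSubnetRegion relies on when only a zone is configured.
def _example_subnet_region():  # hypothetical helper, for illustration only
  return utils.ZoneNameToRegionName('us-central1-b')  # expected to yield 'us-central1'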
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: an argparse namespace that may contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
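# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The Namespace stands in for parsed CLI args (hypothetical values); note that
# both names are lower-cased before being handed to the importer.
def _example_network_subnet_args():  # hypothetical helper, for illustration only
  import argparse
  builder_args = []
  args = argparse.Namespace(network='Default', subnet='My-Subnet')
  AppendNetworkAndSubnetArgs(args, builder_args)
  return builder_args  # -> ['-subnet=my-subnet', '-network=default']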
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
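# --- Illustrative sketch (added for clarity; not part of the original snippet) ---
# Traces the string handling above for a hypothetical project 'example.com:my-app'
# with bucket_location='US'; the final _GetSafeBucketName pass would leave this
# particular name unchanged.
def _example_daisy_bucket_name():  # hypothetical helper, for illustration only
  safe_project = 'example.com:my-app'.replace(':', '-').replace('.', '-')
  bucket_name = '{0}-daisy-bkt'.format(safe_project)  # 'example-com-my-app-daisy-bkt'
  return '{0}-{1}'.format(bucket_name, 'US').lower()  # 'example-com-my-app-daisy-bkt-us'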
| 269
| 287
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
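# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Shows the exception message format with a hypothetical failed build object;
# in practice ``status`` is a Build status enum value rather than a plain string.
def _example_failed_build_message():  # hypothetical helper, for illustration only
  class _FakeBuild(object):
    id = '1234-abcd'
    status = 'FAILURE'
  return str(FailedBuildException(_FakeBuild()))  # -> 'build 1234-abcd completed with status "FAILURE"'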
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
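# --- Illustrative sketch (added for clarity; not part of the original file) ---
# For a hypothetical project number 123456789, this is the Cloud Build service
# account checked above and the role set it is expected to hold.
def _example_expected_permissions():  # hypothetical helper, for illustration only
  service_account = ('serviceAccount:{0}@cloudbuild.gserviceaccount.com'
                     .format(123456789))
  expected = {'roles/compute.admin': service_account}
  for role in SERVICE_ACCOUNT_ROLES:
    expected[role] = service_account
  return expected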
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: an argparse namespace that may contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
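# --- Illustrative sketch (added for clarity; not part of the original file) ---
# Shows how --docker-image-tag selects the builder image; 'release' is the
# default and 'latest' is also supported.
def _example_import_builder_paths():  # hypothetical helper, for illustration only
  return [
      _IMAGE_IMPORT_BUILDER.format(_DEFAULT_BUILDER_VERSION),
      # -> 'gcr.io/compute-image-tools/gce_vm_image_import:release'
      _IMAGE_IMPORT_BUILDER.format('latest'),
      # -> 'gcr.io/compute-image-tools/gce_vm_image_import:latest'
  ]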
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
AppendNetworkAndSubnetArgs
|
Extracts network/subnet from CLI args and appends them for the importer.
Args:
args: an argparse namespace that may contain network/subnet args.
builder_args: list of str, args for builder.
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
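# --- Illustrative sketch (not part of the original module) -------------------
# A minimal, hypothetical walk-through of the daisy bucket naming above: the
# project id is sanitized, '-daisy-bkt' is appended, and _GetSafeBucketName
# rewrites 'google'/'goog' so the result is a legal bucket name.
def _sketch_daisy_bucket_name(project, bucket_location=None):
  """Illustrative only: mirrors GetDaisyBucketName without reading properties."""
  safe_project = project.replace(':', '-').replace('.', '-')
  bucket_name = '{0}-daisy-bkt'.format(safe_project)
  if bucket_location:
    bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
  return _GetSafeBucketName(bucket_name)
# e.g. _sketch_daisy_bucket_name('google.com:my-project', 'US')
#   -> 'go-ogle-com-my-project-daisy-bkt-us'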
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
# MASKED: AppendNetworkAndSubnetArgs function (lines 318-329)
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
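# --- Illustrative sketch (not part of the original module) -------------------
# A hypothetical example of the 2% shave performed by GetDaisyTimeout: a 2h
# (7200s) gcloud timeout leaves Daisy 7056s, since min(2% of 7200, 300) == 144.
def _sketch_daisy_timeout_example():
  """Illustrative only: exercises GetDaisyTimeout with a fake args namespace."""
  import argparse  # local import; only needed for this sketch
  fake_args = argparse.Namespace(timeout=7200)
  return GetDaisyTimeout(fake_args)  # == 7200 - min(144, 300) == 7056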
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
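# --- Illustrative sketch (not part of the original module) -------------------
# The single-step request assembled by _RunCloudBuild, shown as a plain dict
# instead of the real Cloud Build proto messages; the builder path and args
# below are hypothetical placeholders.
def _sketch_build_config_shape():
  """Illustrative only: the shape of the build request, not real API objects."""
  return {
      'steps': [{
          'name': 'gcr.io/compute-image-tools/gce_vm_image_import:release',
          'args': ['-example-flag=example-value'],  # hypothetical builder args
      }],
      'tags': ['gce-daisy'],
      'timeout': '7200s',
      # Only set when --log-location is provided:
      'logsBucket': 'gs://example-bucket/example-log-dir',
  }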
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
    labels: Dict of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
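# --- Illustrative sketch (not part of the original module) -------------------
# The flag strings AppendArg/AppendBoolArg produce for the builders, shown with
# hypothetical values.
def _sketch_append_arg_examples():
  """Illustrative only: shows the '-name=value' / '-name' flag formats."""
  flags = []
  AppendArg(flags, 'zone', 'us-central1-b')            # '-zone=us-central1-b'
  AppendBoolArg(flags, 'no-guest-environment')         # '-no-guest-environment'
  AppendArg(flags, 'timeout', 7056, '-{0}={1}s')       # '-timeout=7056s'
  AppendArg(flags, 'description', None)                # skipped: falsy value
  return flags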
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
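# --- Illustrative sketch (not part of the original function) -----------------
# Hypothetical usage of AppendNetworkAndSubnetArgs: the CLI values are
# lower-cased and forwarded to the builder as '-subnet=...' / '-network=...'.
def _sketch_network_subnet_example():
  """Illustrative only: exercises AppendNetworkAndSubnetArgs with fake args."""
  import argparse  # local import; only needed for this sketch
  fake_args = argparse.Namespace(subnet='My-Subnet', network=None)
  builder_args = []
  AppendNetworkAndSubnetArgs(fake_args, builder_args)
  return builder_args  # ['-subnet=my-subnet']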
| 318
| 329
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
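# --- Illustrative sketch (not part of the original module) -------------------
# A pure-Python rendering of the prefix filter applied by FilteredLogTailer:
# only lines starting with one of the filter strings are kept.
def _sketch_filter_log_text(text, output_filter):
  """Illustrative only: mirrors _PrintLogLine's filtering, returning a list."""
  kept = []
  for line in text.splitlines():
    if any(line.startswith(match) for match in output_filter):
      kept.append(line)
  return kept
# e.g. _sketch_filter_log_text('[import] step 1\ndebug noise', ['[import]'])
#   -> ['[import] step 1']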
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
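# --- Illustrative sketch (not part of the original module) -------------------
# How the backoff callable drives the polling loop above: each iteration sleeps
# for backoff(seconds_elapsed). The OVF import path uses
# lambda elapsed: 2 if elapsed < 30 else 15.
def _sketch_backoff_schedule(backoff, total_seconds=60):
  """Illustrative only: lists the sleep lengths a backoff function yields."""
  elapsed, sleeps = 0, []
  delay = backoff(0)
  while elapsed < total_seconds:
    sleeps.append(delay)
    elapsed += delay
    delay = backoff(elapsed)
  return sleeps
# e.g. _sketch_backoff_schedule(lambda e: 2 if e < 30 else 15, 60)
#   -> fifteen 2-second sleeps, then 15-second sleeps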
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
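# --- Illustrative sketch (not part of the original module) -------------------
# The role-diffing idea used above, with plain data instead of IAM policy
# messages: a role is satisfied once the service account appears among its
# binding's members.
def _sketch_missing_role_bindings(expected, bindings):
  """Illustrative only: expected maps role -> member; bindings is a list of
  (role, members) tuples standing in for policy.bindings."""
  missing = dict(expected)
  for role, members in bindings:
    if missing.get(role) in members:
      del missing[role]
  return missing
# e.g. _sketch_missing_role_bindings(
#     {'roles/compute.admin': 'serviceAccount:123@cloudbuild.gserviceaccount.com'},
#     [('roles/compute.admin', ['serviceAccount:123@cloudbuild.gserviceaccount.com'])])
#   -> {}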
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
    labels: Dict of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
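# --- Illustrative sketch (not part of the original module) -------------------
# The KEY=VALUE,KEY=VALUE encoding used above for the -labels importer flag,
# shown with hypothetical labels.
def _sketch_format_labels(labels):
  """Illustrative only: mirrors the labels join in RunOVFImportBuild."""
  return ','.join(['{}={}'.format(k, v) for k, v in labels.items()])
# e.g. _sketch_format_labels({'env': 'prod', 'team': 'infra'})
#   -> 'env=prod,team=infra'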
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
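# --- Illustrative sketch (not part of the original module) -------------------
# The 'key,OPERATOR,value1,value2,...' flag value built above, using
# hypothetical inputs in place of real scheduling messages.
def _sketch_node_affinity_flag(key, operator, values):
  """Illustrative only: mirrors _BuildOvfImporterNodeAffinityFlagValue."""
  flag = key + ',' + six.text_type(operator)
  for value in values:
    flag += ',' + value
  return flag
# e.g. _sketch_node_affinity_flag('workload', 'IN', ['prod', 'batch'])
#   -> 'workload,IN,prod,batch'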
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
MakeGcsObjectOrPathUri
|
Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
args: list of str, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
    labels: Dict of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
# MASKED: MakeGcsObjectOrPathUri function (lines 621-638)
|
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if a path contains only bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
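# --- Illustrative sketch (not part of the original function) -----------------
# The object-vs-bucket distinction MakeGcsObjectOrPathUri enforces, re-stated
# with plain string handling so it can run without the resources registry; the
# error type and message here are simplified stand-ins.
def _sketch_gcs_object_or_path(uri):
  """Illustrative only: accepts gs://bucket/object, rejects a bare bucket."""
  path = uri[len('gs://'):] if uri.startswith('gs://') else uri
  bucket, _, obj = path.partition('/')
  if not obj:
    raise ValueError('Missing object name: {0}'.format(uri))
  return 'gs://{0}/{1}'.format(bucket, obj)
# e.g. _sketch_gcs_object_or_path('gs://my-bucket/images/disk.vmdk')
#   -> 'gs://my-bucket/images/disk.vmdk'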
| 621
| 638
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
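# --- Illustrative sketch, not part of the original module ---
# Shows how a backoff callable drives the polling loop in StreamWithFilter:
# it maps elapsed seconds to the next sleep length. The schedule below mirrors
# the OVF-import backoff (poll every 2s for the first 30s, then every 15s).
def _example_poll_schedule(total_seconds=60):
  backoff = lambda elapsed: 2 if elapsed < 30 else 15
  sleeps = []
  elapsed = 0
  seconds_between_poll = backoff(0)
  while elapsed < total_seconds:
    sleeps.append(seconds_between_poll)
    elapsed += seconds_between_poll
    seconds_between_poll = backoff(elapsed)
  return sleeps  # [2, 2, ..., 15, 15]: frequent polls early, sparser later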
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
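# --- Illustrative sketch, not part of the original module ---
# Rough picture of the role -> member map _CheckIamPermissions builds before
# comparing it with the project's IAM policy bindings. The project number is
# hypothetical.
def _example_expected_permissions(project_number='123456789'):
  service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
      project_number)
  expected_permissions = {'roles/compute.admin': service_account}
  for role in SERVICE_ACCOUNT_ROLES:
    expected_permissions[role] = service_account
  return expected_permissions  # three roles, all bound to the Cloud Build account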
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
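# --- Illustrative sketch, not part of the original module ---
# Worked example of the bucket-name sanitization above for a hypothetical
# project id: ':' and '.' become '-', then _GetSafeBucketName rewrites
# 'google'/'goog' to satisfy the GCS naming rules.
def _example_daisy_bucket_name():
  project = 'google.com:my-project'  # hypothetical project id
  safe_project = project.replace(':', '-').replace('.', '-')
  bucket_name = '{0}-daisy-bkt'.format(safe_project)  # 'google-com-my-project-daisy-bkt'
  return _GetSafeBucketName(bucket_name)  # 'go-ogle-com-my-project-daisy-bkt'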
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
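# --- Illustrative sketch, not part of the original module ---
# The zone -> region convention GetSubnetRegion relies on: a zone such as
# 'us-central1-a' belongs to region 'us-central1' (the zone name minus its
# final '-<letter>' suffix). This is a simplified stand-in, not the real
# utils.ZoneNameToRegionName.
def _example_zone_to_region(zone='us-central1-a'):
  return zone.rsplit('-', 1)[0]  # 'us-central1'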
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
    args: argparse namespace, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
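# --- Illustrative sketch, not part of the original module ---
# Worked example of the timeout shaving above with the default 2h (7200s)
# build timeout: 2% of 7200 is 144, below the 300s cap, so Daisy is given
# 7200 - 144 = 7056 seconds.
def _example_daisy_timeout(timeout_seconds=7200):
  two_percent = int(timeout_seconds * 0.02)
  return timeout_seconds - min(two_percent, 300)  # 7056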
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
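# --- Illustrative sketch, not part of the original module ---
# Rough illustration of the '-node-affinity-label' value format produced
# above: key, operator, then each value, comma-separated. _FakeNodeAffinity is
# a hypothetical stand-in for the Compute API scheduling message.
def _example_node_affinity_flag_value():
  class _FakeNodeAffinity(object):
    key = 'workload'
    operator = 'IN'
    values = ['frontend', 'batch']
  return _BuildOvfImporterNodeAffinityFlagValue(_FakeNodeAffinity())
  # -> 'workload,IN,frontend,batch'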
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
  Raises storage_util.InvalidObjectNameError if a path contains only a bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
StreamWithFilter
|
Stream the logs for a build using a whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
# MASKED: StreamWithFilter function (lines 76-119)
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
    args: argparse namespace, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of - "alpha", "beta" or ""
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
  Raises storage_util.InvalidObjectNameError if a path contains only a bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
|
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
| 76
| 119
|
# -*- coding: utf-8 -*- #
# Copyright 2017 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for running Daisy builds on Google Container Builder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import time
from apitools.base.py import encoding
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.api_lib.cloudbuild import logs as cb_logs
from googlecloudsdk.api_lib.cloudresourcemanager import projects_api
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.api_lib.services import enable_api as services_api
from googlecloudsdk.api_lib.storage import storage_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.cloudbuild import execution
from googlecloudsdk.command_lib.compute.sole_tenancy import util as sole_tenancy_util
from googlecloudsdk.command_lib.projects import util as projects_util
from googlecloudsdk.core import exceptions
from googlecloudsdk.core import execution_utils
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
from googlecloudsdk.core.console import console_io
import six
_IMAGE_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_import:{}'
_IMAGE_EXPORT_BUILDER = 'gcr.io/compute-image-tools/gce_vm_image_export:{}'
_OVF_IMPORT_BUILDER = 'gcr.io/compute-image-tools/gce_ovf_import:{}'
_DEFAULT_BUILDER_VERSION = 'release'
SERVICE_ACCOUNT_ROLES = [
'roles/iam.serviceAccountUser',
'roles/iam.serviceAccountTokenCreator']
class FilteredLogTailer(cb_logs.LogTailer):
"""Subclass of LogTailer that allows for filtering."""
def _PrintLogLine(self, text):
"""Override PrintLogLine method to use self.filter."""
if self.filter:
output_lines = text.splitlines()
for line in output_lines:
for match in self.filter:
if line.startswith(match):
self.out.Print(line)
break
else:
self.out.Print(text)
class CloudBuildClientWithFiltering(cb_logs.CloudBuildClient):
"""Subclass of CloudBuildClient that allows filtering."""
def StreamWithFilter(self, build_ref, backoff, output_filter=None):
"""Stream the logs for a build using whitelist filter.
Args:
build_ref: Build reference, The build whose logs shall be streamed.
backoff: A function that takes the current elapsed time
and returns the next sleep length. Both are in seconds.
output_filter: List of strings, The output will only be shown if the line
starts with one of the strings in the list.
Raises:
NoLogsBucketException: If the build does not specify a logsBucket.
Returns:
Build message, The completed or terminated build as read for the final
poll.
"""
build = self.GetBuild(build_ref)
log_tailer = FilteredLogTailer.FromBuild(build)
log_tailer.filter = output_filter
statuses = self.messages.Build.StatusValueValuesEnum
working_statuses = [
statuses.QUEUED,
statuses.WORKING,
]
seconds_between_poll = backoff(0)
seconds_elapsed = 0
while build.status in working_statuses:
log_tailer.Poll()
time.sleep(seconds_between_poll)
build = self.GetBuild(build_ref)
seconds_elapsed += seconds_between_poll
seconds_between_poll = backoff(seconds_elapsed)
# Poll the logs one final time to ensure we have everything. We know this
# final poll will get the full log contents because GCS is strongly
# consistent and Container Builder waits for logs to finish pushing before
# marking the build complete.
log_tailer.Poll(is_last=True)
return build
class FailedBuildException(exceptions.Error):
"""Exception for builds that did not succeed."""
def __init__(self, build):
super(FailedBuildException,
self).__init__('build {id} completed with status "{status}"'.format(
id=build.id, status=build.status))
class SubnetException(exceptions.Error):
"""Exception for subnet related errors."""
class ImageOperation(object):
"""Enum representing image operation."""
IMPORT = 'import'
EXPORT = 'export'
def AddCommonDaisyArgs(parser, add_log_location=True):
"""Common arguments for Daisy builds."""
if add_log_location:
parser.add_argument(
'--log-location',
help='Directory in Cloud Storage to hold build logs. If not '
'set, ```gs://<project num>.cloudbuild-logs.googleusercontent.com/``` '
'is created and used.',
)
parser.add_argument(
'--timeout',
type=arg_parsers.Duration(),
default='2h',
help="""\
Maximum time a build can last before it fails as "TIMEOUT".
For example, specifying `2h` fails the process after 2 hours.
See $ gcloud topic datetimes for information about duration formats.
""")
base.ASYNC_FLAG.AddToParser(parser)
def AddExtraCommonDaisyArgs(parser):
"""Extra common arguments for Daisy builds."""
parser.add_argument(
'--docker-image-tag',
default=_DEFAULT_BUILDER_VERSION,
hidden=True,
help="""\
Specify which docker image tag (of tools from compute-image-tools)
should be used for this command. By default it's "release", while
"latest" is supported as well. There may be more versions supported in
the future.
"""
)
def _CheckIamPermissions(project_id):
"""Check for needed IAM permissions and prompt to add if missing.
Args:
project_id: A string with the name of the project.
"""
project = projects_api.Get(project_id)
# If the user's project doesn't have cloudbuild enabled yet, then the service
# account won't even exist. If so, then ask to enable it before continuing.
# Also prompt them to enable Stackdriver Logging if they haven't yet.
expected_services = ['cloudbuild.googleapis.com', 'logging.googleapis.com']
for service_name in expected_services:
if not services_api.IsServiceEnabled(project.projectId, service_name):
# TODO(b/112757283): Split this out into a separate library.
prompt_message = (
'The "{0}" service is not enabled for this project. '
'It is required for this operation.\n').format(service_name)
console_io.PromptContinue(
prompt_message,
'Would you like to enable this service?',
throw_if_unattended=True,
cancel_on_no=True)
services_api.EnableService(project.projectId, service_name)
# Now that we're sure the service account exists, actually check permissions.
service_account = 'serviceAccount:{0}@cloudbuild.gserviceaccount.com'.format(
project.projectNumber)
expected_permissions = {'roles/compute.admin': service_account}
for role in SERVICE_ACCOUNT_ROLES:
expected_permissions[role] = service_account
permissions = projects_api.GetIamPolicy(project_id)
for binding in permissions.bindings:
if expected_permissions.get(binding.role) in binding.members:
del expected_permissions[binding.role]
if expected_permissions:
ep_table = [
'{0} {1}'.format(role, account)
for role, account in expected_permissions.items()
]
prompt_message = (
'The following IAM permissions are needed for this operation:\n'
'[{0}]\n'.format('\n'.join(ep_table)))
console_io.PromptContinue(
message=prompt_message,
prompt_string='Would you like to add the permissions',
throw_if_unattended=True,
cancel_on_no=True)
for role, account in expected_permissions.items():
log.info('Adding [{0}] to [{1}]'.format(account, role))
projects_api.AddIamPolicyBinding(project_id, account, role)
def _CreateCloudBuild(build_config, client, messages):
"""Create a build in cloud build.
Args:
build_config: A cloud build Build message.
client: The cloud build api client.
messages: The cloud build api messages module.
Returns:
Tuple containing a cloud build build object and the resource reference
for that build.
"""
log.debug('submitting build: {0}'.format(repr(build_config)))
op = client.projects_builds.Create(
messages.CloudbuildProjectsBuildsCreateRequest(
build=build_config, projectId=properties.VALUES.core.project.Get()))
json = encoding.MessageToJson(op.metadata)
build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build
build_ref = resources.REGISTRY.Create(
collection='cloudbuild.projects.builds',
projectId=build.projectId,
id=build.id)
log.CreatedResource(build_ref)
if build.logUrl:
log.status.Print('Logs are available at [{0}].'.format(build.logUrl))
else:
log.status.Print('Logs are available in the Cloud Console.')
return build, build_ref
def GetDaisyBucketName(bucket_location=None):
"""Determine bucket name for daisy.
Args:
bucket_location: str, specified bucket location.
Returns:
str, bucket name for daisy.
"""
project = properties.VALUES.core.project.GetOrFail()
safe_project = project.replace(':', '-')
safe_project = safe_project.replace('.', '-')
bucket_name = '{0}-daisy-bkt'.format(safe_project)
if bucket_location:
bucket_name = '{0}-{1}'.format(bucket_name, bucket_location).lower()
safe_bucket_name = _GetSafeBucketName(bucket_name)
# TODO (b/117668144): Make Daisy scratch bucket ACLs same as
# source/destination bucket
return safe_bucket_name
def _GetSafeBucketName(bucket_name):
# Rules are from https://cloud.google.com/storage/docs/naming.
# Bucket name can't contain "google".
bucket_name = bucket_name.replace('google', 'go-ogle')
# Bucket name can't start with "goog". Workaround for b/128691621
bucket_name = bucket_name[:4].replace('goog', 'go-og') + bucket_name[4:]
return bucket_name
def GetSubnetRegion():
"""Gets region from global properties/args that should be used for subnet arg.
Returns:
str, region
Raises:
SubnetException: if region couldn't be inferred.
"""
if properties.VALUES.compute.zone.Get():
return utils.ZoneNameToRegionName(properties.VALUES.compute.zone.Get())
elif properties.VALUES.compute.region.Get():
return properties.VALUES.compute.region.Get()
raise SubnetException('Region or zone should be specified.')
def AppendNetworkAndSubnetArgs(args, builder_args):
"""Extracts network/subnet out of CLI args and append for importer.
Args:
    args: argparse namespace, CLI args that might contain network/subnet args.
builder_args: list of str, args for builder.
"""
if args.subnet:
AppendArg(builder_args, 'subnet', args.subnet.lower())
if args.network:
AppendArg(builder_args, 'network', args.network.lower())
def RunImageImport(args, import_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_import on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
import_args: A list of key-value pairs to pass to importer.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_IMPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, import_args, tags, output_filter)
def RunImageExport(args, export_args, tags, output_filter,
docker_image_tag=_DEFAULT_BUILDER_VERSION):
"""Run a build over gce_vm_image_export on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
export_args: A list of key-value pairs to pass to exporter.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
docker_image_tag: Specified docker image tag.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
builder = _IMAGE_EXPORT_BUILDER.format(docker_image_tag)
return RunImageCloudBuild(args, builder, export_args, tags, output_filter)
def RunImageCloudBuild(args, builder, builder_args, tags, output_filter):
"""Run a build related to image on Google Cloud Builder.
Args:
args: An argparse namespace. All the arguments that were provided to this
command invocation.
builder: Path to builder image.
builder_args: A list of key-value pairs to pass to builder.
tags: A list of strings for adding tags to the Argo build.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
return _RunCloudBuild(args, builder, builder_args,
['gce-daisy'] + tags, output_filter, args.log_location)
def GetDaisyTimeout(args):
# Make Daisy time out before gcloud by shaving off 2% from the timeout time,
# up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
daisy_timeout = args.timeout - min(two_percent, 300)
return daisy_timeout
def _RunCloudBuild(args,
builder,
build_args,
build_tags=None,
output_filter=None,
log_location=None,
backoff=lambda elapsed: 1):
"""Run a build with a specific builder on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
builder: path to builder image
build_args: args to be sent to builder
build_tags: tags to be attached to the build
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
log_location: GCS path to directory where logs will be stored.
backoff: A function that takes the current elapsed time and returns
the next sleep length. Both are in seconds.
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
client = cloudbuild_util.GetClientInstance()
messages = cloudbuild_util.GetMessagesModule()
# Create the build request.
build_config = messages.Build(
steps=[
messages.BuildStep(
name=builder,
args=build_args,
),
],
tags=build_tags,
timeout='{0}s'.format(args.timeout),
)
if log_location:
gcs_log_dir = resources.REGISTRY.Parse(
args.log_location, collection='storage.objects')
build_config.logsBucket = ('gs://{0}/{1}'.format(gcs_log_dir.bucket,
gcs_log_dir.object))
# Start the build.
build, build_ref = _CreateCloudBuild(build_config, client, messages)
# If the command is run --async, we just print out a reference to the build.
if args.async_:
return build
mash_handler = execution.MashHandler(
execution.GetCancelBuildHandler(client, messages, build_ref))
# Otherwise, logs are streamed from GCS.
with execution_utils.CtrlCSection(mash_handler):
build = CloudBuildClientWithFiltering(client, messages).StreamWithFilter(
build_ref, backoff, output_filter=output_filter)
if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
log.status.Print(
'Your build timed out. Use the [--timeout=DURATION] flag to change '
'the timeout threshold.')
if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
raise FailedBuildException(build)
return build
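# Illustrative sketch (not part of the original module): the ``backoff`` callable
# maps elapsed seconds to the next polling sleep. The OVF import flow further
# below, for example, polls every 2s during the first 30s and every 15s afterwards:
# >>> backoff = lambda elapsed: 2 if elapsed < 30 else 15
# >>> [backoff(t) for t in (0, 29, 30, 120)]
# [2, 2, 15, 15]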
def RunOVFImportBuild(args, compute_client, instance_name, source_uri,
no_guest_environment, can_ip_forward, deletion_protection,
description, labels, machine_type, network, network_tier,
subnet, private_network_ip, no_restart_on_failure, os,
tags, zone, project, output_filter,
compute_release_track):
"""Run a OVF import build on Google Cloud Builder.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
compute_client: Google Compute Engine client.
instance_name: Name of the instance to be imported.
source_uri: A GCS path to OVA or OVF package.
no_guest_environment: If set to True, Google Guest Environment won't be
installed on the boot disk of the VM.
can_ip_forward: If set to True, allows the instances to send and receive
packets with non-matching destination or source IP addresses.
deletion_protection: Enables deletion protection for the instance.
description: Specifies a textual description of the instances.
labels: List of label KEY=VALUE pairs to add to the instance.
machine_type: Specifies the machine type used for the instances.
network: Specifies the network that the instances will be part of.
network_tier: Specifies the network tier of the interface. NETWORK_TIER must
be one of: PREMIUM, STANDARD.
subnet: Specifies the subnet that the instances will be part of.
private_network_ip: Specifies the RFC1918 IP to assign to the instance.
no_restart_on_failure: The instances will NOT be restarted if they are
terminated by Compute Engine.
os: Specifies the OS of the boot disk being imported.
tags: A list of strings for adding tags to the Argo build.
zone: The GCP zone to tell Daisy to do work in. If unspecified, defaults to
wherever the Argo runner happens to be.
project: The Google Cloud Platform project name to use for OVF import.
output_filter: A list of strings indicating what lines from the log should
be output. Only lines that start with one of the strings in output_filter
will be displayed.
compute_release_track: release track to be used for Compute API calls. One
of: "alpha", "beta", or "".
Returns:
A build object that either streams the output or is displayed as a
link to the build.
Raises:
FailedBuildException: If the build is completed and not 'SUCCESS'.
"""
project_id = projects_util.ParseProject(
properties.VALUES.core.project.GetOrFail())
_CheckIamPermissions(project_id)
# Make OVF import time-out before gcloud by shaving off 2% from the timeout
# time, up to a max of 5m (300s).
two_percent = int(args.timeout * 0.02)
ovf_import_timeout = args.timeout - min(two_percent, 300)
ovf_importer_args = []
AppendArg(ovf_importer_args, 'instance-names', instance_name)
AppendArg(ovf_importer_args, 'client-id', 'gcloud')
AppendArg(ovf_importer_args, 'ovf-gcs-path', source_uri)
AppendBoolArg(ovf_importer_args, 'no-guest-environment',
no_guest_environment)
AppendBoolArg(ovf_importer_args, 'can-ip-forward', can_ip_forward)
AppendBoolArg(ovf_importer_args, 'deletion-protection', deletion_protection)
AppendArg(ovf_importer_args, 'description', description)
if labels:
AppendArg(ovf_importer_args, 'labels',
','.join(['{}={}'.format(k, v) for k, v in labels.items()]))
AppendArg(ovf_importer_args, 'machine-type', machine_type)
AppendArg(ovf_importer_args, 'network', network)
AppendArg(ovf_importer_args, 'network-tier', network_tier)
AppendArg(ovf_importer_args, 'subnet', subnet)
AppendArg(ovf_importer_args, 'private-network-ip', private_network_ip)
AppendBoolArg(ovf_importer_args, 'no-restart-on-failure',
no_restart_on_failure)
AppendArg(ovf_importer_args, 'os', os)
if tags:
AppendArg(ovf_importer_args, 'tags', ','.join(tags))
AppendArg(ovf_importer_args, 'zone', zone)
AppendArg(ovf_importer_args, 'timeout', ovf_import_timeout, '-{0}={1}s')
AppendArg(ovf_importer_args, 'project', project)
_AppendNodeAffinityLabelArgs(ovf_importer_args, args, compute_client.messages)
if compute_release_track:
AppendArg(ovf_importer_args, 'release-track', compute_release_track)
build_tags = ['gce-ovf-import']
backoff = lambda elapsed: 2 if elapsed < 30 else 15
return _RunCloudBuild(args, _OVF_IMPORT_BUILDER.format(args.docker_image_tag),
ovf_importer_args, build_tags, output_filter,
backoff=backoff)
def _AppendNodeAffinityLabelArgs(
ovf_importer_args, args, compute_client_messages):
node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
args, compute_client_messages)
for node_affinity in node_affinities:
AppendArg(ovf_importer_args, 'node-affinity-label',
_BuildOvfImporterNodeAffinityFlagValue(node_affinity))
def _BuildOvfImporterNodeAffinityFlagValue(node_affinity):
node_affinity_flag = node_affinity.key + ',' + six.text_type(
node_affinity.operator)
for value in node_affinity.values:
node_affinity_flag += ',' + value
return node_affinity_flag
def AppendArg(args, name, arg, format_pattern='-{0}={1}'):
if arg:
args.append(format_pattern.format(name, arg))
def AppendBoolArg(args, name, arg=True):
AppendArg(args, name, arg, '-{0}')
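# Illustrative sketch (not part of the original module) of how the two helpers
# above build daisy-style flags; the values are hypothetical:
# >>> flags = []
# >>> AppendArg(flags, 'zone', 'us-central1-a')
# >>> AppendBoolArg(flags, 'no-guest-environment')
# >>> flags
# ['-zone=us-central1-a', '-no-guest-environment']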
def MakeGcsUri(uri):
obj_ref = resources.REGISTRY.Parse(uri)
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
def MakeGcsObjectOrPathUri(uri):
"""Creates Google Cloud Storage URI for an object or a path.
Raises storage_util.InvalidObjectNameError if the path contains only a bucket
name.
Args:
uri: a string to a Google Cloud Storage object or a path. Can be a gs:// or
an https:// variant.
Returns:
Google Cloud Storage URI for an object or a path.
"""
obj_ref = resources.REGISTRY.Parse(uri)
if hasattr(obj_ref, 'object'):
return 'gs://{0}/{1}'.format(obj_ref.bucket, obj_ref.object)
else:
raise storage_util.InvalidObjectNameError(uri, 'Missing object name')
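# Illustrative sketch (not part of the original module): both helpers normalize a
# Cloud Storage reference to a gs:// URI, and MakeGcsObjectOrPathUri additionally
# raises InvalidObjectNameError when only a bucket is given. Names are hypothetical:
# >>> MakeGcsUri('gs://my-bucket/ovf/package.ova')
# 'gs://my-bucket/ovf/package.ova'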
|
setup_wandb
|
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping strings to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
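# Illustrative sketch (not part of the original class): datasets whose size
# TensorFlow cannot infer (e.g. ones built from a generator) can have the
# required cardinality attached explicitly; ``num_examples`` is hypothetical:
# >>> ds = ds.apply(tf.data.experimental.assert_cardinality(num_examples))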
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
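# Illustrative sketch (not part of the original class): passing ``optimizers`` at
# construction time skips this default entirely; ``model``, ``args`` and ``ds``
# are hypothetical:
# >>> opt = tf.keras.optimizers.Adam(learning_rate=3e-5)
# >>> sched = tf.keras.optimizers.schedules.PolynomialDecay(3e-5, 1000, end_learning_rate=0.0)
# >>> trainer = TFTrainer(model, args, train_dataset=ds, optimizers=(opt, sched))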
# MASKED: setup_wandb function (lines 233-255)
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
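# Illustrative sketch (not part of the original class): per the docstring above,
# Comet can be switched to offline logging purely through the environment; the
# directory below is hypothetical:
# >>> os.environ["COMET_MODE"] = "OFFLINE"
# >>> os.environ["COMET_OFFLINE_DIRECTORY"] = "./comet-runs"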
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with and without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
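# Editorial note (not part of the original class): with more than one replica the
# logits and labels arrive as PerReplica containers, so each replica's tensors are
# converted to numpy and appended along axis 0 to assemble the full arrays above.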
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires an explicit tensor shape in order to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
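# Editorial note (not part of the original class): each accumulation iteration
# above consumes one per-device slice of the batch and rotates it to the back of
# the tensors, so after ``gradient_accumulation_steps`` iterations every slice has
# contributed exactly once before the clipped, accumulated gradients are applied.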
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
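# Editorial note (not part of the original class): positions labelled -100 follow
# the usual ignore-index convention, so they are excluded from the instance count
# used to normalize the per-example loss.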
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, (dict)):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
|
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
| 233
| 255
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping strings to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
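# Illustrative sketch (not part of the original class): per the docstring above,
# the integration can be redirected or disabled purely through the environment;
# the project name below is hypothetical:
# >>> os.environ["WANDB_PROJECT"] = "my-tf-experiments"
# >>> os.environ["WANDB_DISABLED"] = "true"   # skip wandb entirely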
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with and without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires an explicit tensor shape in order to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make `PerReplica` objects for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Runs prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
|
setup_comet
|
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
# MASKED: setup_comet function (lines 257-285)
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Runs evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires explicit tensor shapes in order to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make `PerReplica` objects for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Runs prediction and returns predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
|
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
| 257
| 285
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
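# Illustrative note (not part of the original file): the cardinality check above requires the
# training dataset to report a finite size. Datasets built with `tf.data.Dataset.from_tensor_slices`
# already do; a generator-backed dataset may need the cardinality asserted explicitly, e.g.
# (hypothetical `gen` and `num_examples`):
#
#   ds = tf.data.Dataset.from_generator(gen, output_types=(tf.int32, tf.int32))
#   ds = ds.apply(tf.data.experimental.assert_cardinality(num_examples))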
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
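# Illustrative sketch (not part of the original file): instead of the defaults created above,
# an optimizer/schedule pair can be passed to the trainer through the `optimizers` argument,
# e.g. (hypothetical hyperparameters):
#
#   schedule = tf.keras.optimizers.schedules.PolynomialDecay(5e-5, decay_steps=10000, end_learning_rate=0.0)
#   optimizer = tf.keras.optimizers.Adam(learning_rate=schedule)
#   trainer = TFTrainer(model, args, train_dataset=train_ds, optimizers=(optimizer, schedule))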
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
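# Illustrative note: the W&B integration is configured entirely through environment variables,
# e.g. (hypothetical values, set before the trainer is instantiated):
#
#   os.environ["WANDB_PROJECT"] = "my-project"   # defaults to "huggingface"
#   os.environ["WANDB_DISABLED"] = "true"        # disables wandb logging entirely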
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
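# Illustrative note: Comet.ml logging is likewise driven by environment variables,
# e.g. (hypothetical values, set before the trainer is instantiated):
#
#   os.environ["COMET_MODE"] = "OFFLINE"
#   os.environ["COMET_PROJECT_NAME"] = "my-experiments"
#   os.environ["COMET_OFFLINE_DIRECTORY"] = "./comet-logs"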
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Runs evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires explicit tensor shapes in order to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
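        # Count only label positions that contribute to the loss; -100 is the conventional
        # "ignore" label value (e.g. padding tokens) in transformers datasets.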
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
            # need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
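            # Re-use the cached memory states ("mems", e.g. for Transformer-XL/XLNet-style models)
            # produced by the previous forward pass; they are stored below via args.past_index.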
features["mems"] = self._past
        if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
        Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
|
prediction_loop
|
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works with or without labels.
|
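A minimal usage sketch (illustrative only, not part of the source record): both evaluate() and
predict() delegate to this loop, and the caller typically only supplies a compute_metrics callback.
The trainer/model/dataset names in the commented lines below are assumed placeholders.

import numpy as np
from transformers import EvalPrediction

def compute_metrics(p: EvalPrediction) -> dict:
    # argmax over the logits gathered by prediction_loop
    preds = np.argmax(p.predictions, axis=-1)
    return {"accuracy": float((preds == p.label_ids).mean())}

# trainer = TFTrainer(model=model, args=args, eval_dataset=eval_ds, compute_metrics=compute_metrics)
# metrics = trainer.evaluate()        # runs prediction_loop with description="Evaluation"
# output = trainer.predict(test_ds)   # PredictionOutput(predictions, label_ids, metrics)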
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping strings to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
            A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
        Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
        Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
def setup_comet(self):
"""
        Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
# MASKED: prediction_loop function (lines 287-378)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
        Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
        Main entry point to train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
        # It only has an effect when a TPU is used, which requires explicit tensor shapes in order to make
        # the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
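            # Gradient accumulation: process the global batch in per-replica slices of size
            # `train_batch_size // n_replicas`. Each iteration runs a training step on the leading
            # slice (training_step feeds the resulting gradients into self.gradient_accumulator),
            # then rotates that slice to the back so the next iteration sees the next slice.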
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
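        # Count only label positions that contribute to the loss; -100 is the conventional
        # "ignore" label value (e.g. padding tokens) in transformers datasets.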
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
            # need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
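            # Re-use the cached memory states ("mems", e.g. for Transformer-XL/XLNet-style models)
            # produced by the previous forward pass; they are stored below via args.past_index.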
features["mems"] = self._past
        if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
        Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
|
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
        Works with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
        label_ids: Optional[np.ndarray] = None
        preds: Optional[np.ndarray] = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of evaluation
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
| 287
| 378
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
            :class:`~transformers.EvalPrediction` and return a dictionary mapping strings to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
            A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
        Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
        Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
def setup_comet(self):
"""
        Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
        Works with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
        label_ids: Optional[np.ndarray] = None
        preds: Optional[np.ndarray] = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of evaluation
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
        Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
        Main entry point to train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
        # It only has an effect when a TPU is used, which requires explicit tensor shapes in order to make
        # the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
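            # Gradient accumulation: process the global batch in per-replica slices of size
            # `train_batch_size // n_replicas`. Each iteration runs a training step on the leading
            # slice (training_step feeds the resulting gradients into self.gradient_accumulator),
            # then rotates that slice to the back so the next iteration sees the next slice.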
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
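        # Count only label positions that contribute to the loss; -100 is the conventional
        # "ignore" label value (e.g. padding tokens) in transformers datasets.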
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
            # need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
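# Illustrative usage sketch (assumptions: `my_model`, `my_args`, `my_train_ds` and
# `my_test_ds` are placeholders supplied by the caller; they are not defined here).
# trainer = TFTrainer(model=my_model, args=my_args, train_dataset=my_train_ds)
# trainer.train()
# output = trainer.predict(my_test_ds)      # PredictionOutput(predictions, label_ids, metrics)
# trainer.save_model(my_args.output_dir)    # reload later via .from_pretrained(my_args.output_dir)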
|
log
|
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
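# Illustrative sketch (placeholder tensors, not part of the original file): the trainer
# requires a dataset with a known cardinality. Datasets built with from_tensor_slices
# know their size; generator-backed datasets need an explicit assert_cardinality.
import tensorflow as tf
features = {"input_ids": tf.zeros((8, 16), dtype=tf.int32)}
labels = tf.zeros((8,), dtype=tf.int32)
train_ds = tf.data.Dataset.from_tensor_slices((features, labels))
assert tf.data.experimental.cardinality(train_ds).numpy() == 8
# For a generator-backed dataset, attach the size yourself, e.g.:
# gen_ds = gen_ds.apply(tf.data.experimental.assert_cardinality(8))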
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
# MASKED: log function (lines 380-416)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires explicit tensor shapes for
# the gradient accumulation implementation to work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be a `float`, we always cast ``epochs`` to a `float`.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
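# Worked example of the step bookkeeping above (numbers are assumptions, not from the file):
# num_train_examples = 1000, train_batch_size = 8, gradient_accumulation_steps = 4
#   -> total_train_batch_size = 32 and steps_per_epoch = ceil(1000 / 32) = 32
# With num_train_epochs = 3 and max_steps <= 0, t_total = 32 * 3 = 96 optimizer steps.
import math
assert math.ceil(1000 / 32) == 32 and 32 * 3 == 96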
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
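# Illustrative note on the accumulation branch above (numbers are assumptions):
# with train_batch_size = 32, n_replicas = 2 and gradient_accumulation_steps = 4,
# each micro-step consumes a slice of 32 // 2 = 16 examples, the remaining examples
# are rotated to the front for the next micro-step, and the clipped, accumulated
# gradients are applied exactly once after all 4 micro-steps.
per_replica_slice = 32 // 2
assert per_replica_slice == 16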
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
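# Illustrative sketch (uses the same internal PerReplica container imported above;
# the tensors are assumptions for demonstration): when labels arrive as one tensor
# per replica, the scalar instance count is wrapped the same way so that
# `strategy.run` hands one copy to every replica.
import tensorflow as tf
from tensorflow.python.distribute.values import PerReplica
per_replica_labels = PerReplica([tf.constant([1, -100]), tf.constant([2, 3])])
nb = tf.constant(3)
broadcast = PerReplica([nb] * len(per_replica_labels.values))
assert len(broadcast.values) == 2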
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
|
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
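# Illustrative call (assumption: `trainer` is an already constructed TFTrainer):
# trainer.log({"loss": 0.42, "learning_rate": 3e-5})
# Each value is written to TensorBoard at `trainer.global_step`, mirrored to W&B and
# Comet.ml when those integrations are available, and "epoch" is filled in from
# `trainer.epoch_logging` before the dictionary is logged to the console.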
| 380
| 416
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
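# Illustrative sketch of bypassing the default above by passing the `optimizers` tuple
# to the constructor (hyperparameter values are assumptions, not recommendations):
import tensorflow as tf
lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-5, decay_steps=10_000, end_learning_rate=0.0
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
# trainer = TFTrainer(model=my_model, args=my_args, optimizers=(optimizer, lr_schedule))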
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of the evaluation loop
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires explicit tensor shapes for
# the gradient accumulation implementation to work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be a `float`, we always cast ``epochs`` to a `float`.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, dict):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
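# Illustrative sketch of the documented extension point (the subclass, the call
# convention of `self.model`, and the loss choice are assumptions, not part of the
# original file): override `run_model` to control how the per-example loss is built
# while reusing the rest of the training/evaluation loop.
class MyTFTrainer(TFTrainer):
    def run_model(self, features, labels, training):
        logits = self.model(features, training=training)[0]
        loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True)
        return loss, logits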
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
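# --- Illustrative usage sketch (editor's addition, not part of the original source) ---
# A minimal, hypothetical example of how the TFTrainer above is typically wired together.
# The checkpoint name and the `train_ds` / `eval_ds` tf.data.Datasets are assumptions;
# note that TFTrainer requires datasets with an asserted cardinality.
def _example_tftrainer_usage(train_ds, eval_ds):
    from transformers import TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments

    training_args = TFTrainingArguments(output_dir="./out", logging_steps=10, save_steps=100)
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")
    trainer = TFTrainer(model=model, args=training_args, train_dataset=train_ds, eval_dataset=eval_ds)
    trainer.train()
    metrics = trainer.evaluate()
    trainer.save_model("./out")
    return metrics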
|
run_model
|
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of evaluation
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and return metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires an explicit tensor shape to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
# MASKED: run_model function (lines 716-750)
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
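# --- Illustrative note (editor's addition, not part of the original source) ---
# apply_gradients() above accumulates gradients by repeatedly running training_step()
# on the leading `train_batch_size // n_replicas` slice of the per-replica batch and
# then "rolling" the batch so the next accumulation step sees the following slice.
# A standalone sketch of that roll pattern, with an assumed micro-batch size:
def _example_accumulation_roll():
    import tensorflow as tf

    micro_bs = 2  # assumed micro-batch size (train_batch_size // n_replicas)
    labels = tf.constant([0, 1, 2, 3, 4, 5])
    seen = []
    for _ in range(3):  # three accumulation steps
        seen.append(labels[:micro_bs])  # the slice consumed by training_step()
        labels = tf.concat([labels[micro_bs:], labels[:micro_bs]], axis=0)  # roll
    return seen  # slices [0, 1], [2, 3], [4, 5]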
|
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, (dict)):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
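# --- Illustrative note (editor's addition, not part of the original source) ---
# run_model() is the single hook through which both training_step() and prediction_step()
# obtain (loss, logits), so overriding it in a TFTrainer subclass is the intended way to
# customize how the loss is computed. A minimal hypothetical sketch:
#
#     class MyTrainer(TFTrainer):
#         def run_model(self, features, labels, training):
#             outputs = self.model(features, labels=labels, training=training)
#             loss, logits = outputs[:2]
#             return 0.5 * loss, logits  # e.g. rescale the loss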
| 716
| 750
|
"""Tensorflow trainer class."""
import datetime
import math
import os
import warnings
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.distribute.values import PerReplica
from .integrations import is_comet_available, is_wandb_available
from .modeling_tf_utils import TFPreTrainedModel
from .optimization_tf import GradientAccumulator, create_optimizer
from .trainer_utils import PREFIX_CHECKPOINT_DIR, EvalPrediction, PredictionOutput, set_seed
from .training_args_tf import TFTrainingArguments
from .utils import logging
if is_wandb_available():
import wandb
if is_comet_available():
import comet_ml
logger = logging.get_logger(__name__)
class TFTrainer:
"""
TFTrainer is a simple but feature-complete training and eval loop for TensorFlow,
optimized for 🤗 Transformers.
Args:
model (:class:`~transformers.TFPreTrainedModel`):
The model to train, evaluate or use for predictions.
args (:class:`~transformers.TFTrainingArguments`):
The arguments to tweak training.
train_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for training. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
The dataset to use for evaluation. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when
using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
``model(features, **labels)``.
compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
The function that will be used to compute metrics at evaluation. Must take a
:class:`~transformers.EvalPrediction` and return a dictionary mapping metric names to metric values.
tb_writer (:obj:`tf.summary.SummaryWriter`, `optional`):
Object to write to TensorBoard.
optimizers (:obj:`Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule]`, `optional`):
A tuple containing the optimizer and the scheduler to use. The optimizer defaults to an instance of
:class:`tf.keras.optimizers.Adam` if :obj:`args.weight_decay_rate` is 0 else an instance of
:class:`~transformers.AdamWeightDecay`. The scheduler will default to an instance of
:class:`tf.keras.optimizers.schedules.PolynomialDecay` if :obj:`args.num_warmup_steps` is 0 else
an instance of :class:`~transformers.WarmUp`.
kwargs:
Deprecated keyword arguments.
"""
def __init__(
self,
model: TFPreTrainedModel,
args: TFTrainingArguments,
train_dataset: Optional[tf.data.Dataset] = None,
eval_dataset: Optional[tf.data.Dataset] = None,
compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None,
tb_writer: Optional[tf.summary.SummaryWriter] = None,
optimizers: Tuple[tf.keras.optimizers.Optimizer, tf.keras.optimizers.schedules.LearningRateSchedule] = (
None,
None,
),
**kwargs,
):
assert parse(tf.__version__).release >= (2, 2, 0), (
"You need to run the TensorFlow trainer with at least the version 2.2.0, your version is %r "
% tf.__version__
)
self.model = model
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.optimizer, self.lr_scheduler = optimizers
self.gradient_accumulator = GradientAccumulator()
self.global_step = 0
self.epoch_logging = 0
if "prediction_loss_only" in kwargs:
warnings.warn(
"Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a future version. Use `args.prediction_loss_only` instead.",
FutureWarning,
)
self.args.prediction_loss_only = kwargs.pop("prediction_loss_only")
assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}."
if tb_writer is not None:
self.tb_writer = tb_writer
else:
self.tb_writer = tf.summary.create_file_writer(self.args.logging_dir)
if is_wandb_available():
self.setup_wandb()
elif os.environ.get("WANDB_DISABLED") != "true":
logger.info(
"You are instantiating a Trainer but W&B is not installed. To use wandb logging, "
"run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface."
)
if is_comet_available():
self.setup_comet()
elif os.environ.get("COMET_MODE") != "DISABLED":
logger.info(
"To use comet_ml logging, run `pip/conda install comet_ml` "
"see https://www.comet.ml/docs/python-sdk/huggingface/"
)
set_seed(self.args.seed)
def get_train_tfdataset(self) -> tf.data.Dataset:
"""
Returns the training :class:`~tf.data.Dataset`.
Subclass and override this method if you want to inject some custom behavior.
"""
if self.train_dataset is None:
raise ValueError("Trainer: training requires a train_dataset.")
self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
self.num_train_examples = tf.data.experimental.cardinality(self.train_dataset).numpy()
if self.num_train_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
ds = (
self.train_dataset.repeat()
.shuffle(self.num_train_examples, seed=self.args.seed)
.batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds)
def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
"""
Returns the evaluation :class:`~tf.data.Dataset`.
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
loss is instead calculated by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
if eval_dataset is None and self.eval_dataset is None:
raise ValueError("Trainer: evaluation requires an eval_dataset.")
eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
num_examples = tf.data.experimental.cardinality(eval_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
eval_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
"""
Returns a test :class:`~tf.data.Dataset`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
by calling ``model(features, **labels)``.
Subclass and override this method if you want to inject some custom behavior.
"""
num_examples = tf.data.experimental.cardinality(test_dataset).numpy()
if num_examples < 0:
raise ValueError("The training dataset must have an asserted cardinality")
approx = math.floor if self.args.dataloader_drop_last else math.ceil
steps = approx(num_examples / self.args.eval_batch_size)
ds = (
test_dataset.repeat()
.batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
.prefetch(tf.data.experimental.AUTOTUNE)
)
return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
def create_optimizer_and_scheduler(self, num_training_steps: int):
"""
Set up the optimizer and the learning rate scheduler.
We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
"""
if not self.optimizer and not self.lr_scheduler:
self.optimizer, self.lr_scheduler = create_optimizer(
self.args.learning_rate,
num_training_steps,
self.args.warmup_steps,
adam_beta1=self.args.adam_beta1,
adam_beta2=self.args.adam_beta2,
adam_epsilon=self.args.adam_epsilon,
weight_decay_rate=self.args.weight_decay,
power=self.args.poly_power,
)
def setup_wandb(self):
"""
Set up the optional Weights & Biases (`wandb`) integration.
One can subclass and override this method to customize the setup if needed. Find more information
`here <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:
Environment:
WANDB_PROJECT:
(Optional): str - "huggingface" by default, set this to a custom string to store results in a different project
WANDB_DISABLED:
(Optional): boolean - defaults to false, set to "true" to disable wandb entirely
"""
if hasattr(self, "_setup_wandb"):
warnings.warn(
"The `_setup_wandb` method is deprecated and won't be called in a future version, define `setup_wandb` in your subclass.",
FutureWarning,
)
return self._setup_wandb()
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
def setup_comet(self):
"""
Set up the optional Comet.ml integration.
Environment:
COMET_MODE:
(Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
COMET_PROJECT_NAME:
(Optional): str - Comet.ml project name for experiments
COMET_OFFLINE_DIRECTORY:
(Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"
For a number of configurable items in the environment,
see `here <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
"""
comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
experiment = None
if comet_mode == "ONLINE":
experiment = comet_ml.Experiment(**args)
logger.info("Automatic Comet.ml online logging enabled")
elif comet_mode == "OFFLINE":
args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
experiment = comet_ml.OfflineExperiment(**args)
logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")
if experiment is not None:
experiment._set_model_graph(self.model, framework="transformers")
experiment._log_parameters(self.args, prefix="args/", framework="transformers")
experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
def prediction_loop(
self,
dataset: tf.data.Dataset,
steps: int,
num_examples: int,
description: str,
prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
"""
Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.
Works both with or without labels.
"""
if hasattr(self, "_prediction_loop"):
warnings.warn(
"The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.",
FutureWarning,
)
return self._prediction_loop(
dataset, steps, num_examples, description, prediction_loss_only=prediction_loss_only
)
prediction_loss_only = (
prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
)
logger.info("***** Running %s *****", description)
logger.info(" Num examples = %d", num_examples)
logger.info(" Batch size = %d", self.args.eval_batch_size)
label_ids: np.ndarray = None
preds: np.ndarray = None
self.eval_loss = tf.keras.metrics.Sum()
# Reset the past mems state at the beginning of the evaluation if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(dataset):
logits = self.distributed_prediction_steps(batch)
_, labels = batch
if not prediction_loss_only:
if isinstance(logits, tuple):
logits = logits[0]
if isinstance(labels, tuple):
labels = labels[0]
if self.args.n_replicas > 1:
for val in logits.values:
if preds is None:
preds = val.numpy()
else:
preds = np.append(preds, val.numpy(), axis=0)
for val in labels.values:
if label_ids is None:
label_ids = val.numpy()
else:
label_ids = np.append(label_ids, val.numpy(), axis=0)
else:
if preds is None:
preds = logits.numpy()
else:
preds = np.append(preds, logits.numpy(), axis=0)
if label_ids is None:
label_ids = labels.numpy()
else:
label_ids = np.append(label_ids, labels.numpy(), axis=0)
if step == steps:
break
if self.compute_metrics is not None and preds is not None and label_ids is not None:
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
metrics["eval_loss"] = self.eval_loss.result().numpy() / steps
for key in list(metrics.keys()):
if not key.startswith("eval_"):
metrics[f"eval_{key}"] = metrics.pop(key)
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of evaluation
delattr(self, "_past")
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def log(self, logs: Dict[str, float]) -> None:
"""
Log :obj:`logs` on the various objects watching training.
Subclass and override this method to inject custom behavior.
Args:
logs (:obj:`Dict[str, float]`):
The values to log.
"""
if hasattr(self, "_log"):
warnings.warn(
"The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.",
FutureWarning,
)
return self._log(logs)
logs["epoch"] = self.epoch_logging
if self.tb_writer:
with self.tb_writer.as_default():
for k, v in logs.items():
tf.summary.scalar(k, v, step=self.global_step)
self.tb_writer.flush()
if is_wandb_available():
wandb.log(logs, step=self.global_step)
if is_comet_available():
experiment = comet_ml.config.get_global_experiment()
if experiment is not None:
experiment._log_metrics(
logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
)
output = {**logs, **{"step": self.global_step}}
logger.info(output)
def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
"""
Run evaluation and returns metrics.
The calling script will be responsible for providing a method to compute metrics, as they are
task-dependent (pass it to the init :obj:`compute_metrics` argument).
Args:
eval_dataset (:class:`~tf.data.Dataset`, `optional`):
Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.
Returns:
A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
"""
eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)
output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
logs = {**output.metrics}
logs["epoch"] = self.epoch_logging
self.log(logs)
return output.metrics
def prediction_step(
self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
"""
Compute the prediction on features and update the loss with labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, logits = self.run_model(features, labels, False)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
self.eval_loss.update_state(scaled_loss)
return logits
@tf.function
def distributed_prediction_steps(self, batch):
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
logits = self.args.strategy.run(self.prediction_step, inputs)
return logits
def train(self) -> None:
"""
Train the model.
"""
train_ds = self.get_train_tfdataset()
if self.args.debug:
tf.summary.trace_on(graph=True, profiler=True)
self.gradient_accumulator.reset()
num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size
# In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
# the dataset is repeated before being batched.
# It only has an effect when a TPU is used, which requires an explicit tensor shape to make
# the gradient accumulation implementation work.
approx = math.floor if self.args.dataloader_drop_last else math.ceil
num_update_steps_per_epoch = approx(num_update_steps_per_epoch)
# At least one update for each epoch.
num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
self.steps_per_epoch = num_update_steps_per_epoch
if self.args.max_steps > 0:
t_total = self.args.max_steps
epochs = (self.args.max_steps // self.steps_per_epoch) + int(
self.args.max_steps % self.steps_per_epoch > 0
)
else:
t_total = self.steps_per_epoch * self.args.num_train_epochs
epochs = self.args.num_train_epochs
# Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
epochs = float(epochs)
with self.args.strategy.scope():
self.create_optimizer_and_scheduler(num_training_steps=t_total)
folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)
iterations = self.optimizer.iterations
epochs_trained = 0
steps_trained_in_current_epoch = 0
if self.model.ckpt_manager.latest_checkpoint:
logger.info(
"Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
)
ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()
self.global_step = iterations.numpy()
epochs_trained = self.global_step // self.steps_per_epoch
steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", self.global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tf.summary.experimental.set_step(self.global_step)
with self.tb_writer.as_default():
tf.summary.text("args", self.args.to_json_string())
self.tb_writer.flush()
logger.info("***** Running training *****")
logger.info(" Num examples = %d", self.num_train_examples)
# TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
logger.info(" Num Epochs = %d", epochs)
logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
)
logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
logger.info(" Steps per epoch = %d", self.steps_per_epoch)
logger.info(" Total optimization steps = %d", t_total)
self.train_loss = tf.keras.metrics.Sum()
start_time = datetime.datetime.now()
for epoch_iter in range(epochs_trained, int(epochs)):
# Reset the past mems state at the beginning of each epoch if necessary.
if self.args.past_index >= 0:
self._past = None
for step, batch in enumerate(train_ds):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
self.distributed_training_steps(batch)
self.global_step = iterations.numpy()
self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch
training_loss = self.train_loss.result() / (step + 1)
if self.args.debug:
logs = {}
logs["loss"] = training_loss.numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.global_step == 1 and self.args.debug:
with self.tb_writer.as_default():
tf.summary.trace_export(
name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
)
if (
self.args.eval_steps > 0
and self.args.evaluate_during_training
and self.global_step % self.args.eval_steps == 0
):
self.evaluate()
if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
self.global_step == 1 and self.args.logging_first_step
):
logs = {}
logs["loss"] = training_loss.numpy()
logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
logs["epoch"] = self.epoch_logging
self.log(logs)
if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
ckpt_save_path = self.model.ckpt_manager.save()
logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))
if self.args.max_steps > 0 and self.global_step >= t_total:
break
if self.global_step % self.steps_per_epoch == 0:
break
self.train_loss.reset_states()
if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
break
end_time = datetime.datetime.now()
logger.info("Training took: {}".format(str(end_time - start_time)))
if self.args.past_index and hasattr(self, "_past"):
# Clean the state at the end of training
delattr(self, "_past")
def training_step(self, features, labels, nb_instances_in_global_batch):
"""
Perform a training step on features and labels.
Subclass and override to inject some custom behavior.
"""
per_example_loss, _ = self.run_model(features, labels, True)
scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
gradients = [
g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
]
if self.args.gradient_accumulation_steps > 1:
self.gradient_accumulator(gradients)
self.train_loss.update_state(scaled_loss)
if self.args.gradient_accumulation_steps == 1:
return gradients
def apply_gradients(self, features, labels, nb_instances_in_global_batch):
if self.args.gradient_accumulation_steps == 1:
gradients = self.training_step(features, labels, nb_instances_in_global_batch)
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
else:
for _ in tf.range(self.args.gradient_accumulation_steps):
reduced_features = {
k: ft[: self.args.train_batch_size // self.args.n_replicas] for k, ft in features.items()
}
reduced_labels = labels[: self.args.train_batch_size // self.args.n_replicas]
self.training_step(reduced_features, reduced_labels, nb_instances_in_global_batch)
features = {
k: tf.concat(
[ft[self.args.train_batch_size // self.args.n_replicas :], reduced_features[k]],
axis=0,
)
for k, ft in features.items()
}
labels = tf.concat(
[labels[self.args.train_batch_size // self.args.n_replicas :], reduced_labels], axis=0
)
gradients = self.gradient_accumulator.gradients
gradients = [
(tf.clip_by_value(grad, -self.args.max_grad_norm, self.args.max_grad_norm)) for grad in gradients
]
self.optimizer.apply_gradients(list(zip(gradients, self.model.trainable_variables)))
self.gradient_accumulator.reset()
@tf.function
def distributed_training_steps(self, batch):
with self.args.strategy.scope():
nb_instances_in_batch = self._compute_nb_instances(batch)
inputs = self._get_step_inputs(batch, nb_instances_in_batch)
self.args.strategy.run(self.apply_gradients, inputs)
@staticmethod
def _compute_nb_instances(batch):
labels = batch[-1]
if isinstance(labels, PerReplica):
labels = tf.concat(labels.values, axis=0)
nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
return nb_instances
@staticmethod
def _get_step_inputs(batch, nb_instances):
features, labels = batch
if isinstance(labels, PerReplica):
# need to make a `PerReplica` object for ``nb_instances``
nb_instances = PerReplica([nb_instances] * len(labels.values))
step_inputs = (features, labels, nb_instances)
return step_inputs
def run_model(self, features, labels, training):
"""
Computes the loss of the given features and labels pair.
Subclass and override this method if you want to inject some custom behavior.
Args:
features (:obj:`tf.Tensor`): A batch of input features.
labels (:obj:`tf.Tensor`): A batch of labels.
training (:obj:`bool`): Whether or not to run the model in training mode.
Returns:
A tuple of two :obj:`tf.Tensor`: The loss and logits.
"""
if hasattr(self, "_run_model"):
warnings.warn(
"The `_run_model` method is deprecated and won't be called in a future version, define `run_model` in your subclass.",
FutureWarning,
)
return self._run_model(features, labels, training)
if self.args.past_index >= 0 and getattr(self, "_past", None) is not None:
features["mems"] = self._past
if isinstance(labels, (dict)):
outputs = self.model(features, training=training, **labels)[:2]
else:
outputs = self.model(features, labels=labels, training=training)[:2]
loss, logits = outputs[:2]
if self.args.past_index >= 0:
self._past = outputs[self.args.past_index]
return loss, logits
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput:
"""
Run prediction and return predictions and potential metrics.
Depending on the dataset and your use case, your test dataset may contain labels.
In that case, this method will also return metrics, like in :obj:`evaluate()`.
Args:
test_dataset (:class:`~tf.data.Dataset`):
Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where
``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is
a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
calculated by calling ``model(features, **labels)``.
Returns:
`NamedTuple`:
predictions (:obj:`np.ndarray`):
The predictions on :obj:`test_dataset`.
label_ids (:obj:`np.ndarray`, `optional`):
The labels (if the dataset contained some).
metrics (:obj:`Dict[str, float]`, `optional`):
The potential dictionary of metrics (if the dataset contained labels).
"""
test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset)
return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
def save_model(self, output_dir: Optional[str] = None):
"""
Will save the model, so you can reload it using :obj:`from_pretrained()`.
"""
output_dir = output_dir if output_dir is not None else self.args.output_dir
logger.info("Saving model in {}".format(output_dir))
if not isinstance(self.model, TFPreTrainedModel):
raise ValueError("Trainer.model appears to not be a PreTrainedModel")
self.model.save_pretrained(output_dir)
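# --- Illustrative note (editor's addition, not part of the original source) ---
# _compute_nb_instances() relies on the convention that ignored / padded positions
# carry the label -100; only the remaining positions count toward the global number
# of instances used to scale the per-example loss. A tiny standalone check:
def _example_nb_instances():
    import tensorflow as tf

    labels = tf.constant([[1, 2, -100], [-100, 3, 4]])
    nb_instances = tf.reduce_sum(tf.cast(labels != -100, dtype=tf.int32))
    return int(nb_instances)  # 4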
|
__init__
|
:param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
:param int invoke_limit: Maximum number of victim model invocations allowed for each instance.
:param bool average_invoke: If True, the result includes "Avg. Victim Model Queries".
:param kwargs: Other parameters, see :py:class:`.DefaultAttackEval` for detail.
|
from .default import DefaultAttackEval
from ..classifier import Classifier
from ..attacker import Attacker
import json
from tqdm import tqdm
class InvokeLimitException(Exception):
pass
class InvokeLimitClassifierWrapper(Classifier):
def __init__(self, clsf, invoke_limit):
self.__invoke_limit = invoke_limit
self.__clsf = clsf
self.__brk = False
self.__invoke = 0
def clear(self):
self.__invoke = 0
def test(self, limit=True):
self.__brk = limit
def get_invoke(self):
return self.__invoke
def get_pred(self, input_, data):
if self.__brk and self.__invoke >= self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_pred(input_, data)
def get_prob(self, input_, data):
if self.__brk and self.__invoke >= self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_prob(input_, data)
def get_grad(self, input_, labels, data):
if self.__brk and self.__invoke > self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_grad(input_, labels, data)
class InvokeLimitAttackerWrapper(Attacker):
def __init__(self, attacker, clsf):
self.__attacker = attacker
self.__clsf = clsf
self.__exceed = False
def __call__(self, *args, **kwargs):
self.__clsf.test()
self.__clsf.clear()
self.__exceed = False
try:
ret = self.__attacker(*args, **kwargs)
except InvokeLimitException:
ret = None
self.__exceed = True
self.__clsf.test(limit=False)
return ret
def exceed(self):
return self.__exceed
class InvokeLimitedAttackEval(DefaultAttackEval):
"""
Evaluate attackers and classifiers under a limit on victim model invocations.
"""
# MASKED: __init__ function (lines 69-88)
def measure(self, sentA, sentB):
info = super().measure(sentA, sentB)
if self.__attacker.exceed():
info["Query Exceeded"] = True
else:
info["Query Exceeded"] = False
# only record queries for successful attacks
if info["Succeed"] and self.__average_invoke:
info["Queries"] = self.__classifier.get_invoke()
return info
def update(self, info):
info = super().update(info)
if "Queries" in info:
if "invoke" not in self.__result:
self.__result["invoke"] = 0
self.__result["invoke"] += info["Queries"]
if info["Query Exceeded"]:
if "out_of_invoke" not in self.__result:
self.__result["out_of_invoke"] = 0
self.__result["out_of_invoke"] += 1
return info
def clear(self):
super().clear()
self.__result = {}
def get_result(self):
ret = super().get_result()
if self.__average_invoke and "invoke" in self.__result:
ret["Avg. Victim Model Queries"] = self.__result["invoke"] / ret["Successful Instances"]
return ret
|
def __init__(self, attacker, classifier, invoke_limit=100,
average_invoke=False, **kwargs):
"""
:param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
:param int invoke_limit: The maximum number of victim model invocations allowed for each attacked instance.
:param bool average_invoke: If True, the result includes "Avg. Victim Model Queries".
:param kwargs: Other parameters; see :py:class:`.DefaultAttackEval` for details.
"""
super().__init__(attacker, classifier, **kwargs)
# wrap classifier, attacker after super().__init__
self.classifier = InvokeLimitClassifierWrapper(self.classifier, invoke_limit)
self.attacker = InvokeLimitAttackerWrapper(self.attacker, self.classifier)
# keep a private version
self.__attacker = self.attacker
self.__classifier = self.classifier
self.__average_invoke = average_invoke
| 69
| 88
|
from .default import DefaultAttackEval
from ..classifier import Classifier
from ..attacker import Attacker
import json
from tqdm import tqdm
class InvokeLimitException(Exception):
pass
class InvokeLimitClassifierWrapper(Classifier):
def __init__(self, clsf, invoke_limit):
self.__invoke_limit = invoke_limit
self.__clsf = clsf
self.__brk = False
self.__invoke = 0
def clear(self):
self.__invoke = 0
def test(self, limit=True):
self.__brk = limit
def get_invoke(self):
return self.__invoke
def get_pred(self, input_, data):
if self.__brk and self.__invoke >= self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_pred(input_, data)
def get_prob(self, input_, data):
if self.__brk and self.__invoke >= self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_prob(input_, data)
def get_grad(self, input_, labels, data):
if self.__brk and self.__invoke > self.__invoke_limit:
raise InvokeLimitException()
self.__invoke += len(input_)
return self.__clsf.get_grad(input_, labels, data)
class InvokeLimitAttackerWrapper(Attacker):
def __init__(self, attacker, clsf):
self.__attacker = attacker
self.__clsf = clsf
self.__exceed = False
def __call__(self, *args, **kwargs):
self.__clsf.test()
self.__clsf.clear()
self.__exceed = False
try:
ret = self.__attacker(*args, **kwargs)
except InvokeLimitException:
ret = None
self.__exceed = True
self.__clsf.test(limit=False)
return ret
def exceed(self):
return self.__exceed
class InvokeLimitedAttackEval(DefaultAttackEval):
"""
Evaluate attackers and classifiers under a limit on victim model invocations.
"""
def __init__(self, attacker, classifier, invoke_limit=100,
average_invoke=False, **kwargs):
"""
:param Attacker attacker: The attacker you use.
:param Classifier classifier: The classifier you want to attack.
:param int invoke_limit: The maximum number of victim model invocations allowed for each attacked instance.
:param bool average_invoke: If True, the result includes "Avg. Victim Model Queries".
:param kwargs: Other parameters; see :py:class:`.DefaultAttackEval` for details.
"""
super().__init__(attacker, classifier, **kwargs)
# wrap classifier, attacker after super().__init__
self.classifier = InvokeLimitClassifierWrapper(self.classifier, invoke_limit)
self.attacker = InvokeLimitAttackerWrapper(self.attacker, self.classifier)
# keep a private version
self.__attacker = self.attacker
self.__classifier = self.classifier
self.__average_invoke = average_invoke
def measure(self, sentA, sentB):
info = super().measure(sentA, sentB)
if self.__attacker.exceed():
info["Query Exceeded"] = True
else:
info["Query Exceeded"] = False
# only record queries for successful attacks
if info["Succeed"] and self.__average_invoke:
info["Queries"] = self.__classifier.get_invoke()
return info
def update(self, info):
info = super().update(info)
if "Queries" in info:
if "invoke" not in self.__result:
self.__result["invoke"] = 0
self.__result["invoke"] += info["Queries"]
if info["Query Exceeded"]:
if "out_of_invoke" not in self.__result:
self.__result["out_of_invoke"] = 0
self.__result["out_of_invoke"] += 1
return info
def clear(self):
super().clear()
self.__result = {}
def get_result(self):
ret = super().get_result()
if self.__average_invoke and "invoke" in self.__result:
ret["Avg. Victim Model Queries"] = self.__result["invoke"] / ret["Successful Instances"]
return ret
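A hedged usage sketch of the class above; `my_attacker`, `my_victim`, and `my_dataset` are placeholder names for an Attacker, a Classifier, and an iterable of attack instances, and the `eval()` entry point is assumed to be inherited from DefaultAttackEval (its exact signature may differ between library versions).
evaluator = InvokeLimitedAttackEval(my_attacker, my_victim,
                                    invoke_limit=500, average_invoke=True)
summary = evaluator.eval(my_dataset)   # assumed DefaultAttackEval interface
print(summary)                         # includes "Avg. Victim Model Queries" when enabled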
|
_compute_inverse_bounds
|
Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X_trans < -1 / lambda
0 <= lambda <= 2: X_trans is unbounded
lambda > 2: X_trans > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds.
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.search_space import SearchSpace
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import get_data, match_ci_width_truncated
from ax.models.types import TConfig
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast_list
from sklearn.preprocessing import PowerTransformer
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
logger = get_logger(__name__)
class PowerTransformY(Transform):
"""Transform the values to look as normally distributed as possible.
This fits a power transform to the data with the goal of making the transformed
values look as normally distributed as possible. We use Yeo-Johnson
(https://www.stat.umn.edu/arc/yjpower.pdf), which can handle both positive and
negative values.
While the transform seems to be quite robust, it probably makes sense to apply a
bit of winsorization and also standardize the inputs before applying the power
transform. The power transform will automatically standardize the data so the
data will remain standardized.
The transform can't be inverted for all values, so we apply clipping to move
values to the image of the transform. This behavior can be controlled via the
`clip_mean` setting.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional[modelbridge_module.base.ModelBridge] = None,
config: Optional[TConfig] = None,
) -> None:
if config is None:
raise ValueError("PowerTransform requires a config.")
# pyre-fixme[6]: Same issue as for LogY
metric_names = list(config.get("metrics", []))
if len(metric_names) == 0:
raise ValueError("Must specify at least one metric in the config.")
self.clip_mean = config.get("clip_mean", True)
self.metric_names = metric_names
Ys = get_data(observation_data=observation_data, metric_names=metric_names)
self.power_transforms = _compute_power_transforms(Ys=Ys)
self.inv_bounds = _compute_inverse_bounds(self.power_transforms, tol=1e-10)
def transform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for i, m in enumerate(obsd.metric_names):
if m in self.metric_names:
transform = self.power_transforms[m].transform
obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
mean=obsd.means[i],
variance=obsd.covariance[i, i],
transform=lambda y: transform(np.array(y, ndmin=2)),
lower_bound=-np.inf,
upper_bound=np.inf,
)
return observation_data
def untransform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for i, m in enumerate(obsd.metric_names):
if m in self.metric_names:
l, u = self.inv_bounds[m]
transform = self.power_transforms[m].inverse_transform
if not self.clip_mean and (obsd.means[i] < l or obsd.means[i] > u):
raise ValueError(
"Can't untransform mean outside the bounds without clipping"
)
obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
mean=obsd.means[i],
variance=obsd.covariance[i, i],
transform=lambda y: transform(np.array(y, ndmin=2)),
lower_bound=l,
upper_bound=u,
clip_mean=True,
)
return observation_data
def transform_optimization_config(
self,
optimization_config: OptimizationConfig,
modelbridge: Optional[modelbridge_module.base.ModelBridge],
fixed_features: ObservationFeatures,
) -> OptimizationConfig:
for c in optimization_config.all_constraints:
if isinstance(c, ScalarizedOutcomeConstraint):
c_metric_names = [metric.name for metric in c.metrics]
intersection = set(c_metric_names) & set(self.metric_names)
if intersection:
raise NotImplementedError(
f"PowerTransformY cannot be used for metric(s) {intersection} "
"that are part of a ScalarizedOutcomeConstraint."
)
elif c.metric.name in self.metric_names:
if c.relative:
raise ValueError(
f"PowerTransformY cannot be applied to metric {c.metric.name} "
"since it is subject to a relative constraint."
)
else:
transform = self.power_transforms[c.metric.name].transform
c.bound = transform(np.array(c.bound, ndmin=2)).item()
return optimization_config
def _compute_power_transforms(
Ys: Dict[str, List[float]]
) -> Dict[str, PowerTransformer]:
"""Compute power transforms."""
power_transforms = {}
for k, ys in Ys.items():
y = np.array(ys)[:, None] # Need to unsqueeze the last dimension
pt = PowerTransformer(method="yeo-johnson").fit(y)
power_transforms[k] = pt
return power_transforms
# MASKED: _compute_inverse_bounds function (lines 153-186)
|
def _compute_inverse_bounds(
power_transforms: Dict[str, PowerTransformer], tol: float = 1e-10
) -> Dict[str, Tuple[float, float]]:
"""Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X_trans < -1 / lambda
0 <= lambda <= 2: X_trans is unbounded
lambda > 2: X_trans > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds.
"""
inv_bounds = defaultdict()
for k, pt in power_transforms.items():
bounds = [-np.inf, np.inf]
mu, sigma = pt._scaler.mean_.item(), pt._scaler.scale_.item() # pyre-ignore
lambda_ = pt.lambdas_.item() # pyre-ignore
if lambda_ < -1 * tol:
bounds[1] = (-1.0 / lambda_ - mu) / sigma
elif lambda_ > 2.0 + tol:
bounds[0] = (1.0 / (2.0 - lambda_) - mu) / sigma
inv_bounds[k] = tuple(checked_cast_list(float, bounds))
return inv_bounds
| 153
| 186
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, List, Optional, Tuple
import numpy as np
from ax.core.observation import ObservationData, ObservationFeatures
from ax.core.optimization_config import OptimizationConfig
from ax.core.outcome_constraint import ScalarizedOutcomeConstraint
from ax.core.search_space import SearchSpace
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import get_data, match_ci_width_truncated
from ax.models.types import TConfig
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import checked_cast_list
from sklearn.preprocessing import PowerTransformer
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
logger = get_logger(__name__)
class PowerTransformY(Transform):
"""Transform the values to look as normally distributed as possible.
This fits a power transform to the data with the goal of making the transformed
values look as normally distributed as possible. We use Yeo-Johnson
(https://www.stat.umn.edu/arc/yjpower.pdf), which can handle both positive and
negative values.
While the transform seems to be quite robust, it probably makes sense to apply a
bit of winsorization and also standardize the inputs before applying the power
transform. The power transform will automatically standardize the data so the
data will remain standardized.
The transform can't be inverted for all values, so we apply clipping to move
values to the image of the transform. This behavior can be controlled via the
`clip_mean` setting.
"""
def __init__(
self,
search_space: SearchSpace,
observation_features: List[ObservationFeatures],
observation_data: List[ObservationData],
modelbridge: Optional[modelbridge_module.base.ModelBridge] = None,
config: Optional[TConfig] = None,
) -> None:
if config is None:
raise ValueError("PowerTransform requires a config.")
# pyre-fixme[6]: Same issue as for LogY
metric_names = list(config.get("metrics", []))
if len(metric_names) == 0:
raise ValueError("Must specify at least one metric in the config.")
self.clip_mean = config.get("clip_mean", True)
self.metric_names = metric_names
Ys = get_data(observation_data=observation_data, metric_names=metric_names)
self.power_transforms = _compute_power_transforms(Ys=Ys)
self.inv_bounds = _compute_inverse_bounds(self.power_transforms, tol=1e-10)
def transform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for i, m in enumerate(obsd.metric_names):
if m in self.metric_names:
transform = self.power_transforms[m].transform
obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
mean=obsd.means[i],
variance=obsd.covariance[i, i],
transform=lambda y: transform(np.array(y, ndmin=2)),
lower_bound=-np.inf,
upper_bound=np.inf,
)
return observation_data
def untransform_observation_data(
self,
observation_data: List[ObservationData],
observation_features: List[ObservationFeatures],
) -> List[ObservationData]:
"""Winsorize observation data in place."""
for obsd in observation_data:
for i, m in enumerate(obsd.metric_names):
if m in self.metric_names:
l, u = self.inv_bounds[m]
transform = self.power_transforms[m].inverse_transform
if not self.clip_mean and (obsd.means[i] < l or obsd.means[i] > u):
raise ValueError(
"Can't untransform mean outside the bounds without clipping"
)
obsd.means[i], obsd.covariance[i, i] = match_ci_width_truncated(
mean=obsd.means[i],
variance=obsd.covariance[i, i],
transform=lambda y: transform(np.array(y, ndmin=2)),
lower_bound=l,
upper_bound=u,
clip_mean=True,
)
return observation_data
def transform_optimization_config(
self,
optimization_config: OptimizationConfig,
modelbridge: Optional[modelbridge_module.base.ModelBridge],
fixed_features: ObservationFeatures,
) -> OptimizationConfig:
for c in optimization_config.all_constraints:
if isinstance(c, ScalarizedOutcomeConstraint):
c_metric_names = [metric.name for metric in c.metrics]
intersection = set(c_metric_names) & set(self.metric_names)
if intersection:
raise NotImplementedError(
f"PowerTransformY cannot be used for metric(s) {intersection} "
"that are part of a ScalarizedOutcomeConstraint."
)
elif c.metric.name in self.metric_names:
if c.relative:
raise ValueError(
f"PowerTransformY cannot be applied to metric {c.metric.name} "
"since it is subject to a relative constraint."
)
else:
transform = self.power_transforms[c.metric.name].transform
c.bound = transform(np.array(c.bound, ndmin=2)).item()
return optimization_config
def _compute_power_transforms(
Ys: Dict[str, List[float]]
) -> Dict[str, PowerTransformer]:
"""Compute power transforms."""
power_transforms = {}
for k, ys in Ys.items():
y = np.array(ys)[:, None] # Need to unsqueeze the last dimension
pt = PowerTransformer(method="yeo-johnson").fit(y)
power_transforms[k] = pt
return power_transforms
def _compute_inverse_bounds(
power_transforms: Dict[str, PowerTransformer], tol: float = 1e-10
) -> Dict[str, Tuple[float, float]]:
"""Computes the image of the transform so we can clip when we untransform.
The inverse of the Yeo-Johnson transform is given by:
if X >= 0 and lambda == 0:
X = exp(X_trans) - 1
elif X >= 0 and lambda != 0:
X = (X_trans * lambda + 1) ** (1 / lambda) - 1
elif X < 0 and lambda != 2:
X = 1 - (-(2 - lambda) * X_trans + 1) ** (1 / (2 - lambda))
elif X < 0 and lambda == 2:
X = 1 - exp(-X_trans)
We can break this down into three cases:
lambda < 0: X_trans < -1 / lambda
0 <= lambda <= 2: X_trans is unbounded
lambda > 2: X_trans > 1 / (2 - lambda)
Sklearn standardizes the transformed values to have mean zero and standard
deviation 1, so we also need to account for this when we compute the bounds.
"""
inv_bounds = defaultdict()
for k, pt in power_transforms.items():
bounds = [-np.inf, np.inf]
mu, sigma = pt._scaler.mean_.item(), pt._scaler.scale_.item() # pyre-ignore
lambda_ = pt.lambdas_.item() # pyre-ignore
if lambda_ < -1 * tol:
bounds[1] = (-1.0 / lambda_ - mu) / sigma
elif lambda_ > 2.0 + tol:
bounds[0] = (1.0 / (2.0 - lambda_) - mu) / sigma
inv_bounds[k] = tuple(checked_cast_list(float, bounds))
return inv_bounds
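A small self-contained sketch (not part of the Ax module above) that reproduces the bound logic of _compute_inverse_bounds for a single fitted PowerTransformer; the random data is only illustrative.
import numpy as np
from sklearn.preprocessing import PowerTransformer

y = np.random.RandomState(0).lognormal(size=(50, 1))          # skewed data, lambda far from 1
pt = PowerTransformer(method="yeo-johnson").fit(y)
lam = pt.lambdas_.item()
mu, sigma = pt._scaler.mean_.item(), pt._scaler.scale_.item() # standardization parameters
lower, upper = -np.inf, np.inf
if lam < 0:
    upper = (-1.0 / lam - mu) / sigma          # transformed values above this cannot be inverted
elif lam > 2:
    lower = (1.0 / (2.0 - lam) - mu) / sigma   # transformed values below this cannot be inverted
print(f"lambda={lam:.3f}, inverse image=({lower}, {upper})")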
|
write
|
Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
|
#
# Copyright 2019 Jonas Berg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. moduleauthor:: Jonas Berg
dummy_serial: A dummy/mock implementation of a serial port for testing purposes.
"""
__author__ = "Jonas Berg"
__license__ = "Apache License, Version 2.0"
import sys
import time
DEFAULT_TIMEOUT = 0.01
"""The default timeot value in seconds. Used if not set by the constructor."""
DEFAULT_BAUDRATE = 19200
"""The default baud rate. Used if not set by the constructor."""
VERBOSE = False
"""Set this to :const:`True` for printing the communication, and also details on the port initialization.
Might be monkey-patched in the calling test module.
"""
RESPONSES = {}
"""A dictionary of respones from the dummy serial port.
The key is the message (string) sent to the dummy serial port, and the item is the response (string)
from the dummy serial port.
Intended to be monkey-patched in the calling test module.
"""
RESPONSES["EXAMPLEREQUEST"] = "EXAMPLERESPONSE"
DEFAULT_RESPONSE = "NotFoundInResponseDictionary"
"""Response when no matching message (key) is found in the look-up dictionary.
Should not be an empty string, as that is interpreted as "no data available on port".
Might be monkey-patched in the calling test module.
"""
NO_DATA_PRESENT = ""
class Serial:
"""Dummy (mock) serial port for testing purposes.
Mimics the behavior of a serial port as defined by the `pySerial <https://github.com/pyserial/pyserial>`_ module.
Args:
* port:
* timeout:
Note:
As the portname argument is not used properly, only one port on :mod:`dummy_serial` can be used simultaneously.
"""
def __init__(self, *args, **kwargs):
self._waiting_data = NO_DATA_PRESENT
self._isOpen = True
self.port = kwargs["port"] # Serial port name.
self._initial_port_name = self.port # Initial name given to the serial port
try:
self.timeout = kwargs["timeout"]
except:
self.timeout = DEFAULT_TIMEOUT
try:
self.baudrate = kwargs["baudrate"]
except:
self.baudrate = DEFAULT_BAUDRATE
if VERBOSE:
_print_out("\nDummy_serial: Initializing")
_print_out("dummy_serial initialization args: " + repr(args))
_print_out("dummy_serial initialization kwargs: " + repr(kwargs) + "\n")
def __repr__(self):
"""String representation of the dummy_serial object"""
return "{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})".format(
self.__module__,
self.__class__.__name__,
id(self),
self._isOpen,
self.port,
self.timeout,
self._waiting_data,
)
@property
def is_open(self):
return self._isOpen
def reset_input_buffer(self):
pass
def reset_output_buffer(self):
pass
def open(self):
"""Open a (previously initialized) port on dummy_serial."""
if VERBOSE:
_print_out("\nDummy_serial: Opening port\n")
if self._isOpen:
raise IOError("Dummy_serial: The port is already open")
self._isOpen = True
self.port = self._initial_port_name
def close(self):
"""Close a port on dummy_serial."""
if VERBOSE:
_print_out("\nDummy_serial: Closing port\n")
if not self._isOpen:
raise IOError("Dummy_serial: The port is already closed")
self._isOpen = False
self.port = None
# MASKED: write function (lines 146-181)
def read(self, numberOfBytes):
"""Read from a port on dummy_serial.
The response is dependent on what was written last to the port on dummy_serial,
and what is defined in the :data:`RESPONSES` dictionary.
Args:
numberOfBytes (int): For compatibility with the real function.
Returns a **string** for Python2 and **bytes** for Python3.
If the response is shorter than numberOfBytes, it will sleep for timeout.
If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.
"""
if VERBOSE:
_print_out(
"\nDummy_serial: Reading from port (max length {!r} bytes)".format(
numberOfBytes
)
)
if numberOfBytes < 0:
raise IOError(
"Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}".format(
numberOfBytes
)
)
if not self._isOpen:
raise IOError("Dummy_serial: Trying to read, but the port is not open.")
# Do the actual reading from the waiting data, and simulate the influence of numberOfBytes
if self._waiting_data == DEFAULT_RESPONSE:
returnstring = self._waiting_data
elif numberOfBytes == len(self._waiting_data):
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
elif numberOfBytes < len(self._waiting_data):
if VERBOSE:
_print_out(
"Dummy_serial: The numberOfBytes to read is smaller than the available data. "
+ "Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}".format(
self._waiting_data, len(self._waiting_data), numberOfBytes
)
)
returnstring = self._waiting_data[:numberOfBytes]
self._waiting_data = self._waiting_data[numberOfBytes:]
else: # Wait for timeout, as we have asked for more data than available
if VERBOSE:
_print_out(
"Dummy_serial: The numberOfBytes to read is larger than the available data. "
+ "Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}".format(
self._waiting_data, len(self._waiting_data), numberOfBytes
)
)
time.sleep(self.timeout)
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
# TODO Adapt the behavior to better mimic the Windows behavior
if VERBOSE:
_print_out(
"Dummy_serial read return data: {!r} (has length {})\n".format(
returnstring, len(returnstring)
)
)
if sys.version_info[0] > 2: # Convert types to make it python3 compatible
return bytes(returnstring, encoding="latin1")
else:
return returnstring
def _print_out(inputstring):
"""Print the inputstring. To make it compatible with Python2 and Python3."""
sys.stdout.write(inputstring + "\n")
|
def write(self, inputdata):
"""Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
"""
if VERBOSE:
_print_out(
"\nDummy_serial: Writing to port. Given:" + repr(inputdata) + "\n"
)
if sys.version_info[0] > 2:
if not type(inputdata) == bytes:
raise TypeError(
"The input must be type bytes. Given:" + repr(inputdata)
)
inputstring = str(inputdata, encoding="latin1")
else:
inputstring = inputdata
if not self._isOpen:
raise IOError(
"Dummy_serial: Trying to write, but the port is not open. Given:"
+ repr(inputdata)
)
# Look up which data should be waiting for subsequent read commands
try:
response = RESPONSES[inputstring]
except:
response = DEFAULT_RESPONSE
self._waiting_data = response
| 146
| 181
|
#
# Copyright 2019 Jonas Berg
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
.. moduleauthor:: Jonas Berg
dummy_serial: A dummy/mock implementation of a serial port for testing purposes.
"""
__author__ = "Jonas Berg"
__license__ = "Apache License, Version 2.0"
import sys
import time
DEFAULT_TIMEOUT = 0.01
"""The default timeot value in seconds. Used if not set by the constructor."""
DEFAULT_BAUDRATE = 19200
"""The default baud rate. Used if not set by the constructor."""
VERBOSE = False
"""Set this to :const:`True` for printing the communication, and also details on the port initialization.
Might be monkey-patched in the calling test module.
"""
RESPONSES = {}
"""A dictionary of respones from the dummy serial port.
The key is the message (string) sent to the dummy serial port, and the item is the response (string)
from the dummy serial port.
Intended to be monkey-patched in the calling test module.
"""
RESPONSES["EXAMPLEREQUEST"] = "EXAMPLERESPONSE"
DEFAULT_RESPONSE = "NotFoundInResponseDictionary"
"""Response when no matching message (key) is found in the look-up dictionary.
Should not be an empty string, as that is interpreted as "no data available on port".
Might be monkey-patched in the calling test module.
"""
NO_DATA_PRESENT = ""
class Serial:
"""Dummy (mock) serial port for testing purposes.
Mimics the behavior of a serial port as defined by the `pySerial <https://github.com/pyserial/pyserial>`_ module.
Args:
* port:
* timeout:
Note:
As the portname argument is not used properly, only one port on :mod:`dummy_serial` can be used simultaneously.
"""
def __init__(self, *args, **kwargs):
self._waiting_data = NO_DATA_PRESENT
self._isOpen = True
self.port = kwargs["port"] # Serial port name.
self._initial_port_name = self.port # Initial name given to the serial port
try:
self.timeout = kwargs["timeout"]
except:
self.timeout = DEFAULT_TIMEOUT
try:
self.baudrate = kwargs["baudrate"]
except:
self.baudrate = DEFAULT_BAUDRATE
if VERBOSE:
_print_out("\nDummy_serial: Initializing")
_print_out("dummy_serial initialization args: " + repr(args))
_print_out("dummy_serial initialization kwargs: " + repr(kwargs) + "\n")
def __repr__(self):
"""String representation of the dummy_serial object"""
return "{0}.{1}<id=0x{2:x}, open={3}>(port={4!r}, timeout={5!r}, waiting_data={6!r})".format(
self.__module__,
self.__class__.__name__,
id(self),
self._isOpen,
self.port,
self.timeout,
self._waiting_data,
)
@property
def is_open(self):
return self._isOpen
def reset_input_buffer(self):
pass
def reset_output_buffer(self):
pass
def open(self):
"""Open a (previously initialized) port on dummy_serial."""
if VERBOSE:
_print_out("\nDummy_serial: Opening port\n")
if self._isOpen:
raise IOError("Dummy_serial: The port is already open")
self._isOpen = True
self.port = self._initial_port_name
def close(self):
"""Close a port on dummy_serial."""
if VERBOSE:
_print_out("\nDummy_serial: Closing port\n")
if not self._isOpen:
raise IOError("Dummy_serial: The port is already closed")
self._isOpen = False
self.port = None
def write(self, inputdata):
"""Write to a port on dummy_serial.
Args:
inputdata (string/bytes): data for sending to the port on dummy_serial. Will affect the response
for subsequent read operations.
Note that for Python2, the inputdata should be a **string**. For Python3 it should be of type **bytes**.
"""
if VERBOSE:
_print_out(
"\nDummy_serial: Writing to port. Given:" + repr(inputdata) + "\n"
)
if sys.version_info[0] > 2:
if not type(inputdata) == bytes:
raise TypeError(
"The input must be type bytes. Given:" + repr(inputdata)
)
inputstring = str(inputdata, encoding="latin1")
else:
inputstring = inputdata
if not self._isOpen:
raise IOError(
"Dummy_serial: Trying to write, but the port is not open. Given:"
+ repr(inputdata)
)
# Look up which data should be waiting for subsequent read commands
try:
response = RESPONSES[inputstring]
except:
response = DEFAULT_RESPONSE
self._waiting_data = response
def read(self, numberOfBytes):
"""Read from a port on dummy_serial.
The response is dependent on what was written last to the port on dummy_serial,
and what is defined in the :data:`RESPONSES` dictionary.
Args:
numberOfBytes (int): For compatibility with the real function.
Returns a **string** for Python2 and **bytes** for Python3.
If the response is shorter than numberOfBytes, it will sleep for timeout.
If the response is longer than numberOfBytes, it will return only numberOfBytes bytes.
"""
if VERBOSE:
_print_out(
"\nDummy_serial: Reading from port (max length {!r} bytes)".format(
numberOfBytes
)
)
if numberOfBytes < 0:
raise IOError(
"Dummy_serial: The numberOfBytes to read must not be negative. Given: {!r}".format(
numberOfBytes
)
)
if not self._isOpen:
raise IOError("Dummy_serial: Trying to read, but the port is not open.")
# Do the actual reading from the waiting data, and simulate the influence of numberOfBytes
if self._waiting_data == DEFAULT_RESPONSE:
returnstring = self._waiting_data
elif numberOfBytes == len(self._waiting_data):
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
elif numberOfBytes < len(self._waiting_data):
if VERBOSE:
_print_out(
"Dummy_serial: The numberOfBytes to read is smaller than the available data. "
+ "Some bytes will be kept for later. Available data: {!r} (length = {}), numberOfBytes: {}".format(
self._waiting_data, len(self._waiting_data), numberOfBytes
)
)
returnstring = self._waiting_data[:numberOfBytes]
self._waiting_data = self._waiting_data[numberOfBytes:]
else: # Wait for timeout, as we have asked for more data than available
if VERBOSE:
_print_out(
"Dummy_serial: The numberOfBytes to read is larger than the available data. "
+ "Will sleep until timeout. Available data: {!r} (length = {}), numberOfBytes: {}".format(
self._waiting_data, len(self._waiting_data), numberOfBytes
)
)
time.sleep(self.timeout)
returnstring = self._waiting_data
self._waiting_data = NO_DATA_PRESENT
# TODO Adapt the behavior to better mimic the Windows behavior
if VERBOSE:
_print_out(
"Dummy_serial read return data: {!r} (has length {})\n".format(
returnstring, len(returnstring)
)
)
if sys.version_info[0] > 2: # Convert types to make it python3 compatible
return bytes(returnstring, encoding="latin1")
else:
return returnstring
def _print_out(inputstring):
"""Print the inputstring. To make it compatible with Python2 and Python3."""
sys.stdout.write(inputstring + "\n")
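A short usage sketch under the assumption that the file above is importable as `dummy_serial`; it monkey-patches RESPONSES as the module docstring suggests and exercises write()/read().
import dummy_serial                        # assumed module name for the file above

dummy_serial.RESPONSES["PING"] = "PONG"    # monkey-patch the lookup table
port = dummy_serial.Serial(port="COM1", timeout=0.1)
port.write(b"PING")                        # bytes on Python 3, as documented
print(port.read(4))                        # -> b'PONG'
port.close()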
|