Datasets:
file_path stringlengths 10 10 | code stringlengths 79 330k | code_en stringlengths 79 330k | language stringclasses 1
value | license stringclasses 0
values | token_count int32 |
|---|---|---|---|---|---|
0037437.py | from classes.Despachante import *
from classes.Sistema import *
from classes.Processo import *
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename as fileChooser
class EscDeProcessos:
def __init__(self, master=None):
#Tamanho da janela
master.minsize(width=720, height=480)
master.maxsize(width=720, height=480)
#Variavel para saber quando o programa deve iniciar
self.executando = FALSE
#Iniciadores das classes
self.sist = Sistema()
self.arq = "processos.txt"
#self.desp = None
self.desp = Despachante(self.arq, self.sist.pegaTotalRam())
self.esc = Escalonador(2)
self.termAux = 0
self.pausado = FALSE
self.tProcessos = 0
#1º container
self.menu = Frame(master)
self.menu["pady"] = 5
self.menu.pack(side=TOP)
#Botão de escolher o arquivo de processos no PC
self.escolherArq = Button(self.menu, text='Escolher Arquivo', command=self.escolherArq)
self.escolherArq["font"] = ("Arial", "10")
self.escolherArq["width"] = 15
self.escolherArq.pack(side=LEFT)
#Botão que inicia a execução do sistema
self.executar = Button(self.menu, text="Escalonar Processos")
self.executar.bind("<Button-1>", self.escalonarProcessos)
self.executar["font"] = ("Arial", "10")
self.executar["width"] = 15
self.executar.pack(side=LEFT)
self.pause = Button(self.menu, text="Pausar", command=self.pausar)
self.pause["font"] = ("Arial", "10")
self.pause["width"] = 15
self.pause.pack(side=LEFT)
#2º container
self.info = Frame(master)
self.info["pady"] = 5
self.info.pack(side=TOP)
#Texto para exibir o arquivo que está sendo executado no momento
self.avisoExe = Label(self.info,text="Executando null")
self.avisoExe["font"] = ("Arial", "10")
self.avisoExe.pack(side=TOP)
#Label que mostra o processo que está sendo executado
self.pAtual = Label(self.info, text="Processo Atual: --- \nEstado atual: --- \n\nCiclos do processo executados: --- / --- \nMemória consumida (MB): 0")
self.pAtual["font"] = ("Arial", "10")
#self.pAtual.place(x=500,y=20)
self.pAtual.pack(side=BOTTOM)
# 3º container
self.memInfo = Frame(master)
self.memInfo.pack()
#Funções relacinadas a exibição da memória usada
self.mem = Label(self.memInfo, text="Memória ocupada: "+
str(self.sist.pegaRamUsada())+"MB / "+
str(self.sist.pegaTotalRam())+"MB")
self.mem["font"] = ("Arial", "10")
self.mem.pack(side=TOP)
#Função para ter a barra vermelha ao completar 90%
self.style = ttk.Style()
self.style.theme_use('classic')
self.style.configure("red.Horizontal.TProgressbar", background='red')
self.memBar = ttk.Progressbar(self.memInfo, orient ="horizontal",length = 200, mode ="determinate")
self.memBar["maximum"] = 8192
self.memBar["value"] = self.sist.pegaRamUsada()
self.memBar.pack(side=LEFT)
self.porcentBar = Label(self.memInfo, text=self.percentMem())
self.porcentBar.pack(side=LEFT)
self.listasExecutando = Label(master, text=self.listasAtuais())
self.listasExecutando.pack()
self.terminados = Scrollbar(master)
self.terminados.place(x=700,y=5)
self.listboxTerminados = Listbox(master, yscrollcommand=self.terminados.set)
self.listboxTerminados.place(x=577, y=5)
self.terminados.config(command=self.listboxTerminados.yview)
self.listboxTerminados.insert(END, "Processos terminados:")
self.listboxTerminados.bind('<<ListboxSelect>>', self.CurSelet)
#Funções para mostrar as oscilaões das variáveis do sistema
self.tempo = Label(master, text="Ciclos totais : "+str(self.sist.pegaTempoAtual()))
self.tempo.place(x=10, y=5)
self.impDisp = Label(master,
text="Impressoras disponíveis: " + str(self.sist.dispositivosESLivres(0)))
self.impDisp.place(x=10, y=25)
self.scnDisp = Label(master,
text="Scanners disponíveis: " + str(self.sist.dispositivosESLivres(1)))
self.scnDisp.place(x=10, y=45)
self.mdmDisp = Label(master,
text="Modems disponíveis: " + str(self.sist.dispositivosESLivres(2)))
self.mdmDisp.place(x=10, y=65)
#self.mdmDisp.pack()
self.cdDisp = Label(master,
text="Drives de CD disponíveis: " + str(self.sist.dispositivosESLivres(3)))
self.cdDisp.place(x=10, y=85)
self.totalProcessos = Label(master,
text="Processos executados: 0/" + str(self.tProcessos))
self.totalProcessos.place(x=10, y=105)
# Botão para fechar o sistema
self.sair = Button(master, text="Sair")
self.sair["font"] = ("Calibri", "10")
self.sair["width"] = 10
self.sair["command"] = root.destroy
self.sair.place(x=640,y=455)
def pausar(self):
if (self.pausado == FALSE):
root.after_cancel(AFTER)
self.pausado = TRUE
else:
self.atualizaDados()
self.pausado = FALSE
def create_window(self, processo):
win = Toplevel(root)
for i in self.sist.listaTerminados:
if (processo == i.pegaId()):
message ="Tempo de chegada: " + str(i.pegaTempoChegada()) + "\n" + \
"Prioridade: " + str(i.pegaPrioridade()) + "\n" + \
"Tempo de serviço: " + str(i.pegaTempoDeServico()) + "\n" + \
"Memória consumida (MB): " + str(i.pegaMemoriaOcupada()) + "\n" + \
"Impressoras usadas: " + str(i.pegaNumDePerifericos()[0]) + "\n" + \
"Scanners usados: " + str(i.pegaNumDePerifericos()[1]) + "\n" + \
"Modems usados: " + str(i.pegaNumDePerifericos()[2]) + "\n" + \
"Drivers de CD usados: " + str(i.pegaNumDePerifericos()[3]) + "\n" + \
"Tempo de início: " + str(i.pegaTempoInicio()) + "\n" + \
"Tempo total do processo: " + str(i.pegaTempoTotal()) + "\n" + \
"Tempo total suspenso: " + str(i.pegaTempoSuspenso()) + "\n" + \
"Tempo total bloqueado: " + str(i.pegaTempoBloqueado()) + "\n" + \
"Estado atual: " + i.stringEstado()
titulo = "Histórico do processo " + i.pegaId()
Label(win, text=message).pack()
win.iconbitmap('win.ico')
win.title(titulo)
Button(win, text='OK', command=win.destroy).pack()
def CurSelet(self, evt):
aux = self.listboxTerminados.get(self.listboxTerminados.curselection())
if(len(aux)<=10):
self.create_window(aux)
def listasAtuais(self):
texto = ""
for i in range(len(self.esc.filas)):
if (i == 0): texto += "Fila de Tempo Real: "
if (i == 1): texto += "\nFila de Usuário 1: "
if (i == 2): texto += "\nFila de Usuário 2: "
if (i == 3): texto += "\nFila de Usuário 3: "
for p in self.esc.filas[i]:
if (p.pegaTempoChegada() <= self.sist.pegaTempoAtual()): texto += str(p.pegaId()+" - ")
return texto
def addTerminados(self):
if(len(self.sist.listaTerminados)>self.termAux):
self.listboxTerminados.insert(END, str((self.sist.listaTerminados[self.termAux]).pegaId()))
self.termAux += 1
def percentMem(self):
if (self.memBar["value"]==0):
return "0%"
return str(int((self.memBar["value"]/self.memBar["maximum"])*100))+"%"
#função relacionada ao botão escolherArquivo
def escolherArq(self):
self.arq = fileChooser() #abre a busca de arquivos do sistema
self.desp = Despachante(self.arq, self.sist.pegaTotalRam()) #cria o despachante após escolher o arquivo
#função relacionada ao botão de executar o sistema
def escalonarProcessos(self, event):
self.tProcessos = len(self.desp.fEntrada)
self.totalProcessos["text"] = "Processos executados: " + str(len(self.sist.listaTerminados)) + " / " + str(self.tProcessos)
self.textoExecutando = str(self.arq).split("/")
self.textoExecutando = self.textoExecutando[len(self.textoExecutando)-1]
self.avisoExe["text"] = "Executando: " + self.textoExecutando #Mostra o arquivo que está sendo executado
self.executando = TRUE
#função que auxilia o loop principal
self.i = 0
return
def atualizaDados(self):
#atualizadores dos textos
self.mem["text"] = "Memória usada: "+str(self.sist.pegaRamUsada())+"MB / "+\
str(self.sist.pegaTotalRam())+"MB"
self.impDisp["text"] = "Impressoras disponíveis: " + str(self.sist.dispositivosESLivres(0))
self.scnDisp["text"] = "Scanners disponíveis: " + str(self.sist.dispositivosESLivres(1))
self.mdmDisp["text"] = "Modems disponíveis: " + str(self.sist.dispositivosESLivres(2))
self.cdDisp["text"] = "Drives de CD disponíveis: " + str(self.sist.dispositivosESLivres(3))
# Aloca recursos de E/S de um processo:
self.addTerminados()
self.listasExecutando["text"]=self.listasAtuais()
#atualizador da barra
self.memBar["value"] = self.sist.pegaRamUsada()
self.porcentBar["text"] = self.percentMem()
if (self.memBar["value"] >= 0.9*self.sist.pegaTotalRam()):
self.memBar["style"] = "red.Horizontal.TProgressbar"
if (self.memBar["value"] < 0.9*self.sist.pegaTotalRam()):
self.memBar["style"] = ""
#executa uma iteração do escalonamento
if (self.executando):
self.tempo["text"] = "Ciclos totais : " + str(self.sist.pegaTempoAtual()) + " ciclos"
if(self.esc.pAtual): self.pAtual["text"] = "Processo Atual: " + str(self.esc.pAtual)
else: self.pAtual["text"] = "Processo Atual : --- \nEstado atual: --- \n\nCiclos do processo executados: --- / --- \nMemória consumida (MB): 0"
fTr, fUs1, fUs2, fUs3 = self.desp.submeteProcessos(self.sist.pegaTempoAtual())
self.esc.atualizaFilas(fTr, fUs1, fUs2, fUs3)
self.sist.executa(self.esc)
self.i += 1
self.totalProcessos["text"] = "Processos executados: " + str(len(self.sist.listaTerminados)) + "/" + str(self.tProcessos)
if(self.tProcessos == len(self.sist.listaTerminados)):
self.executando = FALSE
self.pAtual["text"] = "Processo Atual : --- \nEstado atual: --- \n\nCiclos do processo executados: --- / --- \nMemória consumida (MB): 0"
self.avisoExe["text"] = "Finalizado "+ self.textoExecutando
root.update()
global AFTER
AFTER = root.after(100, self.atualizaDados)
#funçoes para o funcionamento e criação da janela
root = Tk()
app = EscDeProcessos(root)
root.title("Escalonador de Processos v0.01")
#root.iconbitmap('win.ico')
app.atualizaDados()
root.mainloop() | from classes.Despachante import *
from classes.Sistema import *
from classes.Processo import *
from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askopenfilename as fileChooser
class EscDeProcessos:
def __init__(self, master=None):
#Tamanho da janela
master.minsize(width=720, height=480)
master.maxsize(width=720, height=480)
#Variavel para saber quando o programa deve iniciar
self.executando = FALSE
#Iniciadores das classes
self.sist = Sistema()
self.arq = "processos.txt"
#self.desp = None
self.desp = Despachante(self.arq, self.sist.pegaTotalRam())
self.esc = Escalonador(2)
self.termAux = 0
self.pausado = FALSE
self.tProcessos = 0
#1º container
self.menu = Frame(master)
self.menu["pady"] = 5
self.menu.pack(side=TOP)
#Botão de escolher o arquivo de processos no PC
self.escolherArq = Button(self.menu, text='Escolher Arquivo', command=self.escolherArq)
self.escolherArq["font"] = ("Arial", "10")
self.escolherArq["width"] = 15
self.escolherArq.pack(side=LEFT)
#Botão que inicia a execução do sistema
self.executar = Button(self.menu, text="Escalonar Processos")
self.executar.bind("<Button-1>", self.escalonarProcessos)
self.executar["font"] = ("Arial", "10")
self.executar["width"] = 15
self.executar.pack(side=LEFT)
self.pause = Button(self.menu, text="Pausar", command=self.pausar)
self.pause["font"] = ("Arial", "10")
self.pause["width"] = 15
self.pause.pack(side=LEFT)
#2º container
self.info = Frame(master)
self.info["pady"] = 5
self.info.pack(side=TOP)
#Texto para exibir o arquivo que está sendo executado no momento
self.avisoExe = Label(self.info,text="Executando null")
self.avisoExe["font"] = ("Arial", "10")
self.avisoExe.pack(side=TOP)
#Label que mostra o processo que está sendo executado
self.pAtual = Label(self.info, text="Processo Atual: --- \nEstado atual: --- \n\nCiclos do processo executados: --- / --- \nMemória consumida (MB): 0")
self.pAtual["font"] = ("Arial", "10")
#self.pAtual.place(x=500,y=20)
self.pAtual.pack(side=BOTTOM)
# 3º container
self.memInfo = Frame(master)
self.memInfo.pack()
#Funções relacinadas a exibição da memória usada
self.mem = Label(self.memInfo, text="Memória ocupada: "+
str(self.sist.pegaRamUsada())+"MB / "+
str(self.sist.pegaTotalRam())+"MB")
self.mem["font"] = ("Arial", "10")
self.mem.pack(side=TOP)
#Função para ter a barra vermelha ao completar 90%
self.style = ttk.Style()
self.style.theme_use('classic')
self.style.configure("red.Horizontal.TProgressbar", background='red')
self.memBar = ttk.Progressbar(self.memInfo, orient ="horizontal",length = 200, mode ="determinate")
self.memBar["maximum"] = 8192
self.memBar["value"] = self.sist.pegaRamUsada()
self.memBar.pack(side=LEFT)
self.porcentBar = Label(self.memInfo, text=self.percentMem())
self.porcentBar.pack(side=LEFT)
self.listasExecutando = Label(master, text=self.listasAtuais())
self.listasExecutando.pack()
self.terminados = Scrollbar(master)
self.terminados.place(x=700,y=5)
self.listboxTerminados = Listbox(master, yscrollcommand=self.terminados.set)
self.listboxTerminados.place(x=577, y=5)
self.terminados.config(command=self.listboxTerminados.yview)
self.listboxTerminados.insert(END, "Processos terminados:")
self.listboxTerminados.bind('<<ListboxSelect>>', self.CurSelet)
#Funções para mostrar as oscilaões das variáveis do sistema
self.tempo = Label(master, text="Ciclos totais : "+str(self.sist.pegaTempoAtual()))
self.tempo.place(x=10, y=5)
self.impDisp = Label(master,
text="Impressoras disponíveis: " + str(self.sist.dispositivosESLivres(0)))
self.impDisp.place(x=10, y=25)
self.scnDisp = Label(master,
text="Scanners disponíveis: " + str(self.sist.dispositivosESLivres(1)))
self.scnDisp.place(x=10, y=45)
self.mdmDisp = Label(master,
text="Modems disponíveis: " + str(self.sist.dispositivosESLivres(2)))
self.mdmDisp.place(x=10, y=65)
#self.mdmDisp.pack()
self.cdDisp = Label(master,
text="Drives de CD disponíveis: " + str(self.sist.dispositivosESLivres(3)))
self.cdDisp.place(x=10, y=85)
self.totalProcessos = Label(master,
text="Processos executados: 0/" + str(self.tProcessos))
self.totalProcessos.place(x=10, y=105)
# Botão para fechar o sistema
self.sair = Button(master, text="Sair")
self.sair["font"] = ("Calibri", "10")
self.sair["width"] = 10
self.sair["command"] = root.destroy
self.sair.place(x=640,y=455)
def pausar(self):
if (self.pausado == FALSE):
root.after_cancel(AFTER)
self.pausado = TRUE
else:
self.atualizaDados()
self.pausado = FALSE
def create_window(self, processo):
win = Toplevel(root)
for i in self.sist.listaTerminados:
if (processo == i.pegaId()):
message ="Tempo de chegada: " + str(i.pegaTempoChegada()) + "\n" + \
"Prioridade: " + str(i.pegaPrioridade()) + "\n" + \
"Tempo de serviço: " + str(i.pegaTempoDeServico()) + "\n" + \
"Memória consumida (MB): " + str(i.pegaMemoriaOcupada()) + "\n" + \
"Impressoras usadas: " + str(i.pegaNumDePerifericos()[0]) + "\n" + \
"Scanners usados: " + str(i.pegaNumDePerifericos()[1]) + "\n" + \
"Modems usados: " + str(i.pegaNumDePerifericos()[2]) + "\n" + \
"Drivers de CD usados: " + str(i.pegaNumDePerifericos()[3]) + "\n" + \
"Tempo de início: " + str(i.pegaTempoInicio()) + "\n" + \
"Tempo total do processo: " + str(i.pegaTempoTotal()) + "\n" + \
"Tempo total suspenso: " + str(i.pegaTempoSuspenso()) + "\n" + \
"Tempo total bloqueado: " + str(i.pegaTempoBloqueado()) + "\n" + \
"Estado atual: " + i.stringEstado()
titulo = "Histórico do processo " + i.pegaId()
Label(win, text=message).pack()
win.iconbitmap('win.ico')
win.title(titulo)
Button(win, text='OK', command=win.destroy).pack()
def CurSelet(self, evt):
aux = self.listboxTerminados.get(self.listboxTerminados.curselection())
if(len(aux)<=10):
self.create_window(aux)
def listasAtuais(self):
texto = ""
for i in range(len(self.esc.filas)):
if (i == 0): texto += "Fila de Tempo Real: "
if (i == 1): texto += "\nFila de Usuário 1: "
if (i == 2): texto += "\nFila de Usuário 2: "
if (i == 3): texto += "\nFila de Usuário 3: "
for p in self.esc.filas[i]:
if (p.pegaTempoChegada() <= self.sist.pegaTempoAtual()): texto += str(p.pegaId()+" - ")
return texto
def addTerminados(self):
if(len(self.sist.listaTerminados)>self.termAux):
self.listboxTerminados.insert(END, str((self.sist.listaTerminados[self.termAux]).pegaId()))
self.termAux += 1
def percentMem(self):
if (self.memBar["value"]==0):
return "0%"
return str(int((self.memBar["value"]/self.memBar["maximum"])*100))+"%"
#função relacionada ao botão escolherArquivo
def escolherArq(self):
self.arq = fileChooser() #abre a busca de arquivos do sistema
self.desp = Despachante(self.arq, self.sist.pegaTotalRam()) #cria o despachante após escolher o arquivo
#função relacionada ao botão de executar o sistema
def escalonarProcessos(self, event):
self.tProcessos = len(self.desp.fEntrada)
self.totalProcessos["text"] = "Processos executados: " + str(len(self.sist.listaTerminados)) + " / " + str(self.tProcessos)
self.textoExecutando = str(self.arq).split("/")
self.textoExecutando = self.textoExecutando[len(self.textoExecutando)-1]
self.avisoExe["text"] = "Executando: " + self.textoExecutando #Mostra o arquivo que está sendo executado
self.executando = TRUE
#função que auxilia o loop principal
self.i = 0
return
def atualizaDados(self):
#atualizadores dos textos
self.mem["text"] = "Memória usada: "+str(self.sist.pegaRamUsada())+"MB / "+\
str(self.sist.pegaTotalRam())+"MB"
self.impDisp["text"] = "Impressoras disponíveis: " + str(self.sist.dispositivosESLivres(0))
self.scnDisp["text"] = "Scanners disponíveis: " + str(self.sist.dispositivosESLivres(1))
self.mdmDisp["text"] = "Modems disponíveis: " + str(self.sist.dispositivosESLivres(2))
self.cdDisp["text"] = "Drives de CD disponíveis: " + str(self.sist.dispositivosESLivres(3))
# Aloca recursos de E/S de um processo:
self.addTerminados()
self.listasExecutando["text"]=self.listasAtuais()
#atualizador da barra
self.memBar["value"] = self.sist.pegaRamUsada()
self.porcentBar["text"] = self.percentMem()
if (self.memBar["value"] >= 0.9*self.sist.pegaTotalRam()):
self.memBar["style"] = "red.Horizontal.TProgressbar"
if (self.memBar["value"] < 0.9*self.sist.pegaTotalRam()):
self.memBar["style"] = ""
#executa uma iteração do escalonamento
if (self.executando):
self.tempo["text"] = "Ciclos totais : " + str(self.sist.pegaTempoAtual()) + " ciclos"
if(self.esc.pAtual): self.pAtual["text"] = "Processo Atual: " + str(self.esc.pAtual)
else: self.pAtual["text"] = "Processo Atual : --- \nEstado atual: --- \n\nCiclos do processo executados: --- / --- \nMemória consumida (MB): 0"
fTr, fUs1, fUs2, fUs3 = self.desp.submeteProcessos(self.sist.pegaTempoAtual())
self.esc.atualizaFilas(fTr, fUs1, fUs2, fUs3)
self.sist.executa(self.esc)
self.i += 1
self.totalProcessos["text"] = "Processos executados: " + str(len(self.sist.listaTerminados)) + "/" + str(self.tProcessos)
if(self.tProcessos == len(self.sist.listaTerminados)):
self.executando = FALSE
self.pAtual["text"] = "Processo Atual : --- \nEstado atual: --- \n\nCiclos do processo executados: --- / --- \nMemória consumida (MB): 0"
self.avisoExe["text"] = "Finalizado "+ self.textoExecutando
root.update()
global AFTER
AFTER = root.after(100, self.atualizaDados)
#funçoes para o funcionamento e criação da janela
root = Tk()
app = EscDeProcessos(root)
root.title("Escalonador de Processos v0.01")
#root.iconbitmap('win.ico')
app.atualizaDados()
root.mainloop() | en | null | null |
0036153.py | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reports binary size metrics for LaCrOS build artifacts.
More information at //docs/speed/binary_size/metrics.md.
"""
import argparse
import collections
import contextlib
import json
import logging
import os
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def _SysPath(path):
"""Library import context that temporarily appends |path| to |sys.path|."""
if path and path not in sys.path:
sys.path.insert(0, path)
else:
path = None # Indicates that |sys.path| is not modified.
try:
yield
finally:
if path:
sys.path.pop(0)
DIR_SOURCE_ROOT = os.environ.get(
'CHECKOUT_SOURCE_ROOT',
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
BUILD_COMMON_PATH = os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common')
TRACING_PATH = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'catapult',
'tracing')
EU_STRIP_PATH = os.path.join(DIR_SOURCE_ROOT, 'buildtools', 'third_party',
'eu-strip', 'bin', 'eu-strip')
with _SysPath(BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
with _SysPath(TRACING_PATH):
from tracing.value import convert_chart_json # pylint: disable=import-error
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'LaCrOS resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_KEY_RAW = 'raw'
_KEY_GZIPPED = 'gzipped'
_KEY_STRIPPED = 'stripped'
_KEY_STRIPPED_GZIPPED = 'stripped_then_gzipped'
class _Group:
"""A group of build artifacts whose file sizes are summed and tracked.
Build artifacts for size tracking fall under these categories:
* File: A single file.
* Group: A collection of files.
* Dir: All files under a directory.
Attributes:
paths: A list of files or directories to be tracked together.
title: The display name of the group.
track_stripped: Whether to also track summed stripped ELF sizes.
track_compressed: Whether to also track summed compressed sizes.
"""
def __init__(self, paths, title, track_stripped=False,
track_compressed=False):
self.paths = paths
self.title = title
self.track_stripped = track_stripped
self.track_compressed = track_compressed
# List of disjoint build artifact groups for size tracking. This list should be
# synched with lacros-amd64-generic-binary-size-rel builder contents (specified
# in # //infra/config/subprojects/chromium/ci.star) and
# chromeos-amd64-generic-lacros-internal builder (specified in src-internal).
_TRACKED_GROUPS = [
_Group(paths=['chrome'],
title='File: chrome',
track_stripped=True,
track_compressed=True),
_Group(paths=['chrome_crashpad_handler'],
title='File: chrome_crashpad_handler'),
_Group(paths=['icudtl.dat'], title='File: icudtl.dat'),
_Group(paths=['nacl_helper'], title='File: nacl_helper'),
_Group(paths=['nacl_irt_x86_64.nexe'], title='File: nacl_irt_x86_64.nexe'),
_Group(paths=['resources.pak'], title='File: resources.pak'),
_Group(paths=[
'chrome_100_percent.pak', 'chrome_200_percent.pak', 'headless_lib.pak'
],
title='Group: Other PAKs'),
_Group(paths=['snapshot_blob.bin'], title='Group: Misc'),
_Group(paths=['locales/'], title='Dir: locales'),
_Group(paths=['swiftshader/'], title='Dir: swiftshader'),
_Group(paths=['WidevineCdm/'], title='Dir: WidevineCdm'),
]
def _visit_paths(base_dir, paths):
"""Itemizes files specified by a list of paths.
Args:
base_dir: Base directory for all elements in |paths|.
paths: A list of filenames or directory names to specify files whose sizes
to be counted. Directories are recursed. There's no de-duping effort.
Non-existing files or directories are ignored (with warning message).
"""
for path in paths:
full_path = os.path.join(base_dir, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
for dirpath, _, filenames in os.walk(full_path):
for filename in filenames:
yield os.path.join(dirpath, filename)
else: # Assume is file.
yield full_path
else:
logging.critical('Not found: %s', path)
def _is_probably_elf(filename):
"""Heuristically decides whether |filename| is ELF via magic signature."""
with open(filename, 'rb') as fh:
return fh.read(4) == '\x7FELF'
def _is_unstrippable_elf(filename):
"""Identifies known-unstrippable ELF files to denoise the system."""
return filename.endswith('.nexe') or filename.endswith('libwidevinecdm.so')
def _get_filesize(filename):
"""Returns the size of a file, or 0 if file is not found."""
try:
return os.path.getsize(filename)
except OSError:
logging.critical('Failed to get size: %s', filename)
return 0
def _get_gzipped_filesize(filename):
"""Returns the gzipped size of a file, or 0 if file is not found."""
BUFFER_SIZE = 65536
if not os.path.isfile(filename):
return 0
try:
# Call gzip externally instead of using gzip package since it's > 2x faster.
cmd = ['gzip', '-c', filename]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# Manually counting bytes instead of using len(p.communicate()[0]) to avoid
# buffering the entire compressed data (can be ~100 MB).
ret = 0
while True:
chunk = len(p.stdout.read(BUFFER_SIZE))
if chunk == 0:
break
ret += chunk
return ret
except OSError:
logging.critical('Failed to get gzipped size: %s', filename)
return 0
def _get_catagorized_filesizes(filename):
"""Measures |filename| sizes under various transforms.
Returns: A Counter (keyed by _Key_* constants) that stores measured sizes.
"""
sizes = collections.Counter()
sizes[_KEY_RAW] = _get_filesize(filename)
sizes[_KEY_GZIPPED] = _get_gzipped_filesize(filename)
# Pre-assign values for non-ELF, or in case of failure for ELF.
sizes[_KEY_STRIPPED] = sizes[_KEY_RAW]
sizes[_KEY_STRIPPED_GZIPPED] = sizes[_KEY_GZIPPED]
if _is_probably_elf(filename) and not _is_unstrippable_elf(filename):
try:
fd, temp_file = tempfile.mkstemp()
os.close(fd)
cmd = [EU_STRIP_PATH, filename, '-o', temp_file]
subprocess.check_output(cmd)
sizes[_KEY_STRIPPED] = _get_filesize(temp_file)
sizes[_KEY_STRIPPED_GZIPPED] = _get_gzipped_filesize(temp_file)
if sizes[_KEY_STRIPPED] > sizes[_KEY_RAW]:
# This weird case has been observed for libwidevinecdm.so.
logging.critical('Stripping made things worse for %s' % filename)
except subprocess.CalledProcessError:
logging.critical('Failed to strip file: %s' % filename)
finally:
os.unlink(temp_file)
return sizes
def _dump_chart_json(output_dir, chartjson):
"""Writes chart histogram to JSON files.
Output files:
results-chart.json contains the chart JSON.
perf_results.json contains histogram JSON for Catapult.
Args:
output_dir: Directory to place the JSON files.
chartjson: Source JSON data for output files.
"""
results_path = os.path.join(output_dir, 'results-chart.json')
logging.critical('Dumping chartjson to %s', results_path)
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file, indent=2)
# We would ideally generate a histogram set directly instead of generating
# chartjson then converting. However, perf_tests_results_helper is in
# //build, which doesn't seem to have any precedent for depending on
# anything in Catapult. This can probably be fixed, but since this doesn't
# need to be super fast or anything, converting is a good enough solution
# for the time being.
histogram_result = convert_chart_json.ConvertChartJson(results_path)
if histogram_result.returncode != 0:
raise Exception('chartjson conversion failed with error: ' +
histogram_result.stdout)
histogram_path = os.path.join(output_dir, 'perf_results.json')
logging.critical('Dumping histograms to %s', histogram_path)
with open(histogram_path, 'w') as json_file:
json_file.write(histogram_result.stdout)
def _run_resource_sizes(args):
"""Main flow to extract and output size data."""
chartjson = _BASE_CHART.copy()
report_func = perf_tests_results_helper.ReportPerfResult
total_sizes = collections.Counter()
def report_sizes(sizes, title, track_stripped, track_compressed):
report_func(chart_data=chartjson,
graph_title=title,
trace_title='size',
value=sizes[_KEY_RAW],
units='bytes')
if track_stripped:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped)',
trace_title='size',
value=sizes[_KEY_STRIPPED],
units='bytes')
if track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Gzipped)',
trace_title='size',
value=sizes[_KEY_GZIPPED],
units='bytes')
if track_stripped and track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped, Gzipped)',
trace_title='size',
value=sizes[_KEY_STRIPPED_GZIPPED],
units='bytes')
for g in _TRACKED_GROUPS:
sizes = sum(
map(_get_catagorized_filesizes, _visit_paths(args.out_dir, g.paths)),
collections.Counter())
report_sizes(sizes, g.title, g.track_stripped, g.track_compressed)
# Total compressed size is summed over individual compressed sizes, instead
# of concatanating first, then compress everything. This is done for
# simplicity. It also gives a conservative size estimate (assuming file
# metadata and overheads are negligible).
total_sizes += sizes
report_sizes(total_sizes, 'Total', True, True)
_dump_chart_json(args.output_dir, chartjson)
def main():
"""Parses arguments and runs high level flows."""
argparser = argparse.ArgumentParser(description='Writes LaCrOS size metrics.')
argparser.add_argument('--chromium-output-directory',
dest='out_dir',
required=True,
type=os.path.realpath,
help='Location of the build artifacts.')
output_group = argparser.add_mutually_exclusive_group()
output_group.add_argument('--output-dir',
default='.',
help='Directory to save chartjson to.')
# Accepted to conform to the isolated script interface, but ignored.
argparser.add_argument('--isolated-script-test-filter',
help=argparse.SUPPRESS)
argparser.add_argument('--isolated-script-test-perf-output',
type=os.path.realpath,
help=argparse.SUPPRESS)
output_group.add_argument(
'--isolated-script-test-output',
type=os.path.realpath,
help='File to which results will be written in the simplified JSON '
'output format.')
args = argparser.parse_args()
isolated_script_output = {'valid': False, 'failures': []}
if args.isolated_script_test_output:
test_name = 'lacros_resource_sizes'
args.output_dir = os.path.join(
os.path.dirname(args.isolated_script_test_output), test_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
try:
_run_resource_sizes(args)
isolated_script_output = {'valid': True, 'failures': []}
finally:
if args.isolated_script_test_output:
results_path = os.path.join(args.output_dir, 'test_results.json')
with open(results_path, 'w') as output_file:
json.dump(isolated_script_output, output_file)
with open(args.isolated_script_test_output, 'w') as output_file:
json.dump(isolated_script_output, output_file)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reports binary size metrics for LaCrOS build artifacts.
More information at //docs/speed/binary_size/metrics.md.
"""
import argparse
import collections
import contextlib
import json
import logging
import os
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def _SysPath(path):
"""Library import context that temporarily appends |path| to |sys.path|."""
if path and path not in sys.path:
sys.path.insert(0, path)
else:
path = None # Indicates that |sys.path| is not modified.
try:
yield
finally:
if path:
sys.path.pop(0)
DIR_SOURCE_ROOT = os.environ.get(
'CHECKOUT_SOURCE_ROOT',
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
BUILD_COMMON_PATH = os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common')
TRACING_PATH = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'catapult',
'tracing')
EU_STRIP_PATH = os.path.join(DIR_SOURCE_ROOT, 'buildtools', 'third_party',
'eu-strip', 'bin', 'eu-strip')
with _SysPath(BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
with _SysPath(TRACING_PATH):
from tracing.value import convert_chart_json # pylint: disable=import-error
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'LaCrOS resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_KEY_RAW = 'raw'
_KEY_GZIPPED = 'gzipped'
_KEY_STRIPPED = 'stripped'
_KEY_STRIPPED_GZIPPED = 'stripped_then_gzipped'
class _Group:
"""A group of build artifacts whose file sizes are summed and tracked.
Build artifacts for size tracking fall under these categories:
* File: A single file.
* Group: A collection of files.
* Dir: All files under a directory.
Attributes:
paths: A list of files or directories to be tracked together.
title: The display name of the group.
track_stripped: Whether to also track summed stripped ELF sizes.
track_compressed: Whether to also track summed compressed sizes.
"""
def __init__(self, paths, title, track_stripped=False,
track_compressed=False):
self.paths = paths
self.title = title
self.track_stripped = track_stripped
self.track_compressed = track_compressed
# List of disjoint build artifact groups for size tracking. This list should be
# synched with lacros-amd64-generic-binary-size-rel builder contents (specified
# in # //infra/config/subprojects/chromium/ci.star) and
# chromeos-amd64-generic-lacros-internal builder (specified in src-internal).
_TRACKED_GROUPS = [
_Group(paths=['chrome'],
title='File: chrome',
track_stripped=True,
track_compressed=True),
_Group(paths=['chrome_crashpad_handler'],
title='File: chrome_crashpad_handler'),
_Group(paths=['icudtl.dat'], title='File: icudtl.dat'),
_Group(paths=['nacl_helper'], title='File: nacl_helper'),
_Group(paths=['nacl_irt_x86_64.nexe'], title='File: nacl_irt_x86_64.nexe'),
_Group(paths=['resources.pak'], title='File: resources.pak'),
_Group(paths=[
'chrome_100_percent.pak', 'chrome_200_percent.pak', 'headless_lib.pak'
],
title='Group: Other PAKs'),
_Group(paths=['snapshot_blob.bin'], title='Group: Misc'),
_Group(paths=['locales/'], title='Dir: locales'),
_Group(paths=['swiftshader/'], title='Dir: swiftshader'),
_Group(paths=['WidevineCdm/'], title='Dir: WidevineCdm'),
]
def _visit_paths(base_dir, paths):
"""Itemizes files specified by a list of paths.
Args:
base_dir: Base directory for all elements in |paths|.
paths: A list of filenames or directory names to specify files whose sizes
to be counted. Directories are recursed. There's no de-duping effort.
Non-existing files or directories are ignored (with warning message).
"""
for path in paths:
full_path = os.path.join(base_dir, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
for dirpath, _, filenames in os.walk(full_path):
for filename in filenames:
yield os.path.join(dirpath, filename)
else: # Assume is file.
yield full_path
else:
logging.critical('Not found: %s', path)
def _is_probably_elf(filename):
"""Heuristically decides whether |filename| is ELF via magic signature."""
with open(filename, 'rb') as fh:
return fh.read(4) == '\x7FELF'
def _is_unstrippable_elf(filename):
"""Identifies known-unstrippable ELF files to denoise the system."""
return filename.endswith('.nexe') or filename.endswith('libwidevinecdm.so')
def _get_filesize(filename):
"""Returns the size of a file, or 0 if file is not found."""
try:
return os.path.getsize(filename)
except OSError:
logging.critical('Failed to get size: %s', filename)
return 0
def _get_gzipped_filesize(filename):
"""Returns the gzipped size of a file, or 0 if file is not found."""
BUFFER_SIZE = 65536
if not os.path.isfile(filename):
return 0
try:
# Call gzip externally instead of using gzip package since it's > 2x faster.
cmd = ['gzip', '-c', filename]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# Manually counting bytes instead of using len(p.communicate()[0]) to avoid
# buffering the entire compressed data (can be ~100 MB).
ret = 0
while True:
chunk = len(p.stdout.read(BUFFER_SIZE))
if chunk == 0:
break
ret += chunk
return ret
except OSError:
logging.critical('Failed to get gzipped size: %s', filename)
return 0
def _get_catagorized_filesizes(filename):
"""Measures |filename| sizes under various transforms.
Returns: A Counter (keyed by _Key_* constants) that stores measured sizes.
"""
sizes = collections.Counter()
sizes[_KEY_RAW] = _get_filesize(filename)
sizes[_KEY_GZIPPED] = _get_gzipped_filesize(filename)
# Pre-assign values for non-ELF, or in case of failure for ELF.
sizes[_KEY_STRIPPED] = sizes[_KEY_RAW]
sizes[_KEY_STRIPPED_GZIPPED] = sizes[_KEY_GZIPPED]
if _is_probably_elf(filename) and not _is_unstrippable_elf(filename):
try:
fd, temp_file = tempfile.mkstemp()
os.close(fd)
cmd = [EU_STRIP_PATH, filename, '-o', temp_file]
subprocess.check_output(cmd)
sizes[_KEY_STRIPPED] = _get_filesize(temp_file)
sizes[_KEY_STRIPPED_GZIPPED] = _get_gzipped_filesize(temp_file)
if sizes[_KEY_STRIPPED] > sizes[_KEY_RAW]:
# This weird case has been observed for libwidevinecdm.so.
logging.critical('Stripping made things worse for %s' % filename)
except subprocess.CalledProcessError:
logging.critical('Failed to strip file: %s' % filename)
finally:
os.unlink(temp_file)
return sizes
def _dump_chart_json(output_dir, chartjson):
"""Writes chart histogram to JSON files.
Output files:
results-chart.json contains the chart JSON.
perf_results.json contains histogram JSON for Catapult.
Args:
output_dir: Directory to place the JSON files.
chartjson: Source JSON data for output files.
"""
results_path = os.path.join(output_dir, 'results-chart.json')
logging.critical('Dumping chartjson to %s', results_path)
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file, indent=2)
# We would ideally generate a histogram set directly instead of generating
# chartjson then converting. However, perf_tests_results_helper is in
# //build, which doesn't seem to have any precedent for depending on
# anything in Catapult. This can probably be fixed, but since this doesn't
# need to be super fast or anything, converting is a good enough solution
# for the time being.
histogram_result = convert_chart_json.ConvertChartJson(results_path)
if histogram_result.returncode != 0:
raise Exception('chartjson conversion failed with error: ' +
histogram_result.stdout)
histogram_path = os.path.join(output_dir, 'perf_results.json')
logging.critical('Dumping histograms to %s', histogram_path)
with open(histogram_path, 'w') as json_file:
json_file.write(histogram_result.stdout)
def _run_resource_sizes(args):
"""Main flow to extract and output size data."""
chartjson = _BASE_CHART.copy()
report_func = perf_tests_results_helper.ReportPerfResult
total_sizes = collections.Counter()
def report_sizes(sizes, title, track_stripped, track_compressed):
report_func(chart_data=chartjson,
graph_title=title,
trace_title='size',
value=sizes[_KEY_RAW],
units='bytes')
if track_stripped:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped)',
trace_title='size',
value=sizes[_KEY_STRIPPED],
units='bytes')
if track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Gzipped)',
trace_title='size',
value=sizes[_KEY_GZIPPED],
units='bytes')
if track_stripped and track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped, Gzipped)',
trace_title='size',
value=sizes[_KEY_STRIPPED_GZIPPED],
units='bytes')
for g in _TRACKED_GROUPS:
sizes = sum(
map(_get_catagorized_filesizes, _visit_paths(args.out_dir, g.paths)),
collections.Counter())
report_sizes(sizes, g.title, g.track_stripped, g.track_compressed)
# Total compressed size is summed over individual compressed sizes, instead
# of concatanating first, then compress everything. This is done for
# simplicity. It also gives a conservative size estimate (assuming file
# metadata and overheads are negligible).
total_sizes += sizes
report_sizes(total_sizes, 'Total', True, True)
_dump_chart_json(args.output_dir, chartjson)
def main():
"""Parses arguments and runs high level flows."""
argparser = argparse.ArgumentParser(description='Writes LaCrOS size metrics.')
argparser.add_argument('--chromium-output-directory',
dest='out_dir',
required=True,
type=os.path.realpath,
help='Location of the build artifacts.')
output_group = argparser.add_mutually_exclusive_group()
output_group.add_argument('--output-dir',
default='.',
help='Directory to save chartjson to.')
# Accepted to conform to the isolated script interface, but ignored.
argparser.add_argument('--isolated-script-test-filter',
help=argparse.SUPPRESS)
argparser.add_argument('--isolated-script-test-perf-output',
type=os.path.realpath,
help=argparse.SUPPRESS)
output_group.add_argument(
'--isolated-script-test-output',
type=os.path.realpath,
help='File to which results will be written in the simplified JSON '
'output format.')
args = argparser.parse_args()
isolated_script_output = {'valid': False, 'failures': []}
if args.isolated_script_test_output:
test_name = 'lacros_resource_sizes'
args.output_dir = os.path.join(
os.path.dirname(args.isolated_script_test_output), test_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
try:
_run_resource_sizes(args)
isolated_script_output = {'valid': True, 'failures': []}
finally:
if args.isolated_script_test_output:
results_path = os.path.join(args.output_dir, 'test_results.json')
with open(results_path, 'w') as output_file:
json.dump(isolated_script_output, output_file)
with open(args.isolated_script_test_output, 'w') as output_file:
json.dump(isolated_script_output, output_file)
if __name__ == '__main__':
main()
| en | null | null |
0035653.py | """
Convert characters (chr) to integer (int) labels and vice versa.
REVIEW: index 0 bug, also see:
https://github.com/baidu-research/warp-ctc/tree/master/tensorflow_binding
`ctc_loss`_ maps labels from 0=<unused>, 1=<space>, 2=a, ..., 27=z, 28=<blank>
See: https://www.tensorflow.org/api_docs/python/tf/nn/ctc_loss
"""
__MAP = r' abcdefghijklmnopqrstuvwxyz' # 27 characters including <space>.
__CTOI = dict()
__ITOC = dict([(0, '')]) # This is in case the net decodes a 0 on step 0.
if not __CTOI or not __ITOC:
for i, c in enumerate(__MAP):
__CTOI.update({c: i + 1})
__ITOC.update({i + 1: c})
def ctoi(char):
"""
Convert character label to integer.
Args:
char (char): Character label.
Returns:
int: Integer representation.
"""
if char not in __MAP:
raise ValueError('Invalid input character \'{}\'.'.format(char))
if not len(char) == 1:
raise ValueError('"{}" is not a valid character.'.format(char))
return __CTOI[char.lower()]
def itoc(integer):
"""
Convert integer label to character.
Args:
integer (int): Integer label.
Returns:
char: Character representation.
"""
if not 0 <= integer < num_classes():
raise ValueError('Integer label ({}) out of range.'.format(integer))
return __ITOC[integer]
def num_classes():
"""
Return number of different classes, +1 for the <blank> label.
Returns:
int: Number of labels +1.
"""
return len(__MAP) + 2
| """
Convert characters (chr) to integer (int) labels and vice versa.
REVIEW: index 0 bug, also see:
https://github.com/baidu-research/warp-ctc/tree/master/tensorflow_binding
`ctc_loss`_ maps labels from 0=<unused>, 1=<space>, 2=a, ..., 27=z, 28=<blank>
See: https://www.tensorflow.org/api_docs/python/tf/nn/ctc_loss
"""
__MAP = r' abcdefghijklmnopqrstuvwxyz' # 27 characters including <space>.
__CTOI = dict()
__ITOC = dict([(0, '')]) # This is in case the net decodes a 0 on step 0.
if not __CTOI or not __ITOC:
for i, c in enumerate(__MAP):
__CTOI.update({c: i + 1})
__ITOC.update({i + 1: c})
def ctoi(char):
"""
Convert character label to integer.
Args:
char (char): Character label.
Returns:
int: Integer representation.
"""
if char not in __MAP:
raise ValueError('Invalid input character \'{}\'.'.format(char))
if not len(char) == 1:
raise ValueError('"{}" is not a valid character.'.format(char))
return __CTOI[char.lower()]
def itoc(integer):
"""
Convert integer label to character.
Args:
integer (int): Integer label.
Returns:
char: Character representation.
"""
if not 0 <= integer < num_classes():
raise ValueError('Integer label ({}) out of range.'.format(integer))
return __ITOC[integer]
def num_classes():
"""
Return number of different classes, +1 for the <blank> label.
Returns:
int: Number of labels +1.
"""
return len(__MAP) + 2
| en | null | null |
0037957.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimm.endpoint import endpoint_data
class ListProjectsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imm', '2017-09-06', 'ListProjects','imm')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_MaxKeys(self): # Integer
return self.get_query_params().get('MaxKeys')
def set_MaxKeys(self, MaxKeys): # Integer
self.add_query_param('MaxKeys', MaxKeys)
def get_Marker(self): # String
return self.get_query_params().get('Marker')
def set_Marker(self, Marker): # String
self.add_query_param('Marker', Marker)
| # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkimm.endpoint import endpoint_data
class ListProjectsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'imm', '2017-09-06', 'ListProjects','imm')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_MaxKeys(self): # Integer
return self.get_query_params().get('MaxKeys')
def set_MaxKeys(self, MaxKeys): # Integer
self.add_query_param('MaxKeys', MaxKeys)
def get_Marker(self): # String
return self.get_query_params().get('Marker')
def set_Marker(self, Marker): # String
self.add_query_param('Marker', Marker)
| en | null | null |
0001021.py | # Write a Python program to get execution time for a Python method.
import time
def sum_of_n_numbers(x):
start_time = time.time()
s = 0
for i in range(1, x + 1):
s = s + i
end_time = time.time()
return s, end_time - start_time
n = 5
print("\nTime to sum of 1 to ", n, " and required time to calculate is :", sum_of_n_numbers(n))
| # Write a Python program to get execution time for a Python method.
import time
def sum_of_n_numbers(x):
start_time = time.time()
s = 0
for i in range(1, x + 1):
s = s + i
end_time = time.time()
return s, end_time - start_time
n = 5
print("\nTime to sum of 1 to ", n, " and required time to calculate is :", sum_of_n_numbers(n))
| en | null | null |
0017967.py | import re
import time
from django.conf import settings
from django.utils.timezone import make_aware, make_naive, utc
re_pattern = re.compile('[^\u0000-\uD7FF\uE000-\uFFFF]+', re.UNICODE)
def sanitize_unicode(u):
# We may not be able to store all special characters thanks
# to MySQL's boneheadedness, so accept the minor loss of fidelity
# in the cached data fields.
return re_pattern.sub(' ', u)
def could_be_utc(dt):
if settings.USE_TZ:
return make_aware(dt, utc)
else:
if dt.tzinfo:
return make_naive(dt, utc)
else:
return dt
class RetryError(Exception):
def __init__(self, fn, tries, exceptions):
name = getattr(fn, '__name__', None) or str(fn)
super().__init__('%s failed after %d tries' % (name, tries))
self.exceptions = exceptions
def retry_with_backoff(fn, tries=10, wait=0.5, exception_classes=(Exception,)):
exceptions = []
for t in range(tries):
try:
return fn()
except exception_classes as e:
exceptions.append(e)
time.sleep(wait * (1.5**t))
raise RetryError(fn, tries, exceptions)
| import re
import time
from django.conf import settings
from django.utils.timezone import make_aware, make_naive, utc
re_pattern = re.compile('[^\u0000-\uD7FF\uE000-\uFFFF]+', re.UNICODE)
def sanitize_unicode(u):
# We may not be able to store all special characters thanks
# to MySQL's boneheadedness, so accept the minor loss of fidelity
# in the cached data fields.
return re_pattern.sub(' ', u)
def could_be_utc(dt):
if settings.USE_TZ:
return make_aware(dt, utc)
else:
if dt.tzinfo:
return make_naive(dt, utc)
else:
return dt
class RetryError(Exception):
def __init__(self, fn, tries, exceptions):
name = getattr(fn, '__name__', None) or str(fn)
super().__init__('%s failed after %d tries' % (name, tries))
self.exceptions = exceptions
def retry_with_backoff(fn, tries=10, wait=0.5, exception_classes=(Exception,)):
exceptions = []
for t in range(tries):
try:
return fn()
except exception_classes as e:
exceptions.append(e)
time.sleep(wait * (1.5**t))
raise RetryError(fn, tries, exceptions)
| en | null | null |
0048230.py | """
Implements the DIAL-protocol to communicate with the Chromecast
"""
from collections import namedtuple
import json
import logging
import socket
import ssl
import urllib.request
from uuid import UUID
import zeroconf
from .const import CAST_TYPE_CHROMECAST, CAST_TYPES, SERVICE_TYPE_HOST
XML_NS_UPNP_DEVICE = "{urn:schemas-upnp-org:device-1-0}"
FORMAT_BASE_URL_HTTP = "http://{}:8008"
FORMAT_BASE_URL_HTTPS = "https://{}:8443"
_LOGGER = logging.getLogger(__name__)
def get_host_from_service(service, zconf):
"""Resolve host and port from service."""
service_info = None
if service.type == SERVICE_TYPE_HOST:
return service.data + (None,)
try:
service_info = zconf.get_service_info("_googlecast._tcp.local.", service.data)
if service_info:
_LOGGER.debug(
"get_info_from_service resolved service %s to service_info %s",
service,
service_info,
)
except IOError:
pass
return _get_host_from_zc_service_info(service_info) + (service_info,)
def _get_host_from_zc_service_info(service_info: zeroconf.ServiceInfo):
""" Get hostname or IP + port from zeroconf service_info. """
host = None
port = None
if (
service_info
and service_info.port
and (service_info.server or len(service_info.addresses) > 0)
):
if len(service_info.addresses) > 0:
host = socket.inet_ntoa(service_info.addresses[0])
else:
host = service_info.server.lower()
port = service_info.port
return (host, port)
def _get_status(host, services, zconf, path, secure, timeout, context):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
if not host:
for service in services.copy():
host, _, _ = get_host_from_service(service, zconf)
if host:
_LOGGER.debug("Resolved service %s to %s", service, host)
break
headers = {"content-type": "application/json"}
if secure:
url = FORMAT_BASE_URL_HTTPS.format(host) + path
else:
url = FORMAT_BASE_URL_HTTP.format(host) + path
has_context = bool(context)
if secure and not has_context:
context = get_ssl_context()
req = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(req, timeout=timeout, context=context) as response:
data = response.read()
return json.loads(data.decode("utf-8"))
def get_ssl_context():
"""Create an SSL context."""
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
return context
def get_device_status(host, services=None, zconf=None, timeout=10, context=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
try:
status = _get_status(
host,
services,
zconf,
"/setup/eureka_info?options=detail",
True,
timeout,
context,
)
friendly_name = status.get("name", "Unknown Chromecast")
model_name = "Unknown model name"
manufacturer = "Unknown manufacturer"
if "detail" in status:
model_name = status["detail"].get("model_name", model_name)
manufacturer = status["detail"].get("manufacturer", manufacturer)
udn = status.get("ssdp_udn", None)
cast_type = CAST_TYPES.get(model_name.lower(), CAST_TYPE_CHROMECAST)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
return DeviceStatus(friendly_name, model_name, manufacturer, uuid, cast_type)
except (urllib.error.HTTPError, urllib.error.URLError, OSError, ValueError):
return None
def _get_group_info(host, group):
name = group.get("name", "Unknown group name")
udn = group.get("uuid", None)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
elected_leader = group.get("elected_leader", "")
elected_leader_split = elected_leader.rsplit(":", 1)
leader_host = None
leader_port = None
if elected_leader == "self" and "cast_port" in group:
leader_host = host
leader_port = group["cast_port"]
elif len(elected_leader_split) == 2:
# The port in the URL is not useful, but we can scan the host
leader_host = elected_leader_split[0]
return MultizoneInfo(name, uuid, leader_host, leader_port)
def get_multizone_status(host, services=None, zconf=None, timeout=10, context=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The multizone status as a named tuple.
:rtype: pychromecast.dial.MultizoneStatus or None
"""
try:
status = _get_status(
host,
services,
zconf,
"/setup/eureka_info?params=multizone",
True,
timeout,
context,
)
dynamic_groups = []
if "multizone" in status and "dynamic_groups" in status["multizone"]:
for group in status["multizone"]["dynamic_groups"]:
dynamic_groups.append(_get_group_info(host, group))
groups = []
if "multizone" in status and "groups" in status["multizone"]:
for group in status["multizone"]["groups"]:
groups.append(_get_group_info(host, group))
return MultizoneStatus(dynamic_groups, groups)
except (urllib.error.HTTPError, urllib.error.URLError, OSError, ValueError):
return None
MultizoneInfo = namedtuple("MultizoneInfo", ["friendly_name", "uuid", "host", "port"])
MultizoneStatus = namedtuple("MultizoneStatus", ["dynamic_groups", "groups"])
DeviceStatus = namedtuple(
"DeviceStatus", ["friendly_name", "model_name", "manufacturer", "uuid", "cast_type"]
)
| """
Implements the DIAL-protocol to communicate with the Chromecast
"""
from collections import namedtuple
import json
import logging
import socket
import ssl
import urllib.request
from uuid import UUID
import zeroconf
from .const import CAST_TYPE_CHROMECAST, CAST_TYPES, SERVICE_TYPE_HOST
XML_NS_UPNP_DEVICE = "{urn:schemas-upnp-org:device-1-0}"
FORMAT_BASE_URL_HTTP = "http://{}:8008"
FORMAT_BASE_URL_HTTPS = "https://{}:8443"
_LOGGER = logging.getLogger(__name__)
def get_host_from_service(service, zconf):
"""Resolve host and port from service."""
service_info = None
if service.type == SERVICE_TYPE_HOST:
return service.data + (None,)
try:
service_info = zconf.get_service_info("_googlecast._tcp.local.", service.data)
if service_info:
_LOGGER.debug(
"get_info_from_service resolved service %s to service_info %s",
service,
service_info,
)
except IOError:
pass
return _get_host_from_zc_service_info(service_info) + (service_info,)
def _get_host_from_zc_service_info(service_info: zeroconf.ServiceInfo):
""" Get hostname or IP + port from zeroconf service_info. """
host = None
port = None
if (
service_info
and service_info.port
and (service_info.server or len(service_info.addresses) > 0)
):
if len(service_info.addresses) > 0:
host = socket.inet_ntoa(service_info.addresses[0])
else:
host = service_info.server.lower()
port = service_info.port
return (host, port)
def _get_status(host, services, zconf, path, secure, timeout, context):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
if not host:
for service in services.copy():
host, _, _ = get_host_from_service(service, zconf)
if host:
_LOGGER.debug("Resolved service %s to %s", service, host)
break
headers = {"content-type": "application/json"}
if secure:
url = FORMAT_BASE_URL_HTTPS.format(host) + path
else:
url = FORMAT_BASE_URL_HTTP.format(host) + path
has_context = bool(context)
if secure and not has_context:
context = get_ssl_context()
req = urllib.request.Request(url, headers=headers)
with urllib.request.urlopen(req, timeout=timeout, context=context) as response:
data = response.read()
return json.loads(data.decode("utf-8"))
def get_ssl_context():
"""Create an SSL context."""
context = ssl.SSLContext()
context.verify_mode = ssl.CERT_NONE
return context
def get_device_status(host, services=None, zconf=None, timeout=10, context=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The device status as a named tuple.
:rtype: pychromecast.dial.DeviceStatus or None
"""
try:
status = _get_status(
host,
services,
zconf,
"/setup/eureka_info?options=detail",
True,
timeout,
context,
)
friendly_name = status.get("name", "Unknown Chromecast")
model_name = "Unknown model name"
manufacturer = "Unknown manufacturer"
if "detail" in status:
model_name = status["detail"].get("model_name", model_name)
manufacturer = status["detail"].get("manufacturer", manufacturer)
udn = status.get("ssdp_udn", None)
cast_type = CAST_TYPES.get(model_name.lower(), CAST_TYPE_CHROMECAST)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
return DeviceStatus(friendly_name, model_name, manufacturer, uuid, cast_type)
except (urllib.error.HTTPError, urllib.error.URLError, OSError, ValueError):
return None
def _get_group_info(host, group):
name = group.get("name", "Unknown group name")
udn = group.get("uuid", None)
uuid = None
if udn:
uuid = UUID(udn.replace("-", ""))
elected_leader = group.get("elected_leader", "")
elected_leader_split = elected_leader.rsplit(":", 1)
leader_host = None
leader_port = None
if elected_leader == "self" and "cast_port" in group:
leader_host = host
leader_port = group["cast_port"]
elif len(elected_leader_split) == 2:
# The port in the URL is not useful, but we can scan the host
leader_host = elected_leader_split[0]
return MultizoneInfo(name, uuid, leader_host, leader_port)
def get_multizone_status(host, services=None, zconf=None, timeout=10, context=None):
"""
:param host: Hostname or ip to fetch status from
:type host: str
:return: The multizone status as a named tuple.
:rtype: pychromecast.dial.MultizoneStatus or None
"""
try:
status = _get_status(
host,
services,
zconf,
"/setup/eureka_info?params=multizone",
True,
timeout,
context,
)
dynamic_groups = []
if "multizone" in status and "dynamic_groups" in status["multizone"]:
for group in status["multizone"]["dynamic_groups"]:
dynamic_groups.append(_get_group_info(host, group))
groups = []
if "multizone" in status and "groups" in status["multizone"]:
for group in status["multizone"]["groups"]:
groups.append(_get_group_info(host, group))
return MultizoneStatus(dynamic_groups, groups)
except (urllib.error.HTTPError, urllib.error.URLError, OSError, ValueError):
return None
MultizoneInfo = namedtuple("MultizoneInfo", ["friendly_name", "uuid", "host", "port"])
MultizoneStatus = namedtuple("MultizoneStatus", ["dynamic_groups", "groups"])
DeviceStatus = namedtuple(
"DeviceStatus", ["friendly_name", "model_name", "manufacturer", "uuid", "cast_type"]
)
| en | null | null |
0019568.py | # coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Contact: devex@mux.com
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class UpdateAssetRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'passthrough': 'str'
}
attribute_map = {
'passthrough': 'passthrough'
}
def __init__(self, passthrough=None, local_vars_configuration=None): # noqa: E501
"""UpdateAssetRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._passthrough = None
self.discriminator = None
if passthrough is not None:
self.passthrough = passthrough
@property
def passthrough(self):
"""Gets the passthrough of this UpdateAssetRequest. # noqa: E501
Arbitrary metadata set for the Asset. Max 255 characters. In order to clear this value, the field should be included with an empty string value. # noqa: E501
:return: The passthrough of this UpdateAssetRequest. # noqa: E501
:rtype: str
"""
return self._passthrough
@passthrough.setter
def passthrough(self, passthrough):
"""Sets the passthrough of this UpdateAssetRequest.
Arbitrary metadata set for the Asset. Max 255 characters. In order to clear this value, the field should be included with an empty string value. # noqa: E501
:param passthrough: The passthrough of this UpdateAssetRequest. # noqa: E501
:type passthrough: str
"""
self._passthrough = passthrough
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateAssetRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UpdateAssetRequest):
return True
return self.to_dict() != other.to_dict()
| # coding: utf-8
"""
Mux API
Mux is how developers build online video. This API encompasses both Mux Video and Mux Data functionality to help you build your video-related projects better and faster than ever before. # noqa: E501
The version of the OpenAPI document: v1
Contact: devex@mux.com
Generated by: https://openapi-generator.tech
"""
import inspect
import pprint
import re # noqa: F401
import six
from mux_python.configuration import Configuration
class UpdateAssetRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'passthrough': 'str'
}
attribute_map = {
'passthrough': 'passthrough'
}
def __init__(self, passthrough=None, local_vars_configuration=None): # noqa: E501
"""UpdateAssetRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration.get_default_copy()
self.local_vars_configuration = local_vars_configuration
self._passthrough = None
self.discriminator = None
if passthrough is not None:
self.passthrough = passthrough
@property
def passthrough(self):
"""Gets the passthrough of this UpdateAssetRequest. # noqa: E501
Arbitrary metadata set for the Asset. Max 255 characters. In order to clear this value, the field should be included with an empty string value. # noqa: E501
:return: The passthrough of this UpdateAssetRequest. # noqa: E501
:rtype: str
"""
return self._passthrough
@passthrough.setter
def passthrough(self, passthrough):
"""Sets the passthrough of this UpdateAssetRequest.
Arbitrary metadata set for the Asset. Max 255 characters. In order to clear this value, the field should be included with an empty string value. # noqa: E501
:param passthrough: The passthrough of this UpdateAssetRequest. # noqa: E501
:type passthrough: str
"""
self._passthrough = passthrough
def to_dict(self, serialize=False):
"""Returns the model properties as a dict"""
result = {}
def convert(x):
if hasattr(x, "to_dict"):
args = inspect.getargspec(x.to_dict).args
if len(args) == 1:
return x.to_dict()
else:
return x.to_dict(serialize)
else:
return x
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
attr = self.attribute_map.get(attr, attr) if serialize else attr
if isinstance(value, list):
result[attr] = list(map(
lambda x: convert(x),
value
))
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], convert(item[1])),
value.items()
))
else:
result[attr] = convert(value)
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateAssetRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, UpdateAssetRequest):
return True
return self.to_dict() != other.to_dict()
| en | null | null |
0033287.py | import os
import markdown
import codecs
import difflib
try:
import nose
except ImportError as e:
msg = e.args[0]
msg = msg + ". The nose testing framework is required to run the Python-" \
"Markdown tests. Run `pip install nose` to install the latest version."
e.args = (msg,) + e.args[1:]
raise
from .plugins import HtmlOutput, Markdown, MarkdownSyntaxError
try:
import tidylib
except ImportError:
tidylib = None
try:
import yaml
except ImportError as e:
msg = e.args[0]
msg = msg + ". A YAML library is required to run the Python-Markdown " \
"tests. Run `pip install pyyaml` to install the latest version."
e.args = (msg,) + e.args[1:]
raise
test_dir = os.path.abspath(os.path.dirname(__file__))
class YamlConfig():
def __init__(self, defaults, filename):
""" Set defaults and load config file if it exists. """
self.DEFAULT_SECTION = 'DEFAULT'
self._defaults = defaults
self._config = {}
if os.path.exists(filename):
with codecs.open(filename, encoding="utf-8") as f:
self._config = yaml.load(f)
def get(self, section, option):
""" Get config value for given section and option key. """
if section in self._config and option in self._config[section]:
return self._config[section][option]
return self._defaults[option]
def get_section(self, file):
""" Get name of config section for given file. """
filename = os.path.basename(file)
if filename in self._config:
return filename
else:
return self.DEFAULT_SECTION
def get_args(self, file):
""" Get args to pass to markdown from config for a given file. """
args = {}
section = self.get_section(file)
if section in self._config:
for key in self._config[section].keys():
# Filter out args unique to testing framework
if key not in self._defaults.keys():
args[key] = self.get(section, key)
return args
def get_config(dir_name):
""" Get config for given directory name. """
defaults = {
'normalize': False,
'skip': False,
'input_ext': '.txt',
'output_ext': '.html'
}
config = YamlConfig(defaults, os.path.join(dir_name, 'test.cfg'))
return config
def normalize(text):
""" Normalize whitespace for a string of html using tidylib. """
output, errors = tidylib.tidy_fragment(text, options={
'drop_empty_paras': 0,
'fix_backslash': 0,
'fix_bad_comments': 0,
'fix_uri': 0,
'join_styles': 0,
'lower_literals': 0,
'merge_divs': 0,
'output_xhtml': 1,
'quote_ampersand': 0,
'newline': 'LF'
})
return output
class CheckSyntax(object):
def __init__(self, description=None):
if description:
self.description = 'TestSyntax: "%s"' % description
def __call__(self, file, config):
""" Compare expected output to actual output and report result. """
cfg_section = config.get_section(file)
if config.get(cfg_section, 'skip'):
raise nose.plugins.skip.SkipTest('Test skipped per config.')
input_file = file + config.get(cfg_section, 'input_ext')
with codecs.open(input_file, encoding="utf-8") as f:
input = f.read()
output_file = file + config.get(cfg_section, 'output_ext')
with codecs.open(output_file, encoding="utf-8") as f:
# Normalize line endings
# (on windows, git may have altered line endings).
expected_output = f.read().replace("\r\n", "\n")
output = markdown.markdown(input, **config.get_args(file))
if tidylib and config.get(cfg_section, 'normalize'):
# Normalize whitespace with tidylib before comparing.
expected_output = normalize(expected_output)
output = normalize(output)
elif config.get(cfg_section, 'normalize'):
# Tidylib is not available. Skip this test.
raise nose.plugins.skip.SkipTest(
'Test skipped. Tidylib not available on system.'
)
diff = [l for l in difflib.unified_diff(
expected_output.splitlines(True),
output.splitlines(True),
output_file,
'actual_output.html',
n=3
)]
if diff:
raise MarkdownSyntaxError(
'Output from "%s" failed to match expected '
'output.\n\n%s' % (input_file, ''.join(diff))
)
def TestSyntax():
for dir_name, sub_dirs, files in os.walk(test_dir):
# Get dir specific config settings.
config = get_config(dir_name)
# Loop through files and generate tests.
for file in files:
root, ext = os.path.splitext(file)
if ext == config.get(config.get_section(file), 'input_ext'):
path = os.path.join(dir_name, root)
check_syntax = CheckSyntax(
description=os.path.relpath(path, test_dir)
)
yield check_syntax, path, config
def generate(file, config):
""" Write expected output file for given input. """
cfg_section = config.get_section(file)
if config.get(cfg_section, 'skip') or config.get(cfg_section, 'normalize'):
print('Skipping:', file)
return None
input_file = file + config.get(cfg_section, 'input_ext')
output_file = file + config.get(cfg_section, 'output_ext')
if not os.path.isfile(output_file) or \
os.path.getmtime(output_file) < os.path.getmtime(input_file):
print('Generating:', file)
markdown.markdownFromFile(input=input_file, output=output_file,
encoding='utf-8', **config.get_args(file))
else:
print('Already up-to-date:', file)
def generate_all():
""" Generate expected output for all outdated tests. """
for dir_name, sub_dirs, files in os.walk(test_dir):
# Get dir specific config settings.
config = get_config(dir_name)
# Loop through files and generate tests.
for file in files:
root, ext = os.path.splitext(file)
if ext == config.get(config.get_section(file), 'input_ext'):
generate(os.path.join(dir_name, root), config)
def run():
nose.main(addplugins=[HtmlOutput(), Markdown()])
| import os
import markdown
import codecs
import difflib
try:
import nose
except ImportError as e:
msg = e.args[0]
msg = msg + ". The nose testing framework is required to run the Python-" \
"Markdown tests. Run `pip install nose` to install the latest version."
e.args = (msg,) + e.args[1:]
raise
from .plugins import HtmlOutput, Markdown, MarkdownSyntaxError
try:
import tidylib
except ImportError:
tidylib = None
try:
import yaml
except ImportError as e:
msg = e.args[0]
msg = msg + ". A YAML library is required to run the Python-Markdown " \
"tests. Run `pip install pyyaml` to install the latest version."
e.args = (msg,) + e.args[1:]
raise
test_dir = os.path.abspath(os.path.dirname(__file__))
class YamlConfig():
def __init__(self, defaults, filename):
""" Set defaults and load config file if it exists. """
self.DEFAULT_SECTION = 'DEFAULT'
self._defaults = defaults
self._config = {}
if os.path.exists(filename):
with codecs.open(filename, encoding="utf-8") as f:
self._config = yaml.load(f)
def get(self, section, option):
""" Get config value for given section and option key. """
if section in self._config and option in self._config[section]:
return self._config[section][option]
return self._defaults[option]
def get_section(self, file):
""" Get name of config section for given file. """
filename = os.path.basename(file)
if filename in self._config:
return filename
else:
return self.DEFAULT_SECTION
def get_args(self, file):
""" Get args to pass to markdown from config for a given file. """
args = {}
section = self.get_section(file)
if section in self._config:
for key in self._config[section].keys():
# Filter out args unique to testing framework
if key not in self._defaults.keys():
args[key] = self.get(section, key)
return args
def get_config(dir_name):
""" Get config for given directory name. """
defaults = {
'normalize': False,
'skip': False,
'input_ext': '.txt',
'output_ext': '.html'
}
config = YamlConfig(defaults, os.path.join(dir_name, 'test.cfg'))
return config
def normalize(text):
""" Normalize whitespace for a string of html using tidylib. """
output, errors = tidylib.tidy_fragment(text, options={
'drop_empty_paras': 0,
'fix_backslash': 0,
'fix_bad_comments': 0,
'fix_uri': 0,
'join_styles': 0,
'lower_literals': 0,
'merge_divs': 0,
'output_xhtml': 1,
'quote_ampersand': 0,
'newline': 'LF'
})
return output
class CheckSyntax(object):
def __init__(self, description=None):
if description:
self.description = 'TestSyntax: "%s"' % description
def __call__(self, file, config):
""" Compare expected output to actual output and report result. """
cfg_section = config.get_section(file)
if config.get(cfg_section, 'skip'):
raise nose.plugins.skip.SkipTest('Test skipped per config.')
input_file = file + config.get(cfg_section, 'input_ext')
with codecs.open(input_file, encoding="utf-8") as f:
input = f.read()
output_file = file + config.get(cfg_section, 'output_ext')
with codecs.open(output_file, encoding="utf-8") as f:
# Normalize line endings
# (on windows, git may have altered line endings).
expected_output = f.read().replace("\r\n", "\n")
output = markdown.markdown(input, **config.get_args(file))
if tidylib and config.get(cfg_section, 'normalize'):
# Normalize whitespace with tidylib before comparing.
expected_output = normalize(expected_output)
output = normalize(output)
elif config.get(cfg_section, 'normalize'):
# Tidylib is not available. Skip this test.
raise nose.plugins.skip.SkipTest(
'Test skipped. Tidylib not available on system.'
)
diff = [l for l in difflib.unified_diff(
expected_output.splitlines(True),
output.splitlines(True),
output_file,
'actual_output.html',
n=3
)]
if diff:
raise MarkdownSyntaxError(
'Output from "%s" failed to match expected '
'output.\n\n%s' % (input_file, ''.join(diff))
)
def TestSyntax():
for dir_name, sub_dirs, files in os.walk(test_dir):
# Get dir specific config settings.
config = get_config(dir_name)
# Loop through files and generate tests.
for file in files:
root, ext = os.path.splitext(file)
if ext == config.get(config.get_section(file), 'input_ext'):
path = os.path.join(dir_name, root)
check_syntax = CheckSyntax(
description=os.path.relpath(path, test_dir)
)
yield check_syntax, path, config
def generate(file, config):
""" Write expected output file for given input. """
cfg_section = config.get_section(file)
if config.get(cfg_section, 'skip') or config.get(cfg_section, 'normalize'):
print('Skipping:', file)
return None
input_file = file + config.get(cfg_section, 'input_ext')
output_file = file + config.get(cfg_section, 'output_ext')
if not os.path.isfile(output_file) or \
os.path.getmtime(output_file) < os.path.getmtime(input_file):
print('Generating:', file)
markdown.markdownFromFile(input=input_file, output=output_file,
encoding='utf-8', **config.get_args(file))
else:
print('Already up-to-date:', file)
def generate_all():
""" Generate expected output for all outdated tests. """
for dir_name, sub_dirs, files in os.walk(test_dir):
# Get dir specific config settings.
config = get_config(dir_name)
# Loop through files and generate tests.
for file in files:
root, ext = os.path.splitext(file)
if ext == config.get(config.get_section(file), 'input_ext'):
generate(os.path.join(dir_name, root), config)
def run():
nose.main(addplugins=[HtmlOutput(), Markdown()])
| en | null | null |
0018939.py | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 16:44:36 2020
@author: wantysal
"""
# Standard library import
import numpy as np
# Local import
from mosqito.sound_level_meter.noct_spectrum._getFrequencies import _getFrequencies
def _spectrum_smoothing(freqs_in, spec, noct, low_freq, high_freq, freqs_out):
"""
Compute smoothed spectrum according to the n-th octave band chosen
Parameters
----------
freqs : numpy.array
frequency axis
spec : numpy.array
spectrum in dB
noct : integer
n-th octave-band according to which smooth the spectrum
low_freq : float
lowest frequency of the n-th octave bands
high_freq : float
highest frequency of the n-th octave bands
freqs_out : numpy.array
frequency axis along which the smoothed spectrum is given
Returns
-------
smoothed-spectrum : numpy.array
smoothed spectrum along the given frequency axis
"""
# n-th octave bands filter
filter_freqs = _getFrequencies(
low_freq, high_freq, noct, G=10, fr=1000)["f"]
filter_freqs[len(filter_freqs) - 1, 2] = high_freq
filter_freqs[0, 0] = low_freq
# Smoothed spectrum creation
nb_bands = filter_freqs.shape[0]
smoothed_spectrum = np.zeros((nb_bands))
i = 0
# Each band is considered individually until all of them have been treated
while nb_bands > 0:
# Find the index of the spectral components within the frequency bin
bin_index = np.where(
(freqs_in >= filter_freqs[i, 0]) & (freqs_in <= filter_freqs[i, 2])
)[0]
# If the frequency bin is empty, it is deleted from the list
if len(bin_index) == 0:
smoothed_spectrum = np.delete(smoothed_spectrum, i, axis=0)
filter_freqs = np.delete(filter_freqs, i, axis=0)
nb_bands -= 1
else:
# The spectral components within the frequency bin are averaged on an energy basis
spec_sum = 0
for j in bin_index:
spec_sum += 10 ** (spec[j] / 10)
smoothed_spectrum[i] = 10 * np.log10(spec_sum / len(bin_index))
nb_bands -= 1
i += 1
# Pose of the smoothed spectrum on the frequency-axis
cor = []
low = []
high = []
# Index of the lower, center and higher limit of each frequency bin into the original spectrum
for i in range(len(filter_freqs)):
cor.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 1])))
low.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 0])))
high.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 2])))
smooth_spec = np.zeros((spec.shape))
for i in range(filter_freqs.shape[0]):
smooth_spec[low[i]: high[i]] = smoothed_spectrum[i]
return smooth_spec
| # -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 16:44:36 2020
@author: wantysal
"""
# Standard library import
import numpy as np
# Local import
from mosqito.sound_level_meter.noct_spectrum._getFrequencies import _getFrequencies
def _spectrum_smoothing(freqs_in, spec, noct, low_freq, high_freq, freqs_out):
"""
Compute smoothed spectrum according to the n-th octave band chosen
Parameters
----------
freqs : numpy.array
frequency axis
spec : numpy.array
spectrum in dB
noct : integer
n-th octave-band according to which smooth the spectrum
low_freq : float
lowest frequency of the n-th octave bands
high_freq : float
highest frequency of the n-th octave bands
freqs_out : numpy.array
frequency axis along which the smoothed spectrum is given
Returns
-------
smoothed-spectrum : numpy.array
smoothed spectrum along the given frequency axis
"""
# n-th octave bands filter
filter_freqs = _getFrequencies(
low_freq, high_freq, noct, G=10, fr=1000)["f"]
filter_freqs[len(filter_freqs) - 1, 2] = high_freq
filter_freqs[0, 0] = low_freq
# Smoothed spectrum creation
nb_bands = filter_freqs.shape[0]
smoothed_spectrum = np.zeros((nb_bands))
i = 0
# Each band is considered individually until all of them have been treated
while nb_bands > 0:
# Find the index of the spectral components within the frequency bin
bin_index = np.where(
(freqs_in >= filter_freqs[i, 0]) & (freqs_in <= filter_freqs[i, 2])
)[0]
# If the frequency bin is empty, it is deleted from the list
if len(bin_index) == 0:
smoothed_spectrum = np.delete(smoothed_spectrum, i, axis=0)
filter_freqs = np.delete(filter_freqs, i, axis=0)
nb_bands -= 1
else:
# The spectral components within the frequency bin are averaged on an energy basis
spec_sum = 0
for j in bin_index:
spec_sum += 10 ** (spec[j] / 10)
smoothed_spectrum[i] = 10 * np.log10(spec_sum / len(bin_index))
nb_bands -= 1
i += 1
# Pose of the smoothed spectrum on the frequency-axis
cor = []
low = []
high = []
# Index of the lower, center and higher limit of each frequency bin into the original spectrum
for i in range(len(filter_freqs)):
cor.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 1])))
low.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 0])))
high.append(np.argmin(np.abs(freqs_out - filter_freqs[i, 2])))
smooth_spec = np.zeros((spec.shape))
for i in range(filter_freqs.shape[0]):
smooth_spec[low[i]: high[i]] = smoothed_spectrum[i]
return smooth_spec
| en | null | null |
0008742.py | #!/usr/bin/env python3
"""
An example script to send data to CommCare using the Submission API
Usage:
$ export CCHQ_PROJECT_SPACE=my-project-space
$ export CCHQ_CASE_TYPE=person
$ export CCHQ_USERNAME=user@example.com
$ export CCHQ_PASSWORD=MijByG_se3EcKr.t
$ export CCHQ_USER_ID=c0ffeeeeeb574eb8b5d5036c9a61a483
$ export CCHQ_OWNER_ID=c0ffeeeee1e34b12bb5da0dc838e8406
$ ./submit_data.py sample_data.csv
"""
# (Optional) Configure the following settings with your values
# An XML namespace to identify your XForm submission
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
# A string to identify the origin of your data
DEVICE_ID = "submission_api_example"
# End of configurable settings
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
name: str
value: Any
@dataclass
class Case:
id: str # A UUID. Generated if not given in the data.
name: str # Required
type: str # A name for the case type. e.g. "person" or "site"
modified_on: str # Generated if not given. e.g. "2020-06-08T18:41:33.207Z"
owner_id: str # ID of the user or location that cases must be assigned to
properties: List[CaseProperty] # All other given data
server_modified_on: Optional[str]
def main(filename):
"""
Sends data to CommCare HQ using the Submission API.
"""
data = get_data(filename)
cases = as_cases(data)
xform_str = render_xform(cases)
success, message = submit_xform(xform_str)
return success, message
def get_data(csv_filename) -> Iterable[dict]:
"""
Reads data in CSV format from the given filename, and yields it as
dictionaries.
"""
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
yield from reader
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
"""
Casts dictionaries as Case instances
"""
reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
for dict_ in data:
properties = [CaseProperty(name=key, value=value)
for key, value in dict_.items()
if key not in reserved]
yield Case(
id=dict_.get('id', str(uuid.uuid4())),
name=dict_['name'],
type=os.environ['CCHQ_CASE_TYPE'],
modified_on=dict_.get('modified_on', now_utc()),
owner_id=os.environ['CCHQ_OWNER_ID'],
server_modified_on=dict_.get('server_modified_on'),
properties=properties,
)
def render_xform(cases: Iterable[Case]) -> str:
context = {
'form_xmlns': FORM_XMLNS,
'device_id': DEVICE_ID,
'now_utc': now_utc(),
'cchq_username': os.environ['CCHQ_USERNAME'],
'cchq_user_id': os.environ['CCHQ_USER_ID'],
'submission_id': uuid.uuid4().hex,
'cases': list(cases),
}
with open('xform.xml.j2') as template_file:
template = Template(template_file.read())
xform = template.render(**context)
return xform
def submit_xform(xform: str) -> Tuple[bool, str]:
"""
Submits the given XForm to CommCare.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
"""
url = join_url(COMMCARE_URL,
f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/')
auth = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
headers = {'Content-Type': 'text/html; charset=UTF-8'}
response = requests.post(url, xform.encode('utf-8'),
headers=headers, auth=auth)
if not 200 <= response.status_code < 300:
return False, http_responses[response.status_code]
return parse_response(response.text)
def parse_response(text: str) -> Tuple[bool, str]:
"""
Parses a CommCare HQ Submission API response.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
>>> text = '''
... <OpenRosaResponse xmlns="http://openrosa.org/http/response">
... <message nature="submit_success"> √ </message>
... </OpenRosaResponse>
... '''
>>> parse_response(text)
(True, ' √ ')
"""
xml = ET.XML(text)
message = xml.find('{http://openrosa.org/http/response}message')
success = message.attrib['nature'] == 'submit_success'
return success, message.text
def join_url(base_url: str, endpoint: str) -> str:
"""
Returns ``base_url`` + ``endpoint`` with the right forward slashes.
>>> join_url('https://example.com/', '/api/foo')
'https://example.com/api/foo'
>>> join_url('https://example.com', 'api/foo')
'https://example.com/api/foo'
"""
return '/'.join((base_url.rstrip('/'), endpoint.lstrip('/')))
def now_utc() -> str:
"""
Returns a UTC timestamp in ISO-8601 format with the offset as "Z".
e.g. "2020-06-08T18:41:33.207Z"
"""
now = datetime.now(tz=timezone.utc)
now_iso = now.isoformat(timespec='milliseconds')
now_iso_z = now_iso.replace('+00:00', 'Z')
return now_iso_z
def missing_env_vars():
env_vars = (
'CCHQ_PROJECT_SPACE',
'CCHQ_CASE_TYPE',
'CCHQ_USERNAME',
'CCHQ_PASSWORD',
'CCHQ_USER_ID',
'CCHQ_OWNER_ID',
)
return [env_var for env_var in env_vars if env_var not in os.environ]
if __name__ == '__main__':
if len(sys.argv) != 2:
print(__doc__)
sys.exit()
if missing := missing_env_vars():
print('Missing environment variables:', ', '.join(missing))
sys.exit(1)
success, message = main(sys.argv[1])
print(message)
if not success:
sys.exit(1)
| #!/usr/bin/env python3
"""
An example script to send data to CommCare using the Submission API
Usage:
$ export CCHQ_PROJECT_SPACE=my-project-space
$ export CCHQ_CASE_TYPE=person
$ export CCHQ_USERNAME=user@example.com
$ export CCHQ_PASSWORD=MijByG_se3EcKr.t
$ export CCHQ_USER_ID=c0ffeeeeeb574eb8b5d5036c9a61a483
$ export CCHQ_OWNER_ID=c0ffeeeee1e34b12bb5da0dc838e8406
$ ./submit_data.py sample_data.csv
"""
# (Optional) Configure the following settings with your values
# An XML namespace to identify your XForm submission
FORM_XMLNS = 'http://example.com/submission-api-example-form/'
# A string to identify the origin of your data
DEVICE_ID = "submission_api_example"
# End of configurable settings
import csv
import os
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from http.client import responses as http_responses
from typing import Any, Iterable, List, Optional, Tuple
from xml.etree import ElementTree as ET
import requests
from jinja2 import Template
COMMCARE_URL = 'https://www.commcarehq.org/'
@dataclass
class CaseProperty:
name: str
value: Any
@dataclass
class Case:
id: str # A UUID. Generated if not given in the data.
name: str # Required
type: str # A name for the case type. e.g. "person" or "site"
modified_on: str # Generated if not given. e.g. "2020-06-08T18:41:33.207Z"
owner_id: str # ID of the user or location that cases must be assigned to
properties: List[CaseProperty] # All other given data
server_modified_on: Optional[str]
def main(filename):
"""
Sends data to CommCare HQ using the Submission API.
"""
data = get_data(filename)
cases = as_cases(data)
xform_str = render_xform(cases)
success, message = submit_xform(xform_str)
return success, message
def get_data(csv_filename) -> Iterable[dict]:
"""
Reads data in CSV format from the given filename, and yields it as
dictionaries.
"""
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
yield from reader
def as_cases(data: Iterable[dict]) -> Iterable[Case]:
"""
Casts dictionaries as Case instances
"""
reserved = ('id', 'name', 'case_type', 'modified_on', 'server_modified_on')
for dict_ in data:
properties = [CaseProperty(name=key, value=value)
for key, value in dict_.items()
if key not in reserved]
yield Case(
id=dict_.get('id', str(uuid.uuid4())),
name=dict_['name'],
type=os.environ['CCHQ_CASE_TYPE'],
modified_on=dict_.get('modified_on', now_utc()),
owner_id=os.environ['CCHQ_OWNER_ID'],
server_modified_on=dict_.get('server_modified_on'),
properties=properties,
)
def render_xform(cases: Iterable[Case]) -> str:
context = {
'form_xmlns': FORM_XMLNS,
'device_id': DEVICE_ID,
'now_utc': now_utc(),
'cchq_username': os.environ['CCHQ_USERNAME'],
'cchq_user_id': os.environ['CCHQ_USER_ID'],
'submission_id': uuid.uuid4().hex,
'cases': list(cases),
}
with open('xform.xml.j2') as template_file:
template = Template(template_file.read())
xform = template.render(**context)
return xform
def submit_xform(xform: str) -> Tuple[bool, str]:
"""
Submits the given XForm to CommCare.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
"""
url = join_url(COMMCARE_URL,
f'/a/{os.environ["CCHQ_PROJECT_SPACE"]}/receiver/api/')
auth = (os.environ['CCHQ_USERNAME'], os.environ['CCHQ_PASSWORD'])
headers = {'Content-Type': 'text/html; charset=UTF-8'}
response = requests.post(url, xform.encode('utf-8'),
headers=headers, auth=auth)
if not 200 <= response.status_code < 300:
return False, http_responses[response.status_code]
return parse_response(response.text)
def parse_response(text: str) -> Tuple[bool, str]:
"""
Parses a CommCare HQ Submission API response.
Returns (True, success_message) on success, or (False,
failure_message) on failure.
>>> text = '''
... <OpenRosaResponse xmlns="http://openrosa.org/http/response">
... <message nature="submit_success"> √ </message>
... </OpenRosaResponse>
... '''
>>> parse_response(text)
(True, ' √ ')
"""
xml = ET.XML(text)
message = xml.find('{http://openrosa.org/http/response}message')
success = message.attrib['nature'] == 'submit_success'
return success, message.text
def join_url(base_url: str, endpoint: str) -> str:
"""
Returns ``base_url`` + ``endpoint`` with the right forward slashes.
>>> join_url('https://example.com/', '/api/foo')
'https://example.com/api/foo'
>>> join_url('https://example.com', 'api/foo')
'https://example.com/api/foo'
"""
return '/'.join((base_url.rstrip('/'), endpoint.lstrip('/')))
def now_utc() -> str:
"""
Returns a UTC timestamp in ISO-8601 format with the offset as "Z".
e.g. "2020-06-08T18:41:33.207Z"
"""
now = datetime.now(tz=timezone.utc)
now_iso = now.isoformat(timespec='milliseconds')
now_iso_z = now_iso.replace('+00:00', 'Z')
return now_iso_z
def missing_env_vars():
env_vars = (
'CCHQ_PROJECT_SPACE',
'CCHQ_CASE_TYPE',
'CCHQ_USERNAME',
'CCHQ_PASSWORD',
'CCHQ_USER_ID',
'CCHQ_OWNER_ID',
)
return [env_var for env_var in env_vars if env_var not in os.environ]
if __name__ == '__main__':
if len(sys.argv) != 2:
print(__doc__)
sys.exit()
if missing := missing_env_vars():
print('Missing environment variables:', ', '.join(missing))
sys.exit(1)
success, message = main(sys.argv[1])
print(message)
if not success:
sys.exit(1)
| en | null | null |
0010801.py | # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import context
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BareMetalVIFDriver(object):
def _after_plug(self, instance, vif, pif):
pass
def _after_unplug(self, instance, vif, pif):
pass
def plug(self, instance, vif):
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(vif)s",
{'uuid': instance['uuid'], 'vif': vif})
vif_uuid = vif['id']
ctx = context.get_admin_context()
node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
# TODO(deva): optimize this database query
# this is just searching for a free physical interface
pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
for pif in pifs:
if not pif['vif_uuid']:
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
LOG.debug("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)",
{'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_plug(instance, vif, pif)
return
# NOTE(deva): should this really be raising an exception
# when there are no physical interfaces left?
raise exception.NovaException(_(
"Baremetal node: %(id)s has no available physical interface"
" for virtual interface %(vif_uuid)s")
% {'id': node['id'], 'vif_uuid': vif_uuid})
def unplug(self, instance, vif):
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(vif)s",
{'uuid': instance['uuid'], 'vif': vif})
vif_uuid = vif['id']
ctx = context.get_admin_context()
try:
pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
LOG.debug("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)",
{'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_unplug(instance, vif, pif)
except exception.NovaException:
LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)
| # Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from nova import context
from nova import exception
from nova.i18n import _
from nova.openstack.common import log as logging
from nova.virt.baremetal import db as bmdb
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class BareMetalVIFDriver(object):
def _after_plug(self, instance, vif, pif):
pass
def _after_unplug(self, instance, vif, pif):
pass
def plug(self, instance, vif):
LOG.debug("plug: instance_uuid=%(uuid)s vif=%(vif)s",
{'uuid': instance['uuid'], 'vif': vif})
vif_uuid = vif['id']
ctx = context.get_admin_context()
node = bmdb.bm_node_get_by_instance_uuid(ctx, instance['uuid'])
# TODO(deva): optimize this database query
# this is just searching for a free physical interface
pifs = bmdb.bm_interface_get_all_by_bm_node_id(ctx, node['id'])
for pif in pifs:
if not pif['vif_uuid']:
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], vif_uuid)
LOG.debug("pif:%(id)s is plugged (vif_uuid=%(vif_uuid)s)",
{'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_plug(instance, vif, pif)
return
# NOTE(deva): should this really be raising an exception
# when there are no physical interfaces left?
raise exception.NovaException(_(
"Baremetal node: %(id)s has no available physical interface"
" for virtual interface %(vif_uuid)s")
% {'id': node['id'], 'vif_uuid': vif_uuid})
def unplug(self, instance, vif):
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(vif)s",
{'uuid': instance['uuid'], 'vif': vif})
vif_uuid = vif['id']
ctx = context.get_admin_context()
try:
pif = bmdb.bm_interface_get_by_vif_uuid(ctx, vif_uuid)
bmdb.bm_interface_set_vif_uuid(ctx, pif['id'], None)
LOG.debug("pif:%(id)s is unplugged (vif_uuid=%(vif_uuid)s)",
{'id': pif['id'], 'vif_uuid': vif_uuid})
self._after_unplug(instance, vif, pif)
except exception.NovaException:
LOG.warn(_("no pif for vif_uuid=%s") % vif_uuid)
| en | null | null |
0007285.py | import hashlib
from ecdsa.curves import Ed25519, SECP256k1
from .principal import Principal
import ecdsa
class Identity:
def __init__(self, privkey = "", type = "ed25519", anonymous = False):
privkey = bytes(bytearray.fromhex(privkey))
self.anonymous = anonymous
if anonymous:
return
self.key_type = type
if type == 'secp256k1':
if len(privkey) > 0:
self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
else:
self.sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
self._privkey = self.sk.to_string().hex()
self.vk = self.sk.get_verifying_key()
self._pubkey = self.vk.to_string().hex()
self._der_pubkey = self.vk.to_der()
elif type == 'ed25519':
if len(privkey) > 0:
self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.Ed25519)
else:
self.sk = ecdsa.SigningKey.generate(curve=ecdsa.Ed25519)
self._privkey = self.sk.to_string().hex()
self.vk = self.sk.get_verifying_key()
self._pubkey = self.vk.to_string().hex()
self._der_pubkey = self.vk.to_der()
else:
raise 'unsupported identity type'
@staticmethod
def from_pem(pem: str):
key = ecdsa.SigningKey.from_pem(pem)
privkey = key.to_string().hex()
type = "unknown"
if key.curve == Ed25519:
type = 'ed25519'
elif key.curve == SECP256k1:
type = 'secp256k1'
return Identity(privkey=privkey, type=type)
def to_pem(self):
pem = self.sk.to_pem(format="pkcs8")
return pem
def sender(self):
if self.anonymous:
return Principal.anonymous()
return Principal.self_authenticating(self._der_pubkey)
def sign(self, msg: bytes):
if self.anonymous:
return (None, None)
if self.key_type == 'ed25519':
sig = self.sk.sign(msg)
return (self._der_pubkey, sig)
elif self.key_type == 'secp256k1':
sig = self.sk.sign(msg)
return (self._der_pubkey, sig)
@property
def privkey(self):
return self._privkey
@property
def pubkey(self):
return self._pubkey
@property
def der_pubkey(self):
return self._der_pubkey
def __repr__(self):
return "Identity(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
def __str__(self):
return "(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
| import hashlib
from ecdsa.curves import Ed25519, SECP256k1
from .principal import Principal
import ecdsa
class Identity:
def __init__(self, privkey = "", type = "ed25519", anonymous = False):
privkey = bytes(bytearray.fromhex(privkey))
self.anonymous = anonymous
if anonymous:
return
self.key_type = type
if type == 'secp256k1':
if len(privkey) > 0:
self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
else:
self.sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1, hashfunc=hashlib.sha256)
self._privkey = self.sk.to_string().hex()
self.vk = self.sk.get_verifying_key()
self._pubkey = self.vk.to_string().hex()
self._der_pubkey = self.vk.to_der()
elif type == 'ed25519':
if len(privkey) > 0:
self.sk = ecdsa.SigningKey.from_string(privkey, curve=ecdsa.Ed25519)
else:
self.sk = ecdsa.SigningKey.generate(curve=ecdsa.Ed25519)
self._privkey = self.sk.to_string().hex()
self.vk = self.sk.get_verifying_key()
self._pubkey = self.vk.to_string().hex()
self._der_pubkey = self.vk.to_der()
else:
raise 'unsupported identity type'
@staticmethod
def from_pem(pem: str):
key = ecdsa.SigningKey.from_pem(pem)
privkey = key.to_string().hex()
type = "unknown"
if key.curve == Ed25519:
type = 'ed25519'
elif key.curve == SECP256k1:
type = 'secp256k1'
return Identity(privkey=privkey, type=type)
def to_pem(self):
pem = self.sk.to_pem(format="pkcs8")
return pem
def sender(self):
if self.anonymous:
return Principal.anonymous()
return Principal.self_authenticating(self._der_pubkey)
def sign(self, msg: bytes):
if self.anonymous:
return (None, None)
if self.key_type == 'ed25519':
sig = self.sk.sign(msg)
return (self._der_pubkey, sig)
elif self.key_type == 'secp256k1':
sig = self.sk.sign(msg)
return (self._der_pubkey, sig)
@property
def privkey(self):
return self._privkey
@property
def pubkey(self):
return self._pubkey
@property
def der_pubkey(self):
return self._der_pubkey
def __repr__(self):
return "Identity(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
def __str__(self):
return "(" + self.key_type + ', ' + self._privkey + ", " + self._pubkey + ")"
| en | null | null |
0019223.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-09-21 22:23
# @Author : Kelly Hwong (dianhuangkan@gmail.com)
import numpy as np
import tensorflow as tf
class XOR_Dataset(tf.keras.utils.Sequence):
"""XOR_Dataset."""
def __init__(
self,
batch_size=1,
shuffle=False,
seed=42,
):
self.X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
self.y = np.array([[0], [1], [1], [0]])
assert batch_size <= 4
self.batch_size = batch_size # one by one learning
self.index = self._set_index_array()
self.shuffle = shuffle
def __getitem__(self, batch_index):
"""Gets batch at batch_index `batch_index`.
Arguments:
batch_index: batch_index of the batch in the Sequence.
Returns:
batch_x, batch_y: a batch of sequence data.
"""
batch_size = self.batch_size
sample_index = \
self.index[batch_index * batch_size:(batch_index+1) * batch_size]
batch_x = np.empty((batch_size, 2))
batch_y = np.empty(batch_size)
for _, i in enumerate(sample_index):
batch_x[_, ] = self.X[i, :]
batch_y[_] = self.y[i, :]
return batch_x, batch_y
def __len__(self):
"""Number of batches in the Sequence.
Returns:
The number of batches in the Sequence.
"""
return int(np.ceil(self.index.shape[0] / self.batch_size))
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def _set_index_array(self):
"""_set_index_array
"""
N = 4
return np.arange(0, N)
def main():
pass
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : Feb-09-21 22:23
# @Author : Kelly Hwong (dianhuangkan@gmail.com)
import numpy as np
import tensorflow as tf
class XOR_Dataset(tf.keras.utils.Sequence):
"""XOR_Dataset."""
def __init__(
self,
batch_size=1,
shuffle=False,
seed=42,
):
self.X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
self.y = np.array([[0], [1], [1], [0]])
assert batch_size <= 4
self.batch_size = batch_size # one by one learning
self.index = self._set_index_array()
self.shuffle = shuffle
def __getitem__(self, batch_index):
"""Gets batch at batch_index `batch_index`.
Arguments:
batch_index: batch_index of the batch in the Sequence.
Returns:
batch_x, batch_y: a batch of sequence data.
"""
batch_size = self.batch_size
sample_index = \
self.index[batch_index * batch_size:(batch_index+1) * batch_size]
batch_x = np.empty((batch_size, 2))
batch_y = np.empty(batch_size)
for _, i in enumerate(sample_index):
batch_x[_, ] = self.X[i, :]
batch_y[_] = self.y[i, :]
return batch_x, batch_y
def __len__(self):
"""Number of batches in the Sequence.
Returns:
The number of batches in the Sequence.
"""
return int(np.ceil(self.index.shape[0] / self.batch_size))
def __iter__(self):
"""Create a generator that iterate over the Sequence."""
for item in (self[i] for i in range(len(self))):
yield item
def _set_index_array(self):
"""_set_index_array
"""
N = 4
return np.arange(0, N)
def main():
pass
if __name__ == "__main__":
main()
| en | null | null |
0016186.py | """
This module is for managing OMERO imports, making use of the OMERO CLI,
which can be called from a Python script. Note that this code requires
a properly structured import.json file, which is produced during data
intake (using the intake.py module).
"""
import logging
from ezomero import post_dataset, post_project
from ezomero import get_image_ids, link_images_to_dataset
from ezomero import post_screen, link_plates_to_screen
from importlib import import_module
from omero.cli import CLI
from omero.plugins.sessions import SessionsControl
from omero.rtypes import rstring
from omero.sys import Parameters
from omero.gateway import MapAnnotationWrapper
from pathlib import Path
ImportControl = import_module("omero.plugins.import").ImportControl
# Constants
CURRENT_MD_NS = 'jax.org/omeroutils/user_submitted/v0'
# Functions
def set_or_create_project(conn, project_name):
"""Create a new Project unless one already exists with that name.
Parameter
---------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
project_name : str
The name of the Project needed. If there is no Project with a matching
name in the group specified in ``conn``, a new Project will be created.
Returns
-------
project_id : int
The id of the Project that was either found or created.
"""
ps = conn.getObjects('Project', attributes={'name': project_name})
ps = list(ps)
if len(ps) == 0:
project_id = post_project(conn, project_name)
print(f'Created new Project:{project_id}')
else:
project_id = ps[0].getId()
return project_id
def set_or_create_dataset(conn, project_id, dataset_name):
"""Create a new Dataset unless one already exists with that name/Project.
Parameter
---------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
project_id : int
Id of Project in which to find/create Dataset.
dataset_name : str
The name of the Dataset needed. If there is no Dataset with a matching
name in the group specified in ``conn``, in the Project specified with
``project_id``, a new Dataset will be created accordingly.
Returns
-------
dataset_id : int
The id of the Dataset that was either found or created.
"""
ds = conn.getObjects('Dataset',
attributes={'name': dataset_name},
opts={'project': project_id})
ds = list(ds)
if len(ds) == 0:
dataset_id = post_dataset(conn, dataset_name, project_id=project_id)
print(f'Created new Dataset:{dataset_id}')
else:
dataset_id = ds[0].getId()
return dataset_id
def set_or_create_screen(conn, screen_name):
"""Create a new Screen unless one already exists with that name.
Parameter
---------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
screen_name : str
The name of the Screen needed. If there is no Screen with a matching
name in the group specified in ``conn``, a new Screen will be created.
Returns
-------
screen_id : int
The id of the Project that was either found or created.
"""
ss = conn.getObjects('Screen', attributes={'name': screen_name})
ss = list(ss)
if len(ss) == 0:
screen_id = post_screen(conn, screen_name)
print(f'Created new Screen:{screen_id}')
else:
screen_id = ss[0].getId()
return screen_id
def multi_post_map_annotation(conn, object_type, object_ids, kv_dict, ns):
"""Create a single new MapAnnotation and link to multiple images.
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object
OMERO connection.
object_type : str
OMERO object type, passed to ``BlitzGateway.getObjects``
object_ids : int or list of ints
IDs of objects to which the new MapAnnotation will be linked.
kv_dict : dict
key-value pairs that will be included in the MapAnnotation
ns : str
Namespace for the MapAnnotation
Notes
-----
All keys and values are converted to strings before saving in OMERO.
Returns
-------
map_ann_id : int
IDs of newly created MapAnnotation
Examples
--------
>>> ns = 'jax.org/jax/example/namespace'
>>> d = {'species': 'human',
'occupation': 'time traveler'
'first name': 'Kyle',
'surname': 'Reese'}
>>> multi_post_map_annotation(conn, "Image", [23,56,78], d, ns)
234
"""
if type(object_ids) not in [list, int]:
raise TypeError('object_ids must be list or integer')
if type(object_ids) is not list:
object_ids = [object_ids]
if len(object_ids) == 0:
raise ValueError('object_ids must contain one or more items')
if type(kv_dict) is not dict:
raise TypeError('kv_dict must be of type `dict`')
kv_pairs = []
for k, v in kv_dict.items():
k = str(k)
v = str(v)
kv_pairs.append([k, v])
map_ann = MapAnnotationWrapper(conn)
map_ann.setNs(str(ns))
map_ann.setValue(kv_pairs)
map_ann.save()
for o in conn.getObjects(object_type, object_ids):
o.linkAnnotation(map_ann)
return map_ann.getId()
# Class definitions
class Importer:
"""Class for managing OMERO imports using OMERO CLI.
Metadata from ``import.json`` (item in 'import_targets') is required for
assigning to Project/Dataset and adding MapAnnotations.
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
file_path : pathlike object
Path to the file to imported into OMERO.
import_md : dict
Contains metadata required for import and annotation. Generally, at
item from ``import.json`` ('import_targets').
Attributes
----------
conn : ``omero.gateway.BlitzGateway`` object.
From parameter given at initialization.
file_path : ``pathlib.Path`` object
From parameter given at initialization.
md : dict
From ``import_md`` parameter given at initialization.
session_uuid : str
UUID for OMERO session represented by ``self.conn``. Supplied to
OMERO CLI for connection purposes.
filename : str
Filename of file to be imported. Populated from ``self.md``.
project : str
Name of Project to contain the image. Populated from ``self.md``.
dataset : str
Name of Dataset to contain the image. Poplulated from ``self.md``.
imported : boolean
Flag indicating import status.
image_ids : list of ints
The Ids of the images in OMERO. Populated after a file is imported.
This list may contain one or more images derived from a single file.
"""
def __init__(self, conn, file_path, import_md):
self.conn = conn
self.file_path = Path(file_path)
self.md = import_md
self.session_uuid = conn.getSession().getUuid().val
self.filename = self.md.pop('filename')
if 'project' in self.md.keys():
self.project = self.md.pop('project')
else:
self.project = None
if 'dataset' in self.md.keys():
self.dataset = self.md.pop('dataset')
else:
self.dataset = None
if 'screen' in self.md.keys():
self.screen = self.md.pop('screen')
else:
self.screen = None
self.imported = False
self.image_ids = None
self.plate_ids = None
def get_image_ids(self):
"""Get the Ids of imported images.
Note that this will not find images if they have not been imported.
Also, while image_ids are returned, this method also sets
``self.image_ids``.
Returns
-------
image_ids : list of ints
Ids of images imported from the specified client path, which
itself is derived from ``self.file_path`` and ``self.filename``.
"""
if self.imported is not True:
logging.error(f'File {self.file_path} has not been imported')
return None
else:
q = self.conn.getQueryService()
params = Parameters()
path_query = str(self.file_path).strip('/')
params.map = {"cpath": rstring(path_query)}
results = q.projection(
"SELECT i.id FROM Image i"
" JOIN i.fileset fs"
" JOIN fs.usedFiles u"
" WHERE u.clientPath=:cpath",
params,
self.conn.SERVICE_OPTS
)
self.image_ids = [r[0].val for r in results]
return self.image_ids
def get_plate_ids(self):
"""Get the Ids of imported plates.
Note that this will not find plates if they have not been imported.
Also, while plate_ids are returned, this method also sets
``self.plate_ids``.
Returns
-------
plate_ids : list of ints
Ids of plates imported from the specified client path, which
itself is derived from ``self.file_path`` and ``self.filename``.
"""
if self.imported is not True:
logging.error(f'File {self.file_path} has not been imported')
return None
else:
print("time to get some IDs")
q = self.conn.getQueryService()
print(q)
params = Parameters()
path_query = str(self.file_path).strip('/')
print(f"path query: f{path_query}")
params.map = {"cpath": rstring(path_query)}
print(params)
results = q.projection(
"SELECT DISTINCT p.id FROM Plate p"
" JOIN p.plateAcquisitions pa"
" JOIN pa.wellSample ws"
" JOIN ws.image i"
" JOIN i.fileset fs"
" JOIN fs.usedFiles u"
" WHERE u.clientPath=:cpath",
params,
self.conn.SERVICE_OPTS
)
print(results)
self.plate_ids = [r[0].val for r in results]
return self.plate_ids
def annotate_images(self):
"""Post map annotation (``self.md``) to images ``self.image_ids``.
Returns
-------
map_ann_id : int
The Id of the MapAnnotation that was created.
"""
if len(self.image_ids) == 0:
logging.error('No image ids to annotate')
return None
else:
map_ann_id = multi_post_map_annotation(self.conn, "Image",
self.image_ids, self.md,
CURRENT_MD_NS)
return map_ann_id
def annotate_plates(self):
"""Post map annotation (``self.md``) to plates ``self.plate_ids``.
Returns
-------
map_ann_id : int
The Id of the MapAnnotation that was created.
"""
if len(self.plate_ids) == 0:
logging.error('No plate ids to annotate')
return None
else:
map_ann_id = multi_post_map_annotation(self.conn, "Plate",
self.plate_ids, self.md,
CURRENT_MD_NS)
return map_ann_id
def organize_images(self):
"""Move images to ``self.project``/``self.dataset``.
Returns
-------
image_moved : boolean
True if images were found and moved, else False.
"""
if not self.image_ids:
logging.error('No image ids to organize')
return False
orphans = get_image_ids(self.conn)
for im_id in self.image_ids:
if im_id not in orphans:
logging.error(f'Image:{im_id} not an orphan')
else:
project_id = set_or_create_project(self.conn, self.project)
dataset_id = set_or_create_dataset(self.conn,
project_id,
self.dataset)
link_images_to_dataset(self.conn, [im_id], dataset_id)
print(f'Moved Image:{im_id} to Dataset:{dataset_id}')
return True
def organize_plates(self):
"""Move plates to ``self.screen``.
Returns
-------
plate_moved : boolean
True if plates were found and moved, else False.
"""
if len(self.plate_ids) == 0:
logging.error('No plate ids to organize')
return False
for pl_id in self.plate_ids:
screen_id = set_or_create_screen(self.conn, self.screen)
link_plates_to_screen(self.conn, [pl_id], screen_id)
print(f'Moved Plate:{pl_id} to Screen:{screen_id}')
return True
def import_ln_s(self, host, port):
"""Import file using the ``--transfer=ln_s`` option.
Parameters
----------
host : str
Hostname of OMERO server in which images will be imported.
port : int
Port used to connect to OMERO.server.
Returns
-------
import_status : boolean
True if OMERO import returns a 0 exit status, else False.
"""
cli = CLI()
cli.register('import', ImportControl, '_')
cli.register('sessions', SessionsControl, '_')
cli.invoke(['import',
'-k', self.conn.getSession().getUuid().val,
'-s', host,
'-p', str(port),
'--transfer', 'ln_s',
str(self.file_path)])
if cli.rv == 0:
self.imported = True
print(f'Imported {self.file_path}')
return True
else:
logging.error(f'Import of {self.file_path} has failed!')
return False
| """
This module is for managing OMERO imports, making use of the OMERO CLI,
which can be called from a Python script. Note that this code requires
a properly structured import.json file, which is produced during data
intake (using the intake.py module).
"""
import logging
from ezomero import post_dataset, post_project
from ezomero import get_image_ids, link_images_to_dataset
from ezomero import post_screen, link_plates_to_screen
from importlib import import_module
from omero.cli import CLI
from omero.plugins.sessions import SessionsControl
from omero.rtypes import rstring
from omero.sys import Parameters
from omero.gateway import MapAnnotationWrapper
from pathlib import Path
ImportControl = import_module("omero.plugins.import").ImportControl
# Constants
CURRENT_MD_NS = 'jax.org/omeroutils/user_submitted/v0'
# Functions
def set_or_create_project(conn, project_name):
"""Create a new Project unless one already exists with that name.
Parameter
---------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
project_name : str
The name of the Project needed. If there is no Project with a matching
name in the group specified in ``conn``, a new Project will be created.
Returns
-------
project_id : int
The id of the Project that was either found or created.
"""
ps = conn.getObjects('Project', attributes={'name': project_name})
ps = list(ps)
if len(ps) == 0:
project_id = post_project(conn, project_name)
print(f'Created new Project:{project_id}')
else:
project_id = ps[0].getId()
return project_id
def set_or_create_dataset(conn, project_id, dataset_name):
"""Create a new Dataset unless one already exists with that name/Project.
Parameter
---------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
project_id : int
Id of Project in which to find/create Dataset.
dataset_name : str
The name of the Dataset needed. If there is no Dataset with a matching
name in the group specified in ``conn``, in the Project specified with
``project_id``, a new Dataset will be created accordingly.
Returns
-------
dataset_id : int
The id of the Dataset that was either found or created.
"""
ds = conn.getObjects('Dataset',
attributes={'name': dataset_name},
opts={'project': project_id})
ds = list(ds)
if len(ds) == 0:
dataset_id = post_dataset(conn, dataset_name, project_id=project_id)
print(f'Created new Dataset:{dataset_id}')
else:
dataset_id = ds[0].getId()
return dataset_id
def set_or_create_screen(conn, screen_name):
"""Create a new Screen unless one already exists with that name.
Parameter
---------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
screen_name : str
The name of the Screen needed. If there is no Screen with a matching
name in the group specified in ``conn``, a new Screen will be created.
Returns
-------
screen_id : int
The id of the Project that was either found or created.
"""
ss = conn.getObjects('Screen', attributes={'name': screen_name})
ss = list(ss)
if len(ss) == 0:
screen_id = post_screen(conn, screen_name)
print(f'Created new Screen:{screen_id}')
else:
screen_id = ss[0].getId()
return screen_id
def multi_post_map_annotation(conn, object_type, object_ids, kv_dict, ns):
"""Create a single new MapAnnotation and link to multiple images.
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object
OMERO connection.
object_type : str
OMERO object type, passed to ``BlitzGateway.getObjects``
object_ids : int or list of ints
IDs of objects to which the new MapAnnotation will be linked.
kv_dict : dict
key-value pairs that will be included in the MapAnnotation
ns : str
Namespace for the MapAnnotation
Notes
-----
All keys and values are converted to strings before saving in OMERO.
Returns
-------
map_ann_id : int
IDs of newly created MapAnnotation
Examples
--------
>>> ns = 'jax.org/jax/example/namespace'
>>> d = {'species': 'human',
'occupation': 'time traveler'
'first name': 'Kyle',
'surname': 'Reese'}
>>> multi_post_map_annotation(conn, "Image", [23,56,78], d, ns)
234
"""
if type(object_ids) not in [list, int]:
raise TypeError('object_ids must be list or integer')
if type(object_ids) is not list:
object_ids = [object_ids]
if len(object_ids) == 0:
raise ValueError('object_ids must contain one or more items')
if type(kv_dict) is not dict:
raise TypeError('kv_dict must be of type `dict`')
kv_pairs = []
for k, v in kv_dict.items():
k = str(k)
v = str(v)
kv_pairs.append([k, v])
map_ann = MapAnnotationWrapper(conn)
map_ann.setNs(str(ns))
map_ann.setValue(kv_pairs)
map_ann.save()
for o in conn.getObjects(object_type, object_ids):
o.linkAnnotation(map_ann)
return map_ann.getId()
# Class definitions
class Importer:
"""Class for managing OMERO imports using OMERO CLI.
Metadata from ``import.json`` (item in 'import_targets') is required for
assigning to Project/Dataset and adding MapAnnotations.
Parameters
----------
conn : ``omero.gateway.BlitzGateway`` object.
OMERO connection.
file_path : pathlike object
Path to the file to imported into OMERO.
import_md : dict
Contains metadata required for import and annotation. Generally, at
item from ``import.json`` ('import_targets').
Attributes
----------
conn : ``omero.gateway.BlitzGateway`` object.
From parameter given at initialization.
file_path : ``pathlib.Path`` object
From parameter given at initialization.
md : dict
From ``import_md`` parameter given at initialization.
session_uuid : str
UUID for OMERO session represented by ``self.conn``. Supplied to
OMERO CLI for connection purposes.
filename : str
Filename of file to be imported. Populated from ``self.md``.
project : str
Name of Project to contain the image. Populated from ``self.md``.
dataset : str
Name of Dataset to contain the image. Poplulated from ``self.md``.
imported : boolean
Flag indicating import status.
image_ids : list of ints
The Ids of the images in OMERO. Populated after a file is imported.
This list may contain one or more images derived from a single file.
"""
def __init__(self, conn, file_path, import_md):
self.conn = conn
self.file_path = Path(file_path)
self.md = import_md
self.session_uuid = conn.getSession().getUuid().val
self.filename = self.md.pop('filename')
if 'project' in self.md.keys():
self.project = self.md.pop('project')
else:
self.project = None
if 'dataset' in self.md.keys():
self.dataset = self.md.pop('dataset')
else:
self.dataset = None
if 'screen' in self.md.keys():
self.screen = self.md.pop('screen')
else:
self.screen = None
self.imported = False
self.image_ids = None
self.plate_ids = None
def get_image_ids(self):
"""Get the Ids of imported images.
Note that this will not find images if they have not been imported.
Also, while image_ids are returned, this method also sets
``self.image_ids``.
Returns
-------
image_ids : list of ints
Ids of images imported from the specified client path, which
itself is derived from ``self.file_path`` and ``self.filename``.
"""
if self.imported is not True:
logging.error(f'File {self.file_path} has not been imported')
return None
else:
q = self.conn.getQueryService()
params = Parameters()
path_query = str(self.file_path).strip('/')
params.map = {"cpath": rstring(path_query)}
results = q.projection(
"SELECT i.id FROM Image i"
" JOIN i.fileset fs"
" JOIN fs.usedFiles u"
" WHERE u.clientPath=:cpath",
params,
self.conn.SERVICE_OPTS
)
self.image_ids = [r[0].val for r in results]
return self.image_ids
def get_plate_ids(self):
"""Get the Ids of imported plates.
Note that this will not find plates if they have not been imported.
Also, while plate_ids are returned, this method also sets
``self.plate_ids``.
Returns
-------
plate_ids : list of ints
Ids of plates imported from the specified client path, which
itself is derived from ``self.file_path`` and ``self.filename``.
"""
if self.imported is not True:
logging.error(f'File {self.file_path} has not been imported')
return None
else:
print("time to get some IDs")
q = self.conn.getQueryService()
print(q)
params = Parameters()
path_query = str(self.file_path).strip('/')
print(f"path query: f{path_query}")
params.map = {"cpath": rstring(path_query)}
print(params)
results = q.projection(
"SELECT DISTINCT p.id FROM Plate p"
" JOIN p.plateAcquisitions pa"
" JOIN pa.wellSample ws"
" JOIN ws.image i"
" JOIN i.fileset fs"
" JOIN fs.usedFiles u"
" WHERE u.clientPath=:cpath",
params,
self.conn.SERVICE_OPTS
)
print(results)
self.plate_ids = [r[0].val for r in results]
return self.plate_ids
def annotate_images(self):
"""Post map annotation (``self.md``) to images ``self.image_ids``.
Returns
-------
map_ann_id : int
The Id of the MapAnnotation that was created.
"""
if len(self.image_ids) == 0:
logging.error('No image ids to annotate')
return None
else:
map_ann_id = multi_post_map_annotation(self.conn, "Image",
self.image_ids, self.md,
CURRENT_MD_NS)
return map_ann_id
def annotate_plates(self):
"""Post map annotation (``self.md``) to plates ``self.plate_ids``.
Returns
-------
map_ann_id : int
The Id of the MapAnnotation that was created.
"""
if len(self.plate_ids) == 0:
logging.error('No plate ids to annotate')
return None
else:
map_ann_id = multi_post_map_annotation(self.conn, "Plate",
self.plate_ids, self.md,
CURRENT_MD_NS)
return map_ann_id
def organize_images(self):
"""Move images to ``self.project``/``self.dataset``.
Returns
-------
image_moved : boolean
True if images were found and moved, else False.
"""
if not self.image_ids:
logging.error('No image ids to organize')
return False
orphans = get_image_ids(self.conn)
for im_id in self.image_ids:
if im_id not in orphans:
logging.error(f'Image:{im_id} not an orphan')
else:
project_id = set_or_create_project(self.conn, self.project)
dataset_id = set_or_create_dataset(self.conn,
project_id,
self.dataset)
link_images_to_dataset(self.conn, [im_id], dataset_id)
print(f'Moved Image:{im_id} to Dataset:{dataset_id}')
return True
def organize_plates(self):
"""Move plates to ``self.screen``.
Returns
-------
plate_moved : boolean
True if plates were found and moved, else False.
"""
if len(self.plate_ids) == 0:
logging.error('No plate ids to organize')
return False
for pl_id in self.plate_ids:
screen_id = set_or_create_screen(self.conn, self.screen)
link_plates_to_screen(self.conn, [pl_id], screen_id)
print(f'Moved Plate:{pl_id} to Screen:{screen_id}')
return True
def import_ln_s(self, host, port):
"""Import file using the ``--transfer=ln_s`` option.
Parameters
----------
host : str
Hostname of OMERO server in which images will be imported.
port : int
Port used to connect to OMERO.server.
Returns
-------
import_status : boolean
True if OMERO import returns a 0 exit status, else False.
"""
cli = CLI()
cli.register('import', ImportControl, '_')
cli.register('sessions', SessionsControl, '_')
cli.invoke(['import',
'-k', self.conn.getSession().getUuid().val,
'-s', host,
'-p', str(port),
'--transfer', 'ln_s',
str(self.file_path)])
if cli.rv == 0:
self.imported = True
print(f'Imported {self.file_path}')
return True
else:
logging.error(f'Import of {self.file_path} has failed!')
return False
| en | null | null |
0026028.py | import librosa
import librosa.filters
import numpy as np
import tensorflow as tf
from scipy import signal
from scipy.io import wavfile
def load_wav(path, sr):
return librosa.core.load(path, sr=sr)[0]
def save_wav(wav, path, sr):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
#proposed by @dsmiller
wavfile.write(path, sr, wav.astype(np.int16))
def save_wavenet_wav(wav, path, sr):
librosa.output.write_wav(path, wav, sr=sr)
def preemphasis(wav, k, preemphasize=True):
if preemphasize:
return signal.lfilter([1, -k], [1], wav)
return wav
def inv_preemphasis(wav, k, inv_preemphasize=True):
if inv_preemphasize:
return signal.lfilter([1], [1, -k], wav)
return wav
#From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py
def start_and_end_indices(quantized, silence_threshold=2):
for start in range(quantized.size):
if abs(quantized[start] - 127) > silence_threshold:
break
for end in range(quantized.size - 1, 1, -1):
if abs(quantized[end] - 127) > silence_threshold:
break
assert abs(quantized[start] - 127) > silence_threshold
assert abs(quantized[end] - 127) > silence_threshold
return start, end
def get_hop_size(hparams):
hop_size = hparams.hop_size
if hop_size is None:
assert hparams.frame_shift_ms is not None
hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
return hop_size
def linearspectrogram(wav, hparams):
D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db
if hparams.signal_normalization:
return _normalize(S, hparams)
return S
def melspectrogram(wav, hparams):
D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db
if hparams.signal_normalization:
return _normalize(S, hparams)
return S
def inv_linear_spectrogram(linear_spectrogram, hparams):
"""Converts linear spectrogram to waveform using librosa"""
if hparams.signal_normalization:
D = _denormalize(linear_spectrogram, hparams)
else:
D = linear_spectrogram
S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear
if hparams.use_lws:
processor = _lws_processor(hparams)
D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
y = processor.istft(D).astype(np.float32)
return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
else:
return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
def inv_mel_spectrogram(mel_spectrogram, hparams):
"""Converts mel spectrogram to waveform using librosa"""
if hparams.signal_normalization:
D = _denormalize(mel_spectrogram, hparams)
else:
D = mel_spectrogram
#print(D)
S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear
#print(S)
if hparams.use_lws:
processor = _lws_processor(hparams)
D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
y = processor.istft(D).astype(np.float32)
return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
else:
return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
def _lws_processor(hparams):
import lws
return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech")
def _griffin_lim(S, hparams):
"""librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
"""
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
S_complex = np.abs(S).astype(np.complex)
y = _istft(S_complex * angles, hparams)
for i in range(hparams.griffin_lim_iters):
angles = np.exp(1j * np.angle(_stft(y, hparams)))
y = _istft(S_complex * angles, hparams)
return y
def _stft(y, hparams):
if hparams.use_lws:
return _lws_processor(hparams).stft(y).T
else:
return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size)
def _istft(y, hparams):
return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size)
##########################################################
#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
def num_frames(length, fsize, fshift):
"""Compute number of time frames of spectrogram
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M
def pad_lr(x, fsize, fshift):
"""Compute left and right padding
"""
M = num_frames(len(x), fsize, fshift)
pad = (fsize - fshift)
T = len(x) + 2 * pad
r = (M - 1) * fshift + fsize - T
return pad, pad + r
##########################################################
#Librosa correct padding
def librosa_pad_lr(x, fsize, fshift):
return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
# Conversions
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram, hparams):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis(hparams)
return np.dot(_mel_basis, spectogram)
def _mel_to_linear(mel_spectrogram, hparams):
global _inv_mel_basis
if _inv_mel_basis is None:
_inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
def _build_mel_basis(hparams):
assert hparams.fmax <= hparams.sample_rate // 2
print(hparams.sample_rate, hparams.n_fft, hparams.num_mels, hparams.fmin, hparams.fmax)
return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels,
fmin=hparams.fmin, fmax=hparams.fmax)
def _amp_to_db(x, hparams):
min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05)
def _normalize(S, hparams):
if hparams.allow_clipping_in_normalization:
if hparams.symmetric_mels:
return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,
-hparams.max_abs_value, hparams.max_abs_value)
else:
return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value)
assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0
if hparams.symmetric_mels:
return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value
else:
return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db))
def _denormalize(D, hparams):
if hparams.allow_clipping_in_normalization:
if hparams.symmetric_mels:
return (((np.clip(D, -hparams.max_abs_value,
hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))
+ hparams.min_level_db)
else:
return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
if hparams.symmetric_mels:
return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db)
else:
return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
| import librosa
import librosa.filters
import numpy as np
import tensorflow as tf
from scipy import signal
from scipy.io import wavfile
def load_wav(path, sr):
return librosa.core.load(path, sr=sr)[0]
def save_wav(wav, path, sr):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
#proposed by @dsmiller
wavfile.write(path, sr, wav.astype(np.int16))
def save_wavenet_wav(wav, path, sr):
librosa.output.write_wav(path, wav, sr=sr)
def preemphasis(wav, k, preemphasize=True):
if preemphasize:
return signal.lfilter([1, -k], [1], wav)
return wav
def inv_preemphasis(wav, k, inv_preemphasize=True):
if inv_preemphasize:
return signal.lfilter([1], [1, -k], wav)
return wav
#From https://github.com/r9y9/wavenet_vocoder/blob/master/audio.py
def start_and_end_indices(quantized, silence_threshold=2):
for start in range(quantized.size):
if abs(quantized[start] - 127) > silence_threshold:
break
for end in range(quantized.size - 1, 1, -1):
if abs(quantized[end] - 127) > silence_threshold:
break
assert abs(quantized[start] - 127) > silence_threshold
assert abs(quantized[end] - 127) > silence_threshold
return start, end
def get_hop_size(hparams):
hop_size = hparams.hop_size
if hop_size is None:
assert hparams.frame_shift_ms is not None
hop_size = int(hparams.frame_shift_ms / 1000 * hparams.sample_rate)
return hop_size
def linearspectrogram(wav, hparams):
D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
S = _amp_to_db(np.abs(D), hparams) - hparams.ref_level_db
if hparams.signal_normalization:
return _normalize(S, hparams)
return S
def melspectrogram(wav, hparams):
D = _stft(preemphasis(wav, hparams.preemphasis, hparams.preemphasize), hparams)
S = _amp_to_db(_linear_to_mel(np.abs(D), hparams), hparams) - hparams.ref_level_db
if hparams.signal_normalization:
return _normalize(S, hparams)
return S
def inv_linear_spectrogram(linear_spectrogram, hparams):
"""Converts linear spectrogram to waveform using librosa"""
if hparams.signal_normalization:
D = _denormalize(linear_spectrogram, hparams)
else:
D = linear_spectrogram
S = _db_to_amp(D + hparams.ref_level_db) #Convert back to linear
if hparams.use_lws:
processor = _lws_processor(hparams)
D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
y = processor.istft(D).astype(np.float32)
return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
else:
return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
def inv_mel_spectrogram(mel_spectrogram, hparams):
"""Converts mel spectrogram to waveform using librosa"""
if hparams.signal_normalization:
D = _denormalize(mel_spectrogram, hparams)
else:
D = mel_spectrogram
#print(D)
S = _mel_to_linear(_db_to_amp(D + hparams.ref_level_db), hparams) # Convert back to linear
#print(S)
if hparams.use_lws:
processor = _lws_processor(hparams)
D = processor.run_lws(S.astype(np.float64).T ** hparams.power)
y = processor.istft(D).astype(np.float32)
return inv_preemphasis(y, hparams.preemphasis, hparams.preemphasize)
else:
return inv_preemphasis(_griffin_lim(S ** hparams.power, hparams), hparams.preemphasis, hparams.preemphasize)
def _lws_processor(hparams):
import lws
return lws.lws(hparams.n_fft, get_hop_size(hparams), fftsize=hparams.win_size, mode="speech")
def _griffin_lim(S, hparams):
"""librosa implementation of Griffin-Lim
Based on https://github.com/librosa/librosa/issues/434
"""
angles = np.exp(2j * np.pi * np.random.rand(*S.shape))
S_complex = np.abs(S).astype(np.complex)
y = _istft(S_complex * angles, hparams)
for i in range(hparams.griffin_lim_iters):
angles = np.exp(1j * np.angle(_stft(y, hparams)))
y = _istft(S_complex * angles, hparams)
return y
def _stft(y, hparams):
if hparams.use_lws:
return _lws_processor(hparams).stft(y).T
else:
return librosa.stft(y=y, n_fft=hparams.n_fft, hop_length=get_hop_size(hparams), win_length=hparams.win_size)
def _istft(y, hparams):
return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams.win_size)
##########################################################
#Those are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)
def num_frames(length, fsize, fshift):
"""Compute number of time frames of spectrogram
"""
pad = (fsize - fshift)
if length % fshift == 0:
M = (length + pad * 2 - fsize) // fshift + 1
else:
M = (length + pad * 2 - fsize) // fshift + 2
return M
def pad_lr(x, fsize, fshift):
"""Compute left and right padding
"""
M = num_frames(len(x), fsize, fshift)
pad = (fsize - fshift)
T = len(x) + 2 * pad
r = (M - 1) * fshift + fsize - T
return pad, pad + r
##########################################################
#Librosa correct padding
def librosa_pad_lr(x, fsize, fshift):
return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]
# Conversions
_mel_basis = None
_inv_mel_basis = None
def _linear_to_mel(spectogram, hparams):
global _mel_basis
if _mel_basis is None:
_mel_basis = _build_mel_basis(hparams)
return np.dot(_mel_basis, spectogram)
def _mel_to_linear(mel_spectrogram, hparams):
global _inv_mel_basis
if _inv_mel_basis is None:
_inv_mel_basis = np.linalg.pinv(_build_mel_basis(hparams))
return np.maximum(1e-10, np.dot(_inv_mel_basis, mel_spectrogram))
def _build_mel_basis(hparams):
assert hparams.fmax <= hparams.sample_rate // 2
print(hparams.sample_rate, hparams.n_fft, hparams.num_mels, hparams.fmin, hparams.fmax)
return librosa.filters.mel(hparams.sample_rate, hparams.n_fft, n_mels=hparams.num_mels,
fmin=hparams.fmin, fmax=hparams.fmax)
def _amp_to_db(x, hparams):
min_level = np.exp(hparams.min_level_db / 20 * np.log(10))
return 20 * np.log10(np.maximum(min_level, x))
def _db_to_amp(x):
return np.power(10.0, (x) * 0.05)
def _normalize(S, hparams):
if hparams.allow_clipping_in_normalization:
if hparams.symmetric_mels:
return np.clip((2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value,
-hparams.max_abs_value, hparams.max_abs_value)
else:
return np.clip(hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db)), 0, hparams.max_abs_value)
assert S.max() <= 0 and S.min() - hparams.min_level_db >= 0
if hparams.symmetric_mels:
return (2 * hparams.max_abs_value) * ((S - hparams.min_level_db) / (-hparams.min_level_db)) - hparams.max_abs_value
else:
return hparams.max_abs_value * ((S - hparams.min_level_db) / (-hparams.min_level_db))
def _denormalize(D, hparams):
if hparams.allow_clipping_in_normalization:
if hparams.symmetric_mels:
return (((np.clip(D, -hparams.max_abs_value,
hparams.max_abs_value) + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value))
+ hparams.min_level_db)
else:
return ((np.clip(D, 0, hparams.max_abs_value) * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
if hparams.symmetric_mels:
return (((D + hparams.max_abs_value) * -hparams.min_level_db / (2 * hparams.max_abs_value)) + hparams.min_level_db)
else:
return ((D * -hparams.min_level_db / hparams.max_abs_value) + hparams.min_level_db)
| en | null | null |
0028922.py | from buildtest.cli.help import buildtest_help
def test_buildtest_help():
buildtest_help(command="build")
buildtest_help(command="buildspec")
buildtest_help(command="config")
buildtest_help(command="cdash")
buildtest_help(command="history")
buildtest_help(command="inspect")
buildtest_help(command="report")
buildtest_help(command="schema")
buildtest_help(command="stylecheck")
buildtest_help(command="unittests")
| from buildtest.cli.help import buildtest_help
def test_buildtest_help():
buildtest_help(command="build")
buildtest_help(command="buildspec")
buildtest_help(command="config")
buildtest_help(command="cdash")
buildtest_help(command="history")
buildtest_help(command="inspect")
buildtest_help(command="report")
buildtest_help(command="schema")
buildtest_help(command="stylecheck")
buildtest_help(command="unittests")
| en | null | null |
0037594.py | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test resurrection of mined transactions when
# the blockchain is re-organized.
#
from test_framework import BitcoinTestFramework
from tucoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
blocks = []
blocks.extend(self.nodes[0].setgenerate(True, 1))
spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
blocks.extend(self.nodes[0].setgenerate(True, 1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidateblock(blocks[0])
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].setgenerate(True, 1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test resurrection of mined transactions when
# the blockchain is re-organized.
#
from test_framework import BitcoinTestFramework
from tucoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend block 1/2/3's coinbase transactions
# Mine a block.
# Create three more transactions, spending the spends
# Mine another block.
# ... make sure all the transactions are confirmed
# Invalidate both blocks
# ... make sure all the transactions are put back in the mempool
# Mine a new block
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spends1_raw = [ self.create_tx(txid, node0_address, 50) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
blocks = []
blocks.extend(self.nodes[0].setgenerate(True, 1))
spends2_raw = [ self.create_tx(txid, node0_address, 49.99) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
blocks.extend(self.nodes[0].setgenerate(True, 1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidateblock to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidateblock(blocks[0])
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another block, they should all get mined
self.nodes[0].setgenerate(True, 1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| en | null | null |
0010807.py | #!/usr/bin/env python3
import logging
import sys
import subprocess
from taupage import configure_logging, get_config
def main():
"""Configure custom sysctl parameters
If a sysctl section is present, add the valid parameters to sysctl and reloads.
"""
CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'
configure_logging()
config = get_config()
sysctl = config.get('sysctl')
if sysctl is None:
sys.exit(0)
try:
sysctl_entries = ['{} = {}'.format(key, value) for key, value in sysctl.items()]
with open(CUSTOM_SYSCTL_CONF, 'w') as file:
file.write('\n'.join(sysctl_entries)+'\n')
logging.info('Successfully written sysctl parameters')
except Exception as e:
logging.error('Failed to write sysctl parameters')
logging.exception(e)
sys.exit(1)
try:
exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
if exitcode != 0:
logging.error('Reloading sysctl failed with exitcode {}'.format(exitcode))
sys.exit(1)
logging.info('Successfully reloaded sysctl parameters')
except Exception as e:
logging.error('Failed to reload sysctl')
logging.exception(e)
sys.exit(1)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import logging
import sys
import subprocess
from taupage import configure_logging, get_config
def main():
"""Configure custom sysctl parameters
If a sysctl section is present, add the valid parameters to sysctl and reloads.
"""
CUSTOM_SYSCTL_CONF = '/etc/sysctl.d/99-custom.conf'
configure_logging()
config = get_config()
sysctl = config.get('sysctl')
if sysctl is None:
sys.exit(0)
try:
sysctl_entries = ['{} = {}'.format(key, value) for key, value in sysctl.items()]
with open(CUSTOM_SYSCTL_CONF, 'w') as file:
file.write('\n'.join(sysctl_entries)+'\n')
logging.info('Successfully written sysctl parameters')
except Exception as e:
logging.error('Failed to write sysctl parameters')
logging.exception(e)
sys.exit(1)
try:
exitcode = subprocess.call(['/sbin/sysctl', '-p', CUSTOM_SYSCTL_CONF])
if exitcode != 0:
logging.error('Reloading sysctl failed with exitcode {}'.format(exitcode))
sys.exit(1)
logging.info('Successfully reloaded sysctl parameters')
except Exception as e:
logging.error('Failed to reload sysctl')
logging.exception(e)
sys.exit(1)
if __name__ == '__main__':
main()
| en | null | null |
0017586.py | # flake8: noqa
from typing import Any
from fugue_version import __version__
from IPython import get_ipython
from IPython.display import Javascript
from fugue_notebook.env import NotebookSetup, _setup_fugue_notebook
_HIGHLIGHT_JS = r"""
require(["codemirror/lib/codemirror"]);
function set(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var fugue_keywords = "fill hash rand even presort persist broadcast params process output outtransform rowcount concurrency prepartition zip print title save append parquet csv json single checkpoint weak strong deterministic yield connect sample seed take sub callback dataframe file";
CodeMirror.defineMIME("text/x-fsql", {
name: "sql",
keywords: set(fugue_keywords + " add after all alter analyze and anti archive array as asc at between bucket buckets by cache cascade case cast change clear cluster clustered codegen collection column columns comment commit compact compactions compute concatenate cost create cross cube current current_date current_timestamp database databases data dbproperties defined delete delimited deny desc describe dfs directories distinct distribute drop else end escaped except exchange exists explain export extended external false fields fileformat first following for format formatted from full function functions global grant group grouping having if ignore import in index indexes inner inpath inputformat insert intersect interval into is items join keys last lateral lazy left like limit lines list load local location lock locks logical macro map minus msck natural no not null nulls of on optimize option options or order out outer outputformat over overwrite partition partitioned partitions percent preceding principals purge range recordreader recordwriter recover reduce refresh regexp rename repair replace reset restrict revoke right rlike role roles rollback rollup row rows schema schemas select semi separated serde serdeproperties set sets show skewed sort sorted start statistics stored stratify struct table tables tablesample tblproperties temp temporary terminated then to touch transaction transactions transform true truncate unarchive unbounded uncache union unlock unset use using values view when where window with"),
builtin: set("date datetime tinyint smallint int bigint boolean float double string binary timestamp decimal array map struct uniontype delimited serde sequencefile textfile rcfile inputformat outputformat"),
atoms: set("false true null"),
operatorChars: /^[*\/+\-%<>!=~&|^]/,
dateSQL: set("time"),
support: set("ODBCdotTable doubleQuote zerolessFloat")
});
CodeMirror.modeInfo.push( {
name: "Fugue SQL",
mime: "text/x-fsql",
mode: "sql"
} );
require(['notebook/js/codecell'], function(codecell) {
codecell.CodeCell.options_default.highlight_modes['magic_text/x-fsql'] = {'reg':[/%%fsql/]} ;
Jupyter.notebook.events.on('kernel_ready.Kernel', function(){
Jupyter.notebook.get_cells().map(function(cell){
if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ;
});
});
"""
def load_ipython_extension(ip: Any) -> None:
"""Entrypoint for IPython %load_ext"""
_setup_fugue_notebook(ip, None)
def _jupyter_nbextension_paths():
"""Entrypoint for Jupyter extension"""
return [
{
"section": "notebook",
"src": "nbextension",
"dest": "fugue_notebook",
"require": "fugue_notebook/main",
}
]
def setup(notebook_setup: Any = None, is_lab: bool = False) -> Any:
"""Setup the notebook environment inside notebook without
installing the jupyter extension or loading ipython extension
:param notebook_setup: ``None`` or an instance of
:class:`~.fugue_notebook.env.NotebookSetup`, defaults to None
"""
ip = get_ipython()
_setup_fugue_notebook(ip, notebook_setup)
if not is_lab:
return Javascript(_HIGHLIGHT_JS)
| # flake8: noqa
from typing import Any
from fugue_version import __version__
from IPython import get_ipython
from IPython.display import Javascript
from fugue_notebook.env import NotebookSetup, _setup_fugue_notebook
_HIGHLIGHT_JS = r"""
require(["codemirror/lib/codemirror"]);
function set(str) {
var obj = {}, words = str.split(" ");
for (var i = 0; i < words.length; ++i) obj[words[i]] = true;
return obj;
}
var fugue_keywords = "fill hash rand even presort persist broadcast params process output outtransform rowcount concurrency prepartition zip print title save append parquet csv json single checkpoint weak strong deterministic yield connect sample seed take sub callback dataframe file";
CodeMirror.defineMIME("text/x-fsql", {
name: "sql",
keywords: set(fugue_keywords + " add after all alter analyze and anti archive array as asc at between bucket buckets by cache cascade case cast change clear cluster clustered codegen collection column columns comment commit compact compactions compute concatenate cost create cross cube current current_date current_timestamp database databases data dbproperties defined delete delimited deny desc describe dfs directories distinct distribute drop else end escaped except exchange exists explain export extended external false fields fileformat first following for format formatted from full function functions global grant group grouping having if ignore import in index indexes inner inpath inputformat insert intersect interval into is items join keys last lateral lazy left like limit lines list load local location lock locks logical macro map minus msck natural no not null nulls of on optimize option options or order out outer outputformat over overwrite partition partitioned partitions percent preceding principals purge range recordreader recordwriter recover reduce refresh regexp rename repair replace reset restrict revoke right rlike role roles rollback rollup row rows schema schemas select semi separated serde serdeproperties set sets show skewed sort sorted start statistics stored stratify struct table tables tablesample tblproperties temp temporary terminated then to touch transaction transactions transform true truncate unarchive unbounded uncache union unlock unset use using values view when where window with"),
builtin: set("date datetime tinyint smallint int bigint boolean float double string binary timestamp decimal array map struct uniontype delimited serde sequencefile textfile rcfile inputformat outputformat"),
atoms: set("false true null"),
operatorChars: /^[*\/+\-%<>!=~&|^]/,
dateSQL: set("time"),
support: set("ODBCdotTable doubleQuote zerolessFloat")
});
CodeMirror.modeInfo.push( {
name: "Fugue SQL",
mime: "text/x-fsql",
mode: "sql"
} );
require(['notebook/js/codecell'], function(codecell) {
codecell.CodeCell.options_default.highlight_modes['magic_text/x-fsql'] = {'reg':[/%%fsql/]} ;
Jupyter.notebook.events.on('kernel_ready.Kernel', function(){
Jupyter.notebook.get_cells().map(function(cell){
if (cell.cell_type == 'code'){ cell.auto_highlight(); } }) ;
});
});
"""
def load_ipython_extension(ip: Any) -> None:
"""Entrypoint for IPython %load_ext"""
_setup_fugue_notebook(ip, None)
def _jupyter_nbextension_paths():
"""Entrypoint for Jupyter extension"""
return [
{
"section": "notebook",
"src": "nbextension",
"dest": "fugue_notebook",
"require": "fugue_notebook/main",
}
]
def setup(notebook_setup: Any = None, is_lab: bool = False) -> Any:
"""Setup the notebook environment inside notebook without
installing the jupyter extension or loading ipython extension
:param notebook_setup: ``None`` or an instance of
:class:`~.fugue_notebook.env.NotebookSetup`, defaults to None
"""
ip = get_ipython()
_setup_fugue_notebook(ip, notebook_setup)
if not is_lab:
return Javascript(_HIGHLIGHT_JS)
| en | null | null |
Language Decoded | Multilingual Code Dataset
Multilingual Python code datasets for the Language Decoded project (part of Cohere's Tiny Aya Expedition), investigating whether code's reasoning benefit for language models is language-dependent or structure-dependent.
Research Question
Does fine-tuning on non-English code (Python with translated keywords) improve multilingual reasoning as much as English code does?
Prior work (Aryabumi et al., 2024 -- "To Code or Not to Code") demonstrated that including English code in pre-training data improves downstream reasoning performance by approximately 8%. However, that study only tested English code. This dataset enables the natural follow-up: does the reasoning benefit come from the structure of code, or from the language of its keywords?
Dataset Description
This dataset provides filtered, quality-controlled Python source code in four configurations: the original English and three keyword-swapped variants (Chinese, Spanish, Urdu). The source data is drawn from bigcode/the-stack-dedup (Python subset), filtered for quality using the following criteria:
- AST-valid Python only (must parse without errors)
- Permissive licenses only (MIT, Apache-2.0, BSD, etc.)
- 10--1000 lines of code
- Minimum 21 GitHub stars
- No autogenerated files
- SHA-256 deduplication
Keyword-swapped variants are produced using Legesher v0.7.3, which translates Python reserved words (37 keywords, 72 builtins, 66 exceptions) into the target language while preserving code structure and semantics.
Available Configs
| Config | Condition | Language | Description |
|---|---|---|---|
condition-1-en |
Condition 1 (control) | English | Unmodified filtered Python from The Stack Dedup |
condition-2-ur |
Condition 2 | Urdu | Keyword-swapped Python -- 37 keywords, 72 builtins, 66 exceptions translated via Legesher v0.7.3 |
condition-2-zh |
Condition 2 | Chinese | Keyword-swapped Python -- same transpilation method |
condition-2-es |
Condition 2 | Spanish | Keyword-swapped Python -- same transpilation method |
condition-3-zh-5k |
Condition 3 | Chinese | Blended: 3,486 native Chinese code + 1,514 transpiled Python (see Condition 3 section below) |
Schema
Conditions 1--2
| Column | Type | Description |
|---|---|---|
code |
string | Python source code. For condition-2 configs, this is the transpiled (keyword-swapped) version. For condition-1, this is the original English source. |
code_en |
string | Original English Python source code. Identical to code for condition-1-en. |
language |
string | ISO 639-1 language code: en, ur, zh, or es. |
file_path |
string | Original file path in The Stack Dedup. |
license |
string | SPDX license identifier for the source file. |
token_count |
int64 | Token count computed using the CohereLabs/tiny-aya-base tokenizer. |
Condition 3
Condition 3 blends native Chinese code with transpiled code and adds a source_type column to distinguish them. code_en is populated for transpiled rows (keeping them in sync with conditions 1--2) but null for native code rows, which have no English equivalent.
| Column | Type | Description |
|---|---|---|
file_path |
string | File identifier (native filename or transpiled file path) |
code |
string | The code content (native or transpiled) |
code_en |
string/null | English original -- populated for transpiled rows, null for native code rows |
language |
string | ISO 639-1 language code (zh) |
license |
string | Source license (SPDX identifier, UNKNOWN, or varies) |
token_count |
int64 | Token count computed using the CohereLabs/tiny-aya-base tokenizer |
source_type |
string | "native" (natively Chinese-authored) or "transpiled" (keyword-swapped English) |
Experimental Conditions
The Language Decoded experiment uses a ladder of six conditions to isolate the mechanism behind code's reasoning benefit. This dataset currently provides data for conditions 1 and 2:
| Condition | Name | Purpose |
|---|---|---|
| Baseline | No fine-tuning | Establishes the performance floor |
| Condition 1 | English code | Tests whether code fine-tuning helps at all (replicates Aryabumi et al.) |
| Condition 2 | Keyword-swapped code | Tests whether the language of keywords matters for the reasoning benefit |
| Condition 3 | Mixed native sources | Tests whether diverse native-language code adds value beyond keyword swapping |
| Conditions 4--6 | (planned) | Additional controls not yet included in this dataset |
Usage
from datasets import load_dataset
# Load English code (control)
ds = load_dataset("legesher/language-decoded-data", "condition-1-en")
# Load a keyword-swapped variant
ds = load_dataset("legesher/language-decoded-data", "condition-2-ur")
ds = load_dataset("legesher/language-decoded-data", "condition-2-zh")
ds = load_dataset("legesher/language-decoded-data", "condition-2-es")
# Load blended native + transpiled (condition 3)
ds = load_dataset("legesher/language-decoded-data", "condition-3-zh-5k")
# Access splits
train = ds["train"]
val = ds["validation"]
# Filter condition-3 by source type
native_only = train.filter(lambda x: x["source_type"] == "native")
Technical Details
| Parameter | Value |
|---|---|
| Source dataset | bigcode/the-stack-dedup (Python subset) |
| Transpilation tool | Legesher v0.7.3 (legesher-core, legesher-i18n) |
| Tokenizer | CohereLabs/tiny-aya-base |
| Base model | CohereLabs/tiny-aya-base (3.35B params) |
| Train/validation split | 90% / 10% (seed 42) |
| File format | Parquet (snappy compression) |
| Filtering criteria | AST-valid, permissive licenses, 10--1000 lines, min 21 GitHub stars, no autogenerated files, SHA-256 deduplication |
Citation
@misc{language-decoded-2026,
title={Language Decoded: Investigating Language-Dependent vs. Structure-Dependent Reasoning Benefits of Code},
author={Madison Edgar and Saad Bazaz and Rafay Mustafa and Sarah Jawaid and Rashik Shahjahan and Khojasteh Mirza and Sohaib Bazaz},
year={2026},
publisher={Hugging Face},
url={https://huggingface.co/datasets/legesher/language-decoded-data}
}
Links
License
Apache 2.0
- Downloads last month
- 62