max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
PythonExercicios/Mundo 2/9_estrutura_de_repeticao_while/ex061.py | GuilhermoCampos/Curso-Python3-curso-em-video | 0 | 6618751 | termo = int(input('Digite o Primeiro Termo da PA: '))
razao = int(input('Digite a Razão: '))
acumulador = termo
contador = 0
while contador != 10:
print(acumulador, '→ ' if contador != 9 else '', end='')
acumulador += razao
contador += 1
| termo = int(input('Digite o Primeiro Termo da PA: '))
razao = int(input('Digite a Razão: '))
acumulador = termo
contador = 0
while contador != 10:
print(acumulador, '→ ' if contador != 9 else '', end='')
acumulador += razao
contador += 1
| none | 1 | 3.917728 | 4 | |
workflow/scripts/get_sample_names.py | rx32940/Bacterial_Genome_Analysis_Toolbox | 0 | 6618752 | <reponame>rx32940/Bacterial_Genome_Analysis_Toolbox
import os
with open(snakemake.output[0], "w") as f:
for file in os.listdir(snakemake.input[0]):
f.write(os.path.splitext(file)[0] + "\n")
| import os
with open(snakemake.output[0], "w") as f:
for file in os.listdir(snakemake.input[0]):
f.write(os.path.splitext(file)[0] + "\n") | none | 1 | 2.448104 | 2 | |
ingredients/corpus.py | kata-ai/id-word2vec | 11 | 6618753 | <filename>ingredients/corpus.py
##########################################################################
# Copyright 2019 Kata.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
from itertools import chain
from pathlib import Path
import gzip
import json
from sacred import Ingredient
ing = Ingredient('corpus')
@ing.config
def default():
# path to the corpus directory
path = 'tempo'
# product type (kt, mbm, all)
product = 'all'
# read kt corpus starting from this year
kt_begin = 2005
# stop reading kt corpus at this year (inclusive)
kt_end = 2014
# read mbm corpus starting from this year
mbm_begin = 1999
# stop reading mbm corpus at this year (inclusive)
mbm_end = 2014
# file encoding to use
encoding = 'utf-8'
@ing.capture
def read_corpus(
path,
_log,
product='all',
kt_begin=2005,
kt_end=2014,
mbm_begin=1999,
mbm_end=2014,
encoding='utf-8'):
path = Path(path)
if product in ('kt', 'mbm'):
path = path / product
begin, end = (kt_begin, kt_end) if product == 'kt' else (mbm_begin, mbm_end)
_log.info('Reading corpus from %s year %s-%s', path, begin, end)
return _read(path, begin, end, encoding=encoding)
assert product == 'all', "product must be one of 'kt', 'mbm', or 'all'"
_log.info('Reading corpus from %s year %s-%s', path / 'kt', kt_begin, kt_end)
kt_corpus = _read(path / 'kt', kt_begin, kt_end, encoding=encoding)
_log.info('Reading corpus from %s year %s-%s', path / 'mbm', mbm_begin, mbm_end)
mbm_corpus = _read(path / 'mbm', mbm_begin, mbm_end, encoding=encoding)
return chain(kt_corpus, mbm_corpus)
def _read(corpus_dir, begin_year, end_year, encoding='utf-8'):
for year in range(begin_year, end_year + 1):
path = corpus_dir / f'{year}.jsonl'
if not path.exists():
path = corpus_dir / f'{year}.jsonl.gz'
open_fn = gzip.open if path.name.endswith('.gz') else open
with open_fn(path, 'rb') as f:
for line in f:
yield json.loads(line.decode(encoding).strip())['paragraphs']
| <filename>ingredients/corpus.py
##########################################################################
# Copyright 2019 Kata.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
from itertools import chain
from pathlib import Path
import gzip
import json
from sacred import Ingredient
ing = Ingredient('corpus')
@ing.config
def default():
# path to the corpus directory
path = 'tempo'
# product type (kt, mbm, all)
product = 'all'
# read kt corpus starting from this year
kt_begin = 2005
# stop reading kt corpus at this year (inclusive)
kt_end = 2014
# read mbm corpus starting from this year
mbm_begin = 1999
# stop reading mbm corpus at this year (inclusive)
mbm_end = 2014
# file encoding to use
encoding = 'utf-8'
@ing.capture
def read_corpus(
path,
_log,
product='all',
kt_begin=2005,
kt_end=2014,
mbm_begin=1999,
mbm_end=2014,
encoding='utf-8'):
path = Path(path)
if product in ('kt', 'mbm'):
path = path / product
begin, end = (kt_begin, kt_end) if product == 'kt' else (mbm_begin, mbm_end)
_log.info('Reading corpus from %s year %s-%s', path, begin, end)
return _read(path, begin, end, encoding=encoding)
assert product == 'all', "product must be one of 'kt', 'mbm', or 'all'"
_log.info('Reading corpus from %s year %s-%s', path / 'kt', kt_begin, kt_end)
kt_corpus = _read(path / 'kt', kt_begin, kt_end, encoding=encoding)
_log.info('Reading corpus from %s year %s-%s', path / 'mbm', mbm_begin, mbm_end)
mbm_corpus = _read(path / 'mbm', mbm_begin, mbm_end, encoding=encoding)
return chain(kt_corpus, mbm_corpus)
def _read(corpus_dir, begin_year, end_year, encoding='utf-8'):
for year in range(begin_year, end_year + 1):
path = corpus_dir / f'{year}.jsonl'
if not path.exists():
path = corpus_dir / f'{year}.jsonl.gz'
open_fn = gzip.open if path.name.endswith('.gz') else open
with open_fn(path, 'rb') as f:
for line in f:
yield json.loads(line.decode(encoding).strip())['paragraphs']
| en | 0.683951 | ########################################################################## # Copyright 2019 Kata.ai # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ########################################################################## # path to the corpus directory # product type (kt, mbm, all) # read kt corpus starting from this year # stop reading kt corpus at this year (inclusive) # read mbm corpus starting from this year # stop reading mbm corpus at this year (inclusive) # file encoding to use | 2.150018 | 2 |
polo_pingpong_var.py | xcbtrader/polo_pigpong_var | 1 | 6618754 | <reponame>xcbtrader/polo_pigpong_var
__author__ = 'xcbtrader'
# -*- coding: utf-8 -*-
import poloniex
import time
import sys
def leer_operativa():
fOperativa = open('polo_pingpong_var_operativa.txt', 'r')
op = fOperativa.readline()
op = int(fOperativa.readline())
fOperativa.close()
return op
def leer_ordenes():
global polo
try:
openOrders = polo.returnOpenOrders('USDT_BTC')
return openOrders
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL LEER LAS ORDENES ABIERTAS ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def realizar_compra(last,margen, saldoUSDTinv):
global polo
precio_compra = last - (last * margen)
try:
make_order_buy = polo.buy('USDT_BTC',precio_compra,saldoUSDTinv/precio_compra)
print('-------------------------------------------------------')
print('*** CREADA ORDEN DE COMPRA NUM ' + make_order_buy['orderNumber'] + ' - PRECIO: ' + str(precio_compra) + ' $ - IVERSION: ' + str(saldoUSDTinv) + ' - BTC: ' + str(saldoUSDTinv/precio_compra) + ' ***')
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CREAR ORDEN DE COMPRA ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def realizar_venta(last, margen, saldoBTCinv):
global polo
precio_venta = last + (last * margen)
try:
make_order_sell = polo.sell('USDT_BTC', precio_venta, saldoBTCinv)
print('*** CREADA ORDEN DE VENTA NUM ' + make_order_sell['orderNumber'] + ' - PRECIO: ' + str(precio_venta) + ' $ - IVERSION: ' + str(saldoBTCinv) + ' - USD: ' + str(saldoBTCinv * precio_venta) + ' ***')
print('-------------------------------------------------------')
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CREAR ORDEN DE VENTA ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def realizar_ordenes(margen, saldoUSDT, saldoUSDTinv, saldoBTC, saldoBTCinv):
global polo
try:
ticker = polo.returnTicker()
t = ticker['USDT_BTC']
last = float(t['last'])
if last > 100:
realizar_compra(last,margen, saldoUSDTinv)
time.sleep(15)
realizar_venta(last, margen, saldoBTCinv)
print('### ORDENES REALIZADAS CORRECTAMENTE - ESPERANDO 5 MINUTOS ###')
time.sleep(300)
else:
print('### ERROR AL LEER VALOR ACTUAL btc ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL LEER VALOR ACTUAL btc ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def leer_balance():
global polo
err = True
while err:
try:
balance = polo.returnBalances()
saldoUSDT = float(balance['USDT'])
saldoBTC = float(balance['BTC'])
return saldoUSDT, saldoBTC
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL LEER SALDOS DE LA CUENTA ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def crear_ordenes(margen):
saldoUSDT, saldoBTC = leer_balance()
saldoUSDTinv = saldoUSDT/2
saldoBTCinv = saldoBTC/2
if saldoBTC < 0.0005 or saldoUSDT < 1:
print('### ERROR SALDO INSUFICIENTE PARA REALIZAR ORDEN ###')
print('### ESPERANDO NUEVO SALDO ###')
time.sleep(300)
else:
realizar_ordenes(margen, saldoUSDT, saldoUSDTinv, saldoBTC, saldoBTCinv)
# PROGRAMA PRINCIPAL ##################################################################
global polo
print('')
print('**************************************************************')
print(' INICIANDO BOT POLONIEX PingPong VARIABLE')
print('**************************************************************')
print('')
API_key = 'AQUI PONER NUESTRA API key'
Secret = 'AQUI PONER EL SECRET DE NUESTRA API key'
err = True
while err:
try:
polo = poloniex.Poloniex(API_key,Secret)
err = False
print('### CONECTADO CORRECTAMENTE A LA API DE POLONIEX ###')
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CONECTAR CON API POLONEX ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
tot_buy = 0
tot_sell= 0
margen = 0.0
while margen <0.5:
m = str(input('Entra margen de beneficio (>=0.5) :? '))
margen = float(m.replace(',','.'))
margen = margen/100
n = 1
while True:
openOrders = leer_ordenes()
nOrdenes = len(openOrders)
operativa = leer_operativa()
if operativa == 1 or operativa == 2:
if nOrdenes == 0 : #Escenario sin inversion. Poner 2 ordenes
crear_ordenes(margen)
elif nOrdenes == 1: #Escenario Con una orden cerrada y una orden abierta. Hay que cerrarla si hay suficiente saldo para nueva inversion
for no in openOrders:
num_orden_cerrar = no['orderNumber']
tipo_orden_cerrar = no['type']
try:
if tipo_orden_cerrar == 'sell':
tot_buy +=1
elif tipo_orden_cerrar == 'buy':
tot_sell +=1
saldoUSDT, saldoBTC = leer_balance()
if saldoBTC < 0.0005 or saldoUSDT < 1:
print('### ERROR SALDO INSUFICIENTE PARA CANCELAR ORDEN ###')
print('### ESPERANDO QUE SE CIERRE ORDEN PENDIENTE ###')
time.sleep(300)
else:
cancelar_orden = polo.cancelOrder(num_orden_cerrar)
print('### CANCELADA ORDEN: ' + str(num_orden_cerrar))
print('### ESPERANDO 2 MINUTOS PARA CONTINUAR ###')
time.sleep(120)
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CANCELAR ORDEN ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
elif nOrdenes == 2: # Escenario con toda la inversion aun abierta. No hacer nada
ticker = polo.returnTicker()
t = ticker['USDT_BTC']
last = float(t['last'])
print('-------------------------------------------------------')
print(str(n) + ') Buy Ord: ' + str(tot_buy) + ' - Sell Ord: ' + str(tot_sell) + ' - btc = ' + str(last) + ' $')
n +=1
for orde in openOrders:
print(orde['type'] + ' - ' + orde['date'] + ' - ' + orde['rate'] + ' $ - ' + orde['amount'] + ' btc')
print('-------------------------------------------------------')
print('### ESPERANDO 5 MINUTOS PARA CONTINUAR ###')
time.sleep(300)
elif nOrdenes > 2: # Escenario ERROR demasiadas ordenes abiertas
print('### ERROR - DEMASIADAS ORDENES ABIERTAS - Max 2 ORDENES. Act. ' + str(nOrdenes) + ' ABIERTAS ###')
print('### ESPERANDO A QUE SE CIERREN ###')
time.sleep(300)
else:
print ('### PROCESO CANCELADO ###')
exit()
if operativa == 2:
print ('### PROCESO FINALIZADO ###')
exit()
| __author__ = 'xcbtrader'
# -*- coding: utf-8 -*-
import poloniex
import time
import sys
def leer_operativa():
fOperativa = open('polo_pingpong_var_operativa.txt', 'r')
op = fOperativa.readline()
op = int(fOperativa.readline())
fOperativa.close()
return op
def leer_ordenes():
global polo
try:
openOrders = polo.returnOpenOrders('USDT_BTC')
return openOrders
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL LEER LAS ORDENES ABIERTAS ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def realizar_compra(last,margen, saldoUSDTinv):
global polo
precio_compra = last - (last * margen)
try:
make_order_buy = polo.buy('USDT_BTC',precio_compra,saldoUSDTinv/precio_compra)
print('-------------------------------------------------------')
print('*** CREADA ORDEN DE COMPRA NUM ' + make_order_buy['orderNumber'] + ' - PRECIO: ' + str(precio_compra) + ' $ - IVERSION: ' + str(saldoUSDTinv) + ' - BTC: ' + str(saldoUSDTinv/precio_compra) + ' ***')
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CREAR ORDEN DE COMPRA ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def realizar_venta(last, margen, saldoBTCinv):
global polo
precio_venta = last + (last * margen)
try:
make_order_sell = polo.sell('USDT_BTC', precio_venta, saldoBTCinv)
print('*** CREADA ORDEN DE VENTA NUM ' + make_order_sell['orderNumber'] + ' - PRECIO: ' + str(precio_venta) + ' $ - IVERSION: ' + str(saldoBTCinv) + ' - USD: ' + str(saldoBTCinv * precio_venta) + ' ***')
print('-------------------------------------------------------')
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CREAR ORDEN DE VENTA ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def realizar_ordenes(margen, saldoUSDT, saldoUSDTinv, saldoBTC, saldoBTCinv):
global polo
try:
ticker = polo.returnTicker()
t = ticker['USDT_BTC']
last = float(t['last'])
if last > 100:
realizar_compra(last,margen, saldoUSDTinv)
time.sleep(15)
realizar_venta(last, margen, saldoBTCinv)
print('### ORDENES REALIZADAS CORRECTAMENTE - ESPERANDO 5 MINUTOS ###')
time.sleep(300)
else:
print('### ERROR AL LEER VALOR ACTUAL btc ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL LEER VALOR ACTUAL btc ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def leer_balance():
global polo
err = True
while err:
try:
balance = polo.returnBalances()
saldoUSDT = float(balance['USDT'])
saldoBTC = float(balance['BTC'])
return saldoUSDT, saldoBTC
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL LEER SALDOS DE LA CUENTA ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
def crear_ordenes(margen):
saldoUSDT, saldoBTC = leer_balance()
saldoUSDTinv = saldoUSDT/2
saldoBTCinv = saldoBTC/2
if saldoBTC < 0.0005 or saldoUSDT < 1:
print('### ERROR SALDO INSUFICIENTE PARA REALIZAR ORDEN ###')
print('### ESPERANDO NUEVO SALDO ###')
time.sleep(300)
else:
realizar_ordenes(margen, saldoUSDT, saldoUSDTinv, saldoBTC, saldoBTCinv)
# PROGRAMA PRINCIPAL ##################################################################
global polo
print('')
print('**************************************************************')
print(' INICIANDO BOT POLONIEX PingPong VARIABLE')
print('**************************************************************')
print('')
API_key = 'AQUI PONER NUESTRA API key'
Secret = 'AQUI PONER EL SECRET DE NUESTRA API key'
err = True
while err:
try:
polo = poloniex.Poloniex(API_key,Secret)
err = False
print('### CONECTADO CORRECTAMENTE A LA API DE POLONIEX ###')
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CONECTAR CON API POLONEX ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
tot_buy = 0
tot_sell= 0
margen = 0.0
while margen <0.5:
m = str(input('Entra margen de beneficio (>=0.5) :? '))
margen = float(m.replace(',','.'))
margen = margen/100
n = 1
while True:
openOrders = leer_ordenes()
nOrdenes = len(openOrders)
operativa = leer_operativa()
if operativa == 1 or operativa == 2:
if nOrdenes == 0 : #Escenario sin inversion. Poner 2 ordenes
crear_ordenes(margen)
elif nOrdenes == 1: #Escenario Con una orden cerrada y una orden abierta. Hay que cerrarla si hay suficiente saldo para nueva inversion
for no in openOrders:
num_orden_cerrar = no['orderNumber']
tipo_orden_cerrar = no['type']
try:
if tipo_orden_cerrar == 'sell':
tot_buy +=1
elif tipo_orden_cerrar == 'buy':
tot_sell +=1
saldoUSDT, saldoBTC = leer_balance()
if saldoBTC < 0.0005 or saldoUSDT < 1:
print('### ERROR SALDO INSUFICIENTE PARA CANCELAR ORDEN ###')
print('### ESPERANDO QUE SE CIERRE ORDEN PENDIENTE ###')
time.sleep(300)
else:
cancelar_orden = polo.cancelOrder(num_orden_cerrar)
print('### CANCELADA ORDEN: ' + str(num_orden_cerrar))
print('### ESPERANDO 2 MINUTOS PARA CONTINUAR ###')
time.sleep(120)
except KeyboardInterrupt:
exit()
except Exception:
print('### ERROR AL CANCELAR ORDEN ###')
print('### ESPERANDO 30 SEGUNDOS ###')
time.sleep(30)
elif nOrdenes == 2: # Escenario con toda la inversion aun abierta. No hacer nada
ticker = polo.returnTicker()
t = ticker['USDT_BTC']
last = float(t['last'])
print('-------------------------------------------------------')
print(str(n) + ') Buy Ord: ' + str(tot_buy) + ' - Sell Ord: ' + str(tot_sell) + ' - btc = ' + str(last) + ' $')
n +=1
for orde in openOrders:
print(orde['type'] + ' - ' + orde['date'] + ' - ' + orde['rate'] + ' $ - ' + orde['amount'] + ' btc')
print('-------------------------------------------------------')
print('### ESPERANDO 5 MINUTOS PARA CONTINUAR ###')
time.sleep(300)
elif nOrdenes > 2: # Escenario ERROR demasiadas ordenes abiertas
print('### ERROR - DEMASIADAS ORDENES ABIERTAS - Max 2 ORDENES. Act. ' + str(nOrdenes) + ' ABIERTAS ###')
print('### ESPERANDO A QUE SE CIERREN ###')
time.sleep(300)
else:
print ('### PROCESO CANCELADO ###')
exit()
if operativa == 2:
print ('### PROCESO FINALIZADO ###')
exit() | es | 0.544028 | # -*- coding: utf-8 -*- ## ERROR AL LEER LAS ORDENES ABIERTAS ###') ## ESPERANDO 30 SEGUNDOS ###') ## ERROR AL CREAR ORDEN DE COMPRA ###') ## ESPERANDO 30 SEGUNDOS ###') ## ERROR AL CREAR ORDEN DE VENTA ###') ## ESPERANDO 30 SEGUNDOS ###') ## ORDENES REALIZADAS CORRECTAMENTE - ESPERANDO 5 MINUTOS ###') ## ERROR AL LEER VALOR ACTUAL btc ###') ## ESPERANDO 30 SEGUNDOS ###') ## ERROR AL LEER VALOR ACTUAL btc ###') ## ESPERANDO 30 SEGUNDOS ###') ## ERROR AL LEER SALDOS DE LA CUENTA ###') ## ESPERANDO 30 SEGUNDOS ###') ## ERROR SALDO INSUFICIENTE PARA REALIZAR ORDEN ###') ## ESPERANDO NUEVO SALDO ###') # PROGRAMA PRINCIPAL ################################################################## ## CONECTADO CORRECTAMENTE A LA API DE POLONIEX ###') ## ERROR AL CONECTAR CON API POLONEX ###') ## ESPERANDO 30 SEGUNDOS ###') #Escenario sin inversion. Poner 2 ordenes #Escenario Con una orden cerrada y una orden abierta. Hay que cerrarla si hay suficiente saldo para nueva inversion ## ERROR SALDO INSUFICIENTE PARA CANCELAR ORDEN ###') ## ESPERANDO QUE SE CIERRE ORDEN PENDIENTE ###') ## CANCELADA ORDEN: ' + str(num_orden_cerrar)) ## ESPERANDO 2 MINUTOS PARA CONTINUAR ###') ## ERROR AL CANCELAR ORDEN ###') ## ESPERANDO 30 SEGUNDOS ###') # Escenario con toda la inversion aun abierta. No hacer nada ## ESPERANDO 5 MINUTOS PARA CONTINUAR ###') # Escenario ERROR demasiadas ordenes abiertas ## ERROR - DEMASIADAS ORDENES ABIERTAS - Max 2 ORDENES. Act. ' + str(nOrdenes) + ' ABIERTAS ###') ## ESPERANDO A QUE SE CIERREN ###') ## PROCESO CANCELADO ###') ## PROCESO FINALIZADO ###') | 2.623698 | 3 |
mkagentc.py | fakeNetflix/facebook-repo-fb-adb | 716 | 6618755 | <filename>mkagentc.py
#!/usr/bin/env python3
# -*- python-indent-offset: 2 -*-
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in
# the LICENSE file in the root directory of this source tree. An
# additional grant of patent rights can be found in the PATENTS file
# in the same directory.
#
# This file generates agent.c from the agent dex file. Using Python
# for this purpose is convenient, as it includes both a deterministic
# zip file generator and a hashing utility.
#
import os
import sys
import subprocess
import zipfile
from hashlib import sha256
from io import BytesIO
def die(msg):
print(msg, file=sys.stderr)
sys.exit(1)
if len(sys.argv) != 2:
die("invalid usage")
agent_dex_file_name=sys.argv[1]
xxd=os.environ["XXD"]
with open(agent_dex_file_name, "rb") as agent_dex:
agent_dex_contents = agent_dex.read()
jar_file = BytesIO()
with zipfile.ZipFile(
jar_file,
mode="w",
allowZip64=False) as zip:
# Explicitly specificy date for determinism
fileinfo = zipfile.ZipInfo("classes.dex", (1980, 1, 1, 0, 0, 0))
zip.writestr(fileinfo, agent_dex_contents)
jar_file.seek(0)
agent_dex_jar_contents=jar_file.read()
xxd_output = subprocess.check_output(
(xxd, "-i"),
input = agent_dex_jar_contents)
print("""\
#include <stdint.h>
#include "agent.h"
const char agent_name[] = "%(agent_name)s";
const uint8_t agent_dex_jar[] = {
%(xxd_output)s
};
const size_t agent_dex_jar_size = sizeof(agent_dex_jar);
""" % {
"agent_name": "agent-" + sha256(agent_dex_jar_contents).hexdigest()[:16],
"xxd_output": xxd_output.decode("ascii"),
})
| <filename>mkagentc.py
#!/usr/bin/env python3
# -*- python-indent-offset: 2 -*-
# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in
# the LICENSE file in the root directory of this source tree. An
# additional grant of patent rights can be found in the PATENTS file
# in the same directory.
#
# This file generates agent.c from the agent dex file. Using Python
# for this purpose is convenient, as it includes both a deterministic
# zip file generator and a hashing utility.
#
import os
import sys
import subprocess
import zipfile
from hashlib import sha256
from io import BytesIO
def die(msg):
print(msg, file=sys.stderr)
sys.exit(1)
if len(sys.argv) != 2:
die("invalid usage")
agent_dex_file_name=sys.argv[1]
xxd=os.environ["XXD"]
with open(agent_dex_file_name, "rb") as agent_dex:
agent_dex_contents = agent_dex.read()
jar_file = BytesIO()
with zipfile.ZipFile(
jar_file,
mode="w",
allowZip64=False) as zip:
# Explicitly specificy date for determinism
fileinfo = zipfile.ZipInfo("classes.dex", (1980, 1, 1, 0, 0, 0))
zip.writestr(fileinfo, agent_dex_contents)
jar_file.seek(0)
agent_dex_jar_contents=jar_file.read()
xxd_output = subprocess.check_output(
(xxd, "-i"),
input = agent_dex_jar_contents)
print("""\
#include <stdint.h>
#include "agent.h"
const char agent_name[] = "%(agent_name)s";
const uint8_t agent_dex_jar[] = {
%(xxd_output)s
};
const size_t agent_dex_jar_size = sizeof(agent_dex_jar);
""" % {
"agent_name": "agent-" + sha256(agent_dex_jar_contents).hexdigest()[:16],
"xxd_output": xxd_output.decode("ascii"),
})
| en | 0.759307 | #!/usr/bin/env python3 # -*- python-indent-offset: 2 -*- # Copyright (c) 2014, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in # the LICENSE file in the root directory of this source tree. An # additional grant of patent rights can be found in the PATENTS file # in the same directory. # # This file generates agent.c from the agent dex file. Using Python # for this purpose is convenient, as it includes both a deterministic # zip file generator and a hashing utility. # # Explicitly specificy date for determinism \ #include <stdint.h> #include "agent.h" const char agent_name[] = "%(agent_name)s"; const uint8_t agent_dex_jar[] = { %(xxd_output)s }; const size_t agent_dex_jar_size = sizeof(agent_dex_jar); | 2.024071 | 2 |
setup.py | tylertrussell/flake8-docstrings-catnado | 0 | 6618756 | <filename>setup.py
from setuptools import setup
from flake8_docstrings import __version__
def get_long_description():
with open('README.md') as f:
return f.read()
setup(
name='flake8-docstrings-catnado',
version=__version__,
description='a fork of flake8-docstrings-catnado',
long_description=get_long_description(),
license='MIT License',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/tylertrussell/flake8-docstrings-catnado',
entry_points={
'flake8.extension': [
'D = flake8_docstrings:pep257Checker',
],
},
install_requires=['flake8', 'pydocstyle >= 2.1', 'flake8-polyfill'],
provides=['flake8_docstrings'],
py_modules=['flake8_docstrings'],
)
| <filename>setup.py
from setuptools import setup
from flake8_docstrings import __version__
def get_long_description():
with open('README.md') as f:
return f.read()
setup(
name='flake8-docstrings-catnado',
version=__version__,
description='a fork of flake8-docstrings-catnado',
long_description=get_long_description(),
license='MIT License',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/tylertrussell/flake8-docstrings-catnado',
entry_points={
'flake8.extension': [
'D = flake8_docstrings:pep257Checker',
],
},
install_requires=['flake8', 'pydocstyle >= 2.1', 'flake8-polyfill'],
provides=['flake8_docstrings'],
py_modules=['flake8_docstrings'],
)
| none | 1 | 1.478989 | 1 | |
jsonextended/parsers/hdf5.py | chrisjsewell/jsonextended | 5 | 6618757 | #!/usr/bin/env python
import h5py
class HDF5_Parser(object): # noqa: N801
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| #!/usr/bin/env python
import h5py
class HDF5_Parser(object): # noqa: N801
"""
Examples
--------
>>> import h5py
>>> indata = h5py.File('test.hdf5')
>>> dataset = indata.create_dataset("mydataset", (10,), dtype='i')
>>> indata.close()
>>> with open('test.hdf5') as f:
... data = HDF5_Parser().read_file(f)
>>> data['mydataset'][:]
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32)
>>> import os
>>> os.remove('test.hdf5')
"""
plugin_name = 'hdf5.read'
plugin_descript = 'read *.hdf5 (in read mode) files using h5py'
file_regex = '*.hdf5'
def read_file(self, file_obj, **kwargs):
return h5py.File(file_obj.name, mode='r')
| en | 0.285179 | #!/usr/bin/env python # noqa: N801 Examples -------- >>> import h5py >>> indata = h5py.File('test.hdf5') >>> dataset = indata.create_dataset("mydataset", (10,), dtype='i') >>> indata.close() >>> with open('test.hdf5') as f: ... data = HDF5_Parser().read_file(f) >>> data['mydataset'][:] array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int32) >>> import os >>> os.remove('test.hdf5') | 2.846332 | 3 |
catkin_ws/src/10-lane-control/anti_instagram/sandbox/KMeansTrialczuidema.py | johnson880319/Software | 0 | 6618758 | <reponame>johnson880319/Software
#!/usr/bin/env python
import sys
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
from collections import Counter
import math
from anti_instagram.calcLstsqTransform import calcTransform
from anti_instagram.AntiInstagram import *
from anti_instagram.scale_and_shift import *
# from .scale_and_shift import scaleandshift
# from .scale_and_shift import scaleandshift2
from anti_instagram.simpleColorBalanceClass import *
from colorBalanceKMeans import *
from outlierEstimation import *
class kMeanClass:
""" This class gives the ability to use the kMeans alg. with different numbers of initial centers """
input_image = []
resized_image = []
blurred_image = []
image_array = []
num_centers = -1
blur_alg = []
fac_resize = -1
blur_kernel = -1
trained_centers = []
labels = []
labelcount = Counter()
color_array = []
color_image_array = []
# initialize
def __init__(self, inputImage, numCenters, blurAlg, resize, blurKer):
self.input_image = inputImage
self.num_centers = int(numCenters)
self.blur_alg = blurAlg
self.fac_resize = float(resize)
self.blur_kernel = int(blurKer)
self.shiftB = None
self.shiftG = None
self.shiftR = None
# set up array for center colors
self.color_image_array = np.zeros((self.num_centers, 200, 200, 3), np.uint8)
print('created instance of kMeans with arguments:')
print(' number of centers = ' + str(self.num_centers))
print(' blur algorithm = ' + str(self.blur_alg))
print(' resize factor = ' + str(self.fac_resize))
print(' blurring kernel size = ' + str(self.blur_kernel))
# re-shape input image for kMeans
def _getimgdatapts(self, cv2img):
x, y, p = cv2img.shape
img_geom = cv2img[int(x*0):(x-1), :, :]
x_new, y_new, p = img_geom.shape
cv2_tpose = img_geom.transpose()
cv2_arr_tpose = np.reshape(cv2_tpose, [p, x_new * y_new])
npdata = np.transpose(cv2_arr_tpose)
return npdata
def _blurImg(self):
# blur image using median:
if self.blur_alg == 'median':
self.blurred_image = cv2.medianBlur(self.resized_image, self.blur_kernel)
# blur image using gaussian:
elif self.blur_alg == 'gaussian':
self.blurred_image = cv2.GaussianBlur(self.resized_image, (self.blur_kernel, self.blur_kernel), 0)
def _plotColors(self):
# loop over all centers
for center in np.arange(self.num_centers):
# get color
color_i = tuple(
[self.trained_centers[center, 2], self.trained_centers[center, 1], self.trained_centers[center, 0]])
self.color_array.append(color_i)
self.color_image_array[center, :] = color_i
plotRows = int(math.ceil(self.num_centers / 2.0))
f, axarr = plt.subplots(plotRows, 2)
for row in range(plotRows):
if self.num_centers % 2 == 0:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
axarr[row, 1].axis('off')
axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
else:
if row != plotRows - 1:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
axarr[row, 1].axis('off')
axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
else:
axarr[row, 0].imshow(self.color_image_array[2 * row])
axarr[row, 0].axis('off')
axarr[row, 0].set_title(str(self.labelcount[2 * row]))
axarr[row, 1].axis('off')
print(self.color_array)
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
# apply kMeans alg
    def applyKM(self):
        """Run the full clustering pipeline: resize, blur, show the blurred
        image (blocks until a key press), then k-means the pixel colors.

        Results are stored on self: trained_centers, labels and
        labelcount (number of pixels per cluster).
        """
        # resize image
        self.resized_image = cv2.resize(self.input_image, (0, 0), fx=self.fac_resize, fy=self.fac_resize)
        print('resized image!')
        # blur image
        self._blurImg()
        print('blurred image!')
        # self.blurred_image, self.shiftB, self.shiftG, self.shiftR = blackBalance(self.blurred_image)
        # prepare KMeans
        kmc = KMeans(n_clusters=self.num_centers, init='k-means++', max_iter=20)
        # try out color balance first
        # self.blurred_image = simplest_cb(self.blurred_image, 1) # percentages around 1% are normal
        cv2.namedWindow('blurred', flags=cv2.WINDOW_NORMAL)
        cv2.imshow('blurred', self.blurred_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # prepare data points (one row per pixel)
        self.image_array = self._getimgdatapts(self.blurred_image)
        # debug
        print(self.image_array.shape)
        # run KMeans
        kmc.fit(self.image_array)
        # get centers, labels and labelcount from KMeans
        self.trained_centers = kmc.cluster_centers_
        self.labels = kmc.labels_
        for i in np.arange(self.num_centers):
            self.labelcount[i] = np.sum(self.labels == i)
        # plot colors
        self._plotColors()
def determineColor(self, withRed, trained_centers):
# define the true centers. This color is preset. The color transformation
# tries to transform a picture such that the black areas will become true black.
# The same applies for yellow, white and (if valid) red.
trueBlack = [60, 60, 60]
if (withRed):
trueRed = [60, 60, 240]
trueYellow = [50, 240, 240]
trueWhite = [240, 240, 240]
errorBlack = np.zeros(self.num_centers)
errorYellow = np.zeros(self.num_centers)
errorWhite = np.zeros(self.num_centers)
if (withRed):
errorRed = np.zeros(self.num_centers)
for i in range(self.num_centers):
print(trained_centers[i])
errorBlack[i] = np.linalg.norm(trueBlack - trained_centers[i])
errorYellow[i] = np.linalg.norm(trueYellow - trained_centers[i])
errorWhite[i] = np.linalg.norm(trueWhite - trained_centers[i])
if (withRed):
errorRed[i] = np.linalg.norm(trueRed - trained_centers[i])
print "black error:" + str(errorBlack)
print "yellow error:" + str(errorYellow)
print "white error:" + str(errorWhite)
print "red error:" + str(errorRed)
nTrueCenters = 3
errorBlackSortedIdx = np.argsort(errorBlack)
errorYellowSortedIdx = np.argsort(errorYellow)
errorWhiteSortedIdx = np.argsort(errorWhite)
if (withRed):
errorRedSortedIdx = np.argsort(errorRed)
if (withRed):
nTrueCenters = 4
ListOfIndices = []
blackIdxFound = False
whiteIdxFound = False
yellowIdxFound = False
if (withRed):
redIdxFound = False
centersFound = False
index = 0
print "errorBlackSortedIdx: " + str(errorBlackSortedIdx)
print "errorYellowSortedIdx: " + str(errorYellowSortedIdx)
print "errorWhiteSortedIdx: " + str(errorWhiteSortedIdx)
print "errorRedSortedIdx: " + str(errorRedSortedIdx)
while (not centersFound):
if errorBlackSortedIdx[index] not in ListOfIndices and not blackIdxFound:
ListOfIndices.append(errorBlackSortedIdx[index])
print str(index) + " in black " + str(ListOfIndices)
blackIdxFound = True
idxBlack = errorBlackSortedIdx[index]
if errorWhiteSortedIdx[index] not in ListOfIndices and not whiteIdxFound:
ListOfIndices.append(errorWhiteSortedIdx[index])
print str(index) + " in white " + str(ListOfIndices)
whiteIdxFound = True
idxWhite = errorWhiteSortedIdx[index]
if errorYellowSortedIdx[index] not in ListOfIndices and not yellowIdxFound:
ListOfIndices.append(errorYellowSortedIdx[index])
print str(index) + " in yellow " + str(ListOfIndices)
yellowIdxFound = True
idxYellow = errorYellowSortedIdx[index]
if withRed:
if errorRedSortedIdx[index] not in ListOfIndices and not redIdxFound:
ListOfIndices.append(errorRedSortedIdx[index])
redIdxFound = True
print str(index) + "in red" + str(ListOfIndices)
idxRed = errorRedSortedIdx[index]
print "True?: " + str(redIdxFound) + str(yellowIdxFound) + str(whiteIdxFound) + str(blackIdxFound)
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound and redIdxFound
print "centersFound: " + str(centersFound)
else:
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound
index = index + 1
print "End of while loop. Index: " + str(index)
print idxRed, idxWhite, idxYellow, idxBlack
if (withRed):
return idxBlack, idxRed, idxYellow, idxWhite
else:
return idxBlack, idxYellow, idxWhite
def plotDeterminedCenters(self, centerBlack, centerYellow, centerWhite, centerRed):
tupleBlack = tuple([centerBlack[2], centerBlack[1], centerBlack[0]])
tupleWhite = tuple([centerWhite[2], centerWhite[1], centerWhite[0]])
tupleYellow = tuple([centerYellow[2], centerYellow[1], centerYellow[0]])
tupleRed = tuple([centerRed[2], centerRed[1], centerRed[0]])
imageBlack = np.zeros((200, 200, 3), np.uint8)
imageBlack[:] = tupleBlack
imageWhite = np.zeros((200, 200, 3), np.uint8)
imageWhite[:] = tupleWhite
imageYellow = np.zeros((200, 200, 3), np.uint8)
imageYellow[:] = tupleYellow
imageRed = np.zeros((200, 200, 3), np.uint8)
imageRed[:] = tupleRed
f, axarr = plt.subplots(2, 2)
axarr[0, 0].imshow(imageBlack)
axarr[0, 0].axis('off')
axarr[0, 0].set_title("Black")
axarr[0, 1].imshow(imageWhite)
axarr[0, 1].axis('off')
axarr[0, 1].set_title("White")
axarr[1, 0].imshow(imageYellow)
axarr[1, 0].axis('off')
axarr[1, 0].set_title("Yellow")
axarr[1, 1].imshow(imageRed)
axarr[1, 1].axis('off')
axarr[1, 1].set_title("Red")
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
def main():
    """CLI entry point.

    Loads an image, applies a simple color balance, clusters the pixel
    colors with k-means, matches the clusters to the reference road colors
    (black/red/yellow/white), fits a linear scale/shift transform from the
    non-outlier matches and displays the corrected image.
    """
    # define and parse command line arguments
    parser = argparse.ArgumentParser(
        description='Perform kMeans with n initial centers.')
    parser.add_argument('img_path', help='path to the image')
    parser.add_argument('n_centers', help='numbers of initial centers')
    parser.add_argument('--resize', default='0.1',
                        help='factor of downsampling the input image. DEFAULT = 0.1')
    parser.add_argument('--blur', default='median',
                        help="blur algorithm. 'median' or 'gaussian. DEFAULT = median")
    parser.add_argument('--blur_kernel', default='5',
                        help='size of kernel for blurring. DEFAULT = 5')
    parser.add_argument('--output_dir', default='./output_images',
                        help='directory for the output images. DEFAULT = ./output_images')
    args = parser.parse_args()
    # check if file exists
    if not os.path.isfile(args.img_path):
        print('file not found')
        sys.exit(2)
    # check if dir exists, create if not
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # check resize factor.
    # BUGFIX: argparse delivers a string, so the old test
    # "(args.resize < 1) or (args.resize <= 0)" compared str to int and its
    # logic would also have rejected every valid factor below 1.
    if not 0.0 < float(args.resize) <= 1.0:
        print('resize factor between 0 and 1')
        sys.exit(2)
    # check blur alg
    if not (args.blur == "median" or args.blur == "gaussian"):
        print('blur alg must be median or gaussian')
        sys.exit(2)
    # check kernel size (cv2 blur functions require an odd kernel)
    print("kernel: " + str(args.blur_kernel))
    if int(args.blur_kernel) % 2 == 0:
        print('kernel size must be odd')
        sys.exit(2)
    # create instance of kMeans
    print("all arguments have been read.")
    inputImage = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
    # pre-balance the image colors before clustering
    CB = simpleColorBalanceClass()
    CB.thresholdAnalysis(inputImage, 1)
    imageBalanced = CB.applyTrafo(inputImage)
    KM = kMeanClass(imageBalanced, args.n_centers, args.blur, args.resize, args.blur_kernel)
    cv2.namedWindow('input', flags=cv2.WINDOW_NORMAL)
    cv2.imshow('input', inputImage)
    cv2.namedWindow('balanced', flags=cv2.WINDOW_NORMAL)
    cv2.imshow('balanced', imageBalanced)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    KM.applyKM()
    # match the trained clusters to the reference colors (including red)
    idxBlack, idxRed, idxYellow, idxWhite = KM.determineColor(True, KM.trained_centers)
    trained_centers = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxRed],
                                KM.trained_centers[idxYellow], KM.trained_centers[idxWhite]])
    print("the trained centers are: " + str(trained_centers))
    KM.plotDeterminedCenters(KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
                             KM.trained_centers[idxWhite], KM.trained_centers[idxRed])
    # drop the worst-matching center pair before fitting the transform
    true_centers = np.vstack([[70, 50, 60], [50, 70, 240], [60, 240, 230], [250, 250, 250]])
    outlierIndex, outlierCenter = detectOutlier(trained_centers, true_centers)
    true_centers_woOutlier = np.delete(true_centers, outlierIndex, 0)
    trained_centers_woOutlier = np.delete(trained_centers, outlierIndex, 0)
    print("outlier center is: " + str(outlierCenter))
    # least-squares scale/shift transform from trained to true centers
    print("transform instance will be created!")
    T = calcTransform(3, trained_centers_woOutlier, true_centers_woOutlier)
    T.calcTransform()
    # corr_img1 = scaleandshift2(KM.input_image, [1, 1, 1], [KM.shiftB, KM.shiftG, KM.shiftR])
    corrected_img = scaleandshift2(KM.input_image, T.scale, T.shift)
    corrected_image_cv2 = np.clip(
        corrected_img, 0, 255).astype(np.uint8)
    cv2.namedWindow('corrected', flags=cv2.WINDOW_NORMAL)
    cv2.imshow('corrected', corrected_image_cv2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# run the CLI when this file is executed directly
if __name__ == '__main__':
    sys.exit(main())
"""
def batchExtraction(image, batchSideLength):
xSize, ySize, zSize = image.shape
xSizeNew = int(xSize / batchSideLength)
ySizeNew = int(ySize / batchSideLength)
newImage = np.zeros((xSizeNew,ySizeNew,zSize))
for i in range(xSizeNew):
for j in range(ySizeNew):
# create indices for the batches
xlow = i*batchSideLength
xhigh = (i+1)*batchSideLength
ylow = j*batchSideLength
yhigh = (j+1)*batchSideLength
if(i == (xSizeNew-1) ):
xhigh = xSize - 1
if(j == (ySizeNew - 1)):
yhigh = ySize -1
# average the batches
newImage[i, j, 0] = np.mean(image[xlow:xhigh, ylow:yhigh, 0])
newImage[i, j, 1] = np.mean(image[xlow:xhigh, ylow:yhigh, 1])
newImage[i, j, 2] = np.mean(image[xlow:xhigh, ylow:yhigh, 2])
return newImage
input_img = cv2.imread("test_images/pic3.jpg", cv2.IMREAD_UNCHANGED)
#input_img_converted = getimgdatapts(input_img)
#print(input_img_converted.shape)
width, height, channels = input_img.shape
trial = cv2.resize(input_img, (0, 0), fx=0.1, fy=0.1)
print(trial.shape)
# blur image using gaussian:
blurG = cv2.GaussianBlur(trial, (5,5), 0)
# blur image using median:
blurM = cv2.medianBlur(trial, 5)
# plot both blurred images
blurBoth = np.concatenate((blurG, blurM), axis=1)
# apply kmeans on blurred image:
# number of centers for kmeans
n_centers = 6
kmc = KMeans(n_clusters=n_centers, init='k-means++', max_iter=20)
trial_converted = getimgdatapts(blurM)
kmc.fit(trial_converted)
trained_centers = kmc.cluster_centers_
labels = kmc.labels_
# print centers and counts
labelcount = Counter()
for i in np.arange(n_centers):
labelcount[i] = np.sum(labels == i)
print(labelcount)
print(trained_centers)
print(kmc.cluster_centers_[1]/255)
str0 = tuple([kmc.cluster_centers_[0,2],kmc.cluster_centers_[0,1],kmc.cluster_centers_[0,0]])
str1 = tuple([kmc.cluster_centers_[1,2],kmc.cluster_centers_[1,1],kmc.cluster_centers_[1,0]])
str2 = tuple([kmc.cluster_centers_[2,2],kmc.cluster_centers_[2,1],kmc.cluster_centers_[2,0]])
str3 = tuple([kmc.cluster_centers_[3,2],kmc.cluster_centers_[3,1],kmc.cluster_centers_[3,0]])
str4 = tuple([kmc.cluster_centers_[4,2],kmc.cluster_centers_[4,1],kmc.cluster_centers_[4,0]])
str5 = tuple([kmc.cluster_centers_[5,2],kmc.cluster_centers_[5,1],kmc.cluster_centers_[5,0]])
print(str1)
image0 = np.zeros((200, 200, 3), np.uint8)
image0[:] = str0
image1 = np.zeros((200, 200, 3), np.uint8)
image1[:] = str1
image2 = np.zeros((200, 200, 3), np.uint8)
image2[:] = str2
image3 = np.zeros((200, 200, 3), np.uint8)
image3[:] = str3
image4 = np.zeros((200, 200, 3), np.uint8)
image4[:] = str4
image5 = np.zeros((200, 200, 3), np.uint8)
image5[:] = str5
labelArray = kmc.labels_
num0 = np.sum(labelArray==0)
num1 = np.sum(labelArray==1)
num2 = np.sum(labelArray==2)
num3 = np.sum(labelArray==3)
num4 = np.sum(labelArray==4)
num5 = np.sum(labelArray==5)
f, axarr = plt.subplots(3, 2)
axarr[0,0].imshow(image0)
axarr[0,0].axis('off')
axarr[0,0].set_title(str(num0))
axarr[0,1].imshow(image1)
axarr[0,1].axis('off')
axarr[0,1].set_title(str(num1))
axarr[1,0].imshow(image2)
axarr[1,0].axis('off')
axarr[1,0].set_title(str(num2))
axarr[1,1].imshow(image3)
axarr[1,1].axis('off')
axarr[1,1].set_title(str(num3))
axarr[2,0].imshow(image4)
axarr[2,0].axis('off')
axarr[2,0].set_title(str(num4))
axarr[2,1].imshow(image5)
axarr[2,1].axis('off')
axarr[2,1].set_title(str(num5))
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
for i in range(kmc.n_clusters):
print(np.sum(labelArray==i))
"""
| #!/usr/bin/env python
import sys
import os
import cv2
import numpy as np
import matplotlib.pyplot as plt
import argparse
from matplotlib.patches import Rectangle
from sklearn.cluster import KMeans
from collections import Counter
import math
from anti_instagram.calcLstsqTransform import calcTransform
from anti_instagram.AntiInstagram import *
from anti_instagram.scale_and_shift import *
# from .scale_and_shift import scaleandshift
# from .scale_and_shift import scaleandshift2
from anti_instagram.simpleColorBalanceClass import *
from colorBalanceKMeans import *
from outlierEstimation import *
class kMeanClass:
    """Run k-means color clustering on an image with a configurable number
    of initial centers, blur algorithm and resize factor."""
    # NOTE(review): these are *class* attributes serving as defaults; the
    # mutable ones are shared between instances unless reassigned.
    # __init__ reassigns most scalars, but labelcount and color_array are
    # only mutated in place, so multiple instances would share them.
    input_image = []
    resized_image = []
    blurred_image = []
    image_array = []
    num_centers = -1
    blur_alg = []
    fac_resize = -1
    blur_kernel = -1
    trained_centers = []
    labels = []
    labelcount = Counter()
    color_array = []
    color_image_array = []
    # initialize
    def __init__(self, inputImage, numCenters, blurAlg, resize, blurKer):
        """Store the clustering configuration.

        :param inputImage: BGR image (numpy array) to cluster.
        :param numCenters: number of k-means centers (coerced to int).
        :param blurAlg: 'median' or 'gaussian'.
        :param resize: downsampling factor (coerced to float).
        :param blurKer: blur kernel size, must be odd (coerced to int).
        """
        self.input_image = inputImage
        self.num_centers = int(numCenters)
        self.blur_alg = blurAlg
        self.fac_resize = float(resize)
        self.blur_kernel = int(blurKer)
        # per-channel shifts; filled by external balancing code if used
        self.shiftB = None
        self.shiftG = None
        self.shiftR = None
        # set up array for center colors
        self.color_image_array = np.zeros((self.num_centers, 200, 200, 3), np.uint8)
        print('created instance of kMeans with arguments:')
        print(' number of centers = ' + str(self.num_centers))
        print(' blur algorithm = ' + str(self.blur_alg))
        print(' resize factor = ' + str(self.fac_resize))
        print(' blurring kernel size = ' + str(self.blur_kernel))
# re-shape input image for kMeans
    def _getimgdatapts(self, cv2img):
        """Reshape an H x W x C image into an N x C array (one row per
        pixel) as expected by sklearn's KMeans.

        Note: int(x*0) is simply 0, and the slice end (x-1) drops the
        last image row.
        """
        x, y, p = cv2img.shape
        img_geom = cv2img[int(x*0):(x-1), :, :]
        x_new, y_new, p = img_geom.shape
        # channels-first view, flattened to (C, N), then transposed to (N, C)
        cv2_tpose = img_geom.transpose()
        cv2_arr_tpose = np.reshape(cv2_tpose, [p, x_new * y_new])
        npdata = np.transpose(cv2_arr_tpose)
        return npdata
def _blurImg(self):
# blur image using median:
if self.blur_alg == 'median':
self.blurred_image = cv2.medianBlur(self.resized_image, self.blur_kernel)
# blur image using gaussian:
elif self.blur_alg == 'gaussian':
self.blurred_image = cv2.GaussianBlur(self.resized_image, (self.blur_kernel, self.blur_kernel), 0)
    def _plotColors(self):
        """Render each trained k-means center as a color swatch in a
        plotRows x 2 matplotlib grid, titled with its pixel count.

        Side effects: appends to self.color_array (grows on every call)
        and fills self.color_image_array; blocks until the plot window
        and the cv2 key wait are dismissed.
        """
        # loop over all centers
        for center in np.arange(self.num_centers):
            # get color (swap BGR from OpenCV to RGB for matplotlib)
            color_i = tuple(
                [self.trained_centers[center, 2], self.trained_centers[center, 1], self.trained_centers[center, 0]])
            self.color_array.append(color_i)
            self.color_image_array[center, :] = color_i
        # two swatches per row; odd counts leave the last right axis empty
        plotRows = int(math.ceil(self.num_centers / 2.0))
        f, axarr = plt.subplots(plotRows, 2)
        for row in range(plotRows):
            if self.num_centers % 2 == 0:
                axarr[row, 0].imshow(self.color_image_array[2 * row])
                axarr[row, 0].axis('off')
                axarr[row, 0].set_title(str(self.labelcount[2 * row]))
                axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
                axarr[row, 1].axis('off')
                axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
            else:
                if row != plotRows - 1:
                    axarr[row, 0].imshow(self.color_image_array[2 * row])
                    axarr[row, 0].axis('off')
                    axarr[row, 0].set_title(str(self.labelcount[2 * row]))
                    axarr[row, 1].imshow(self.color_image_array[2 * row + 1])
                    axarr[row, 1].axis('off')
                    axarr[row, 1].set_title(str(self.labelcount[2 * row + 1]))
                else:
                    # odd number of centers: last row shows only the left swatch
                    axarr[row, 0].imshow(self.color_image_array[2 * row])
                    axarr[row, 0].axis('off')
                    axarr[row, 0].set_title(str(self.labelcount[2 * row]))
                    axarr[row, 1].axis('off')
        print(self.color_array)
        plt.show()
        cv2.waitKey(0)
        cv2.destroyAllWindows()
# apply kMeans alg
    def applyKM(self):
        """Run the full clustering pipeline: resize, blur, show the blurred
        image (blocks until a key press), then k-means the pixel colors.

        Results are stored on self: trained_centers, labels and
        labelcount (number of pixels per cluster).
        """
        # resize image
        self.resized_image = cv2.resize(self.input_image, (0, 0), fx=self.fac_resize, fy=self.fac_resize)
        print('resized image!')
        # blur image
        self._blurImg()
        print('blurred image!')
        # self.blurred_image, self.shiftB, self.shiftG, self.shiftR = blackBalance(self.blurred_image)
        # prepare KMeans
        kmc = KMeans(n_clusters=self.num_centers, init='k-means++', max_iter=20)
        # try out color balance first
        # self.blurred_image = simplest_cb(self.blurred_image, 1) # percentages around 1% are normal
        cv2.namedWindow('blurred', flags=cv2.WINDOW_NORMAL)
        cv2.imshow('blurred', self.blurred_image)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        # prepare data points (one row per pixel)
        self.image_array = self._getimgdatapts(self.blurred_image)
        # debug
        print(self.image_array.shape)
        # run KMeans
        kmc.fit(self.image_array)
        # get centers, labels and labelcount from KMeans
        self.trained_centers = kmc.cluster_centers_
        self.labels = kmc.labels_
        for i in np.arange(self.num_centers):
            self.labelcount[i] = np.sum(self.labels == i)
        # plot colors
        self._plotColors()
def determineColor(self, withRed, trained_centers):
# define the true centers. This color is preset. The color transformation
# tries to transform a picture such that the black areas will become true black.
# The same applies for yellow, white and (if valid) red.
trueBlack = [60, 60, 60]
if (withRed):
trueRed = [60, 60, 240]
trueYellow = [50, 240, 240]
trueWhite = [240, 240, 240]
errorBlack = np.zeros(self.num_centers)
errorYellow = np.zeros(self.num_centers)
errorWhite = np.zeros(self.num_centers)
if (withRed):
errorRed = np.zeros(self.num_centers)
for i in range(self.num_centers):
print(trained_centers[i])
errorBlack[i] = np.linalg.norm(trueBlack - trained_centers[i])
errorYellow[i] = np.linalg.norm(trueYellow - trained_centers[i])
errorWhite[i] = np.linalg.norm(trueWhite - trained_centers[i])
if (withRed):
errorRed[i] = np.linalg.norm(trueRed - trained_centers[i])
print "black error:" + str(errorBlack)
print "yellow error:" + str(errorYellow)
print "white error:" + str(errorWhite)
print "red error:" + str(errorRed)
nTrueCenters = 3
errorBlackSortedIdx = np.argsort(errorBlack)
errorYellowSortedIdx = np.argsort(errorYellow)
errorWhiteSortedIdx = np.argsort(errorWhite)
if (withRed):
errorRedSortedIdx = np.argsort(errorRed)
if (withRed):
nTrueCenters = 4
ListOfIndices = []
blackIdxFound = False
whiteIdxFound = False
yellowIdxFound = False
if (withRed):
redIdxFound = False
centersFound = False
index = 0
print "errorBlackSortedIdx: " + str(errorBlackSortedIdx)
print "errorYellowSortedIdx: " + str(errorYellowSortedIdx)
print "errorWhiteSortedIdx: " + str(errorWhiteSortedIdx)
print "errorRedSortedIdx: " + str(errorRedSortedIdx)
while (not centersFound):
if errorBlackSortedIdx[index] not in ListOfIndices and not blackIdxFound:
ListOfIndices.append(errorBlackSortedIdx[index])
print str(index) + " in black " + str(ListOfIndices)
blackIdxFound = True
idxBlack = errorBlackSortedIdx[index]
if errorWhiteSortedIdx[index] not in ListOfIndices and not whiteIdxFound:
ListOfIndices.append(errorWhiteSortedIdx[index])
print str(index) + " in white " + str(ListOfIndices)
whiteIdxFound = True
idxWhite = errorWhiteSortedIdx[index]
if errorYellowSortedIdx[index] not in ListOfIndices and not yellowIdxFound:
ListOfIndices.append(errorYellowSortedIdx[index])
print str(index) + " in yellow " + str(ListOfIndices)
yellowIdxFound = True
idxYellow = errorYellowSortedIdx[index]
if withRed:
if errorRedSortedIdx[index] not in ListOfIndices and not redIdxFound:
ListOfIndices.append(errorRedSortedIdx[index])
redIdxFound = True
print str(index) + "in red" + str(ListOfIndices)
idxRed = errorRedSortedIdx[index]
print "True?: " + str(redIdxFound) + str(yellowIdxFound) + str(whiteIdxFound) + str(blackIdxFound)
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound and redIdxFound
print "centersFound: " + str(centersFound)
else:
centersFound = blackIdxFound and whiteIdxFound and yellowIdxFound
index = index + 1
print "End of while loop. Index: " + str(index)
print idxRed, idxWhite, idxYellow, idxBlack
if (withRed):
return idxBlack, idxRed, idxYellow, idxWhite
else:
return idxBlack, idxYellow, idxWhite
    def plotDeterminedCenters(self, centerBlack, centerYellow, centerWhite, centerRed):
        """Display the four matched centers as 200x200 swatches in a 2x2
        grid: black, white on top; yellow, red below. Blocks until the
        plot window and the cv2 key wait are dismissed."""
        # swap BGR (OpenCV) to RGB (matplotlib) for each swatch
        tupleBlack = tuple([centerBlack[2], centerBlack[1], centerBlack[0]])
        tupleWhite = tuple([centerWhite[2], centerWhite[1], centerWhite[0]])
        tupleYellow = tuple([centerYellow[2], centerYellow[1], centerYellow[0]])
        tupleRed = tuple([centerRed[2], centerRed[1], centerRed[0]])
        imageBlack = np.zeros((200, 200, 3), np.uint8)
        imageBlack[:] = tupleBlack
        imageWhite = np.zeros((200, 200, 3), np.uint8)
        imageWhite[:] = tupleWhite
        imageYellow = np.zeros((200, 200, 3), np.uint8)
        imageYellow[:] = tupleYellow
        imageRed = np.zeros((200, 200, 3), np.uint8)
        imageRed[:] = tupleRed
        f, axarr = plt.subplots(2, 2)
        axarr[0, 0].imshow(imageBlack)
        axarr[0, 0].axis('off')
        axarr[0, 0].set_title("Black")
        axarr[0, 1].imshow(imageWhite)
        axarr[0, 1].axis('off')
        axarr[0, 1].set_title("White")
        axarr[1, 0].imshow(imageYellow)
        axarr[1, 0].axis('off')
        axarr[1, 0].set_title("Yellow")
        axarr[1, 1].imshow(imageRed)
        axarr[1, 1].axis('off')
        axarr[1, 1].set_title("Red")
        plt.show()
        cv2.waitKey(0)
        cv2.destroyAllWindows()
def main():
    """CLI: color-balance an image, k-means its colors, match clusters to
    the reference road colors, fit a scale/shift transform and show the
    corrected image."""
    # define and parse command line arguments
    parser = argparse.ArgumentParser(
        description='Perform kMeans with n initial centers.')
    parser.add_argument('img_path', help='path to the image')
    parser.add_argument('n_centers', help='numbers of initial centers')
    parser.add_argument('--resize', default='0.1',
                        help='factor of downsampling the input image. DEFAULT = 0.1')
    parser.add_argument('--blur', default='median',
                        help="blur algorithm. 'median' or 'gaussian. DEFAULT = median")
    parser.add_argument('--blur_kernel', default='5',
                        help='size of kernel for blurring. DEFAULT = 5')
    parser.add_argument('--output_dir', default='./output_images',
                        help='directory for the output images. DEFAULT = ./output_images')
    args = parser.parse_args()
    # check if file exists
    if not os.path.isfile(args.img_path):
        print('file not found')
        sys.exit(2)
    # check if dir exists, create if not
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    # check resize factor
    # NOTE(review): args.resize is a str, so these comparisons do not do
    # what was intended (str vs int), and the condition would also reject
    # every factor below 1; should be: not (0 < float(args.resize) <= 1).
    if (args.resize < 1) or (args.resize <= 0):
        print('resize factor between 0 and 1')
        sys.exit(2)
    # check blur alg
    if not (args.blur == "median" or args.blur == "gaussian"):
        print('blur alg must be median or gaussian')
        sys.exit(2)
    # check kernel size (cv2 blur functions require an odd kernel)
    print "kernel: " + str(args.blur_kernel)
    if (int(args.blur_kernel) % 2 == 0):
        print('kernel size must be odd')
        sys.exit(2)
    # create instance of kMeans
    print("all arguments have been read.")
    inputImage = cv2.imread(args.img_path, cv2.IMREAD_UNCHANGED)
    # pre-balance the image colors before clustering
    CB = simpleColorBalanceClass()
    CB.thresholdAnalysis(inputImage, 1)
    imageBalanced = CB.applyTrafo(inputImage)
    KM = kMeanClass(imageBalanced, args.n_centers, args.blur, args.resize, args.blur_kernel)
    cv2.namedWindow('input', flags=cv2.WINDOW_NORMAL)
    cv2.imshow('input', inputImage)
    cv2.namedWindow('balanced', flags=cv2.WINDOW_NORMAL)
    cv2.imshow('balanced', imageBalanced)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    KM.applyKM()
    # match the trained clusters to the reference colors (including red)
    idxBlack, idxRed, idxYellow, idxWhite = KM.determineColor(True, KM.trained_centers)
    trained_centers = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxRed],
                                KM.trained_centers[idxYellow], KM.trained_centers[idxWhite]])
    print "the trained centers are: " + str(trained_centers)
    KM.plotDeterminedCenters(KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
                             KM.trained_centers[idxWhite], KM.trained_centers[idxRed])
    # NOTE(review): trained_centers_woRed is never used afterwards
    trained_centers_woRed = np.array([KM.trained_centers[idxBlack], KM.trained_centers[idxYellow],
                                      KM.trained_centers[idxWhite]])
    # drop the worst-matching center pair before fitting the transform
    true_centers = np.vstack([[70, 50, 60], [50, 70, 240], [60, 240, 230], [250, 250, 250]])
    outlierIndex, outlierCenter = detectOutlier(trained_centers, true_centers)
    true_centers_woOutlier = np.delete(true_centers, outlierIndex, 0)
    trained_centers_woOutlier = np.delete(trained_centers, outlierIndex, 0)
    print "outlier center is: " + str(outlierCenter)
    # least-squares scale/shift transform from trained to true centers
    print("transform instance will be created!")
    T = calcTransform(3, trained_centers_woOutlier, true_centers_woOutlier)
    T.calcTransform()
    # corr_img1 = scaleandshift2(KM.input_image, [1, 1, 1], [KM.shiftB, KM.shiftG, KM.shiftR])
    corrected_img = scaleandshift2(KM.input_image, T.scale, T.shift)
    corrected_image_cv2 = np.clip(
        corrected_img, 0, 255).astype(np.uint8)
    cv2.namedWindow('corrected', flags=cv2.WINDOW_NORMAL)
    cv2.imshow('corrected', corrected_image_cv2)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# run the CLI when this file is executed directly
if __name__ == '__main__':
    sys.exit(main())
"""
def batchExtraction(image, batchSideLength):
xSize, ySize, zSize = image.shape
xSizeNew = int(xSize / batchSideLength)
ySizeNew = int(ySize / batchSideLength)
newImage = np.zeros((xSizeNew,ySizeNew,zSize))
for i in range(xSizeNew):
for j in range(ySizeNew):
# create indices for the batches
xlow = i*batchSideLength
xhigh = (i+1)*batchSideLength
ylow = j*batchSideLength
yhigh = (j+1)*batchSideLength
if(i == (xSizeNew-1) ):
xhigh = xSize - 1
if(j == (ySizeNew - 1)):
yhigh = ySize -1
# average the batches
newImage[i, j, 0] = np.mean(image[xlow:xhigh, ylow:yhigh, 0])
newImage[i, j, 1] = np.mean(image[xlow:xhigh, ylow:yhigh, 1])
newImage[i, j, 2] = np.mean(image[xlow:xhigh, ylow:yhigh, 2])
return newImage
input_img = cv2.imread("test_images/pic3.jpg", cv2.IMREAD_UNCHANGED)
#input_img_converted = getimgdatapts(input_img)
#print(input_img_converted.shape)
width, height, channels = input_img.shape
trial = cv2.resize(input_img, (0, 0), fx=0.1, fy=0.1)
print(trial.shape)
# blur image using gaussian:
blurG = cv2.GaussianBlur(trial, (5,5), 0)
# blur image using median:
blurM = cv2.medianBlur(trial, 5)
# plot both blurred images
blurBoth = np.concatenate((blurG, blurM), axis=1)
# apply kmeans on blurred image:
# number of centers for kmeans
n_centers = 6
kmc = KMeans(n_clusters=n_centers, init='k-means++', max_iter=20)
trial_converted = getimgdatapts(blurM)
kmc.fit(trial_converted)
trained_centers = kmc.cluster_centers_
labels = kmc.labels_
# print centers and counts
labelcount = Counter()
for i in np.arange(n_centers):
labelcount[i] = np.sum(labels == i)
print(labelcount)
print(trained_centers)
print(kmc.cluster_centers_[1]/255)
str0 = tuple([kmc.cluster_centers_[0,2],kmc.cluster_centers_[0,1],kmc.cluster_centers_[0,0]])
str1 = tuple([kmc.cluster_centers_[1,2],kmc.cluster_centers_[1,1],kmc.cluster_centers_[1,0]])
str2 = tuple([kmc.cluster_centers_[2,2],kmc.cluster_centers_[2,1],kmc.cluster_centers_[2,0]])
str3 = tuple([kmc.cluster_centers_[3,2],kmc.cluster_centers_[3,1],kmc.cluster_centers_[3,0]])
str4 = tuple([kmc.cluster_centers_[4,2],kmc.cluster_centers_[4,1],kmc.cluster_centers_[4,0]])
str5 = tuple([kmc.cluster_centers_[5,2],kmc.cluster_centers_[5,1],kmc.cluster_centers_[5,0]])
print(str1)
image0 = np.zeros((200, 200, 3), np.uint8)
image0[:] = str0
image1 = np.zeros((200, 200, 3), np.uint8)
image1[:] = str1
image2 = np.zeros((200, 200, 3), np.uint8)
image2[:] = str2
image3 = np.zeros((200, 200, 3), np.uint8)
image3[:] = str3
image4 = np.zeros((200, 200, 3), np.uint8)
image4[:] = str4
image5 = np.zeros((200, 200, 3), np.uint8)
image5[:] = str5
labelArray = kmc.labels_
num0 = np.sum(labelArray==0)
num1 = np.sum(labelArray==1)
num2 = np.sum(labelArray==2)
num3 = np.sum(labelArray==3)
num4 = np.sum(labelArray==4)
num5 = np.sum(labelArray==5)
f, axarr = plt.subplots(3, 2)
axarr[0,0].imshow(image0)
axarr[0,0].axis('off')
axarr[0,0].set_title(str(num0))
axarr[0,1].imshow(image1)
axarr[0,1].axis('off')
axarr[0,1].set_title(str(num1))
axarr[1,0].imshow(image2)
axarr[1,0].axis('off')
axarr[1,0].set_title(str(num2))
axarr[1,1].imshow(image3)
axarr[1,1].axis('off')
axarr[1,1].set_title(str(num3))
axarr[2,0].imshow(image4)
axarr[2,0].axis('off')
axarr[2,0].set_title(str(num4))
axarr[2,1].imshow(image5)
axarr[2,1].axis('off')
axarr[2,1].set_title(str(num5))
plt.show()
cv2.waitKey(0)
cv2.destroyAllWindows()
for i in range(kmc.n_clusters):
print(np.sum(labelArray==i))
""" | en | 0.393176 | #!/usr/bin/env python # from .scale_and_shift import scaleandshift # from .scale_and_shift import scaleandshift2 This class gives the ability to use the kMeans alg. with different numbers of initial centers # initialize # set up array for center colors # re-shape input image for kMeans # blur image using median: # blur image using gaussian: # loop over all centers # get color # apply kMeans alg # resize image # blur image # self.blurred_image, self.shiftB, self.shiftG, self.shiftR = blackBalance(self.blurred_image) # prepare KMeans # try out color balance first # self.blurred_image = simplest_cb(self.blurred_image, 1) # percentages around 1% are normal # prepare data points # debug # run KMeans # get centers, labels and labelcount from KMeans # plot colors # define the true centers. This color is preset. The color transformation # tries to transform a picture such that the black areas will become true black. # The same applies for yellow, white and (if valid) red. # define and parse command line arguments # check if file exists # check if dir exists, create if not # check resize factor # check blur alg # check kernel size # create instance of kMeans # corr_img1 = scaleandshift2(KM.input_image, [1, 1, 1], [KM.shiftB, KM.shiftG, KM.shiftR]) def batchExtraction(image, batchSideLength): xSize, ySize, zSize = image.shape xSizeNew = int(xSize / batchSideLength) ySizeNew = int(ySize / batchSideLength) newImage = np.zeros((xSizeNew,ySizeNew,zSize)) for i in range(xSizeNew): for j in range(ySizeNew): # create indices for the batches xlow = i*batchSideLength xhigh = (i+1)*batchSideLength ylow = j*batchSideLength yhigh = (j+1)*batchSideLength if(i == (xSizeNew-1) ): xhigh = xSize - 1 if(j == (ySizeNew - 1)): yhigh = ySize -1 # average the batches newImage[i, j, 0] = np.mean(image[xlow:xhigh, ylow:yhigh, 0]) newImage[i, j, 1] = np.mean(image[xlow:xhigh, ylow:yhigh, 1]) newImage[i, j, 2] = np.mean(image[xlow:xhigh, ylow:yhigh, 2]) return newImage input_img 
= cv2.imread("test_images/pic3.jpg", cv2.IMREAD_UNCHANGED) #input_img_converted = getimgdatapts(input_img) #print(input_img_converted.shape) width, height, channels = input_img.shape trial = cv2.resize(input_img, (0, 0), fx=0.1, fy=0.1) print(trial.shape) # blur image using gaussian: blurG = cv2.GaussianBlur(trial, (5,5), 0) # blur image using median: blurM = cv2.medianBlur(trial, 5) # plot both blurred images blurBoth = np.concatenate((blurG, blurM), axis=1) # apply kmeans on blurred image: # number of centers for kmeans n_centers = 6 kmc = KMeans(n_clusters=n_centers, init='k-means++', max_iter=20) trial_converted = getimgdatapts(blurM) kmc.fit(trial_converted) trained_centers = kmc.cluster_centers_ labels = kmc.labels_ # print centers and counts labelcount = Counter() for i in np.arange(n_centers): labelcount[i] = np.sum(labels == i) print(labelcount) print(trained_centers) print(kmc.cluster_centers_[1]/255) str0 = tuple([kmc.cluster_centers_[0,2],kmc.cluster_centers_[0,1],kmc.cluster_centers_[0,0]]) str1 = tuple([kmc.cluster_centers_[1,2],kmc.cluster_centers_[1,1],kmc.cluster_centers_[1,0]]) str2 = tuple([kmc.cluster_centers_[2,2],kmc.cluster_centers_[2,1],kmc.cluster_centers_[2,0]]) str3 = tuple([kmc.cluster_centers_[3,2],kmc.cluster_centers_[3,1],kmc.cluster_centers_[3,0]]) str4 = tuple([kmc.cluster_centers_[4,2],kmc.cluster_centers_[4,1],kmc.cluster_centers_[4,0]]) str5 = tuple([kmc.cluster_centers_[5,2],kmc.cluster_centers_[5,1],kmc.cluster_centers_[5,0]]) print(str1) image0 = np.zeros((200, 200, 3), np.uint8) image0[:] = str0 image1 = np.zeros((200, 200, 3), np.uint8) image1[:] = str1 image2 = np.zeros((200, 200, 3), np.uint8) image2[:] = str2 image3 = np.zeros((200, 200, 3), np.uint8) image3[:] = str3 image4 = np.zeros((200, 200, 3), np.uint8) image4[:] = str4 image5 = np.zeros((200, 200, 3), np.uint8) image5[:] = str5 labelArray = kmc.labels_ num0 = np.sum(labelArray==0) num1 = np.sum(labelArray==1) num2 = np.sum(labelArray==2) num3 = 
np.sum(labelArray==3) num4 = np.sum(labelArray==4) num5 = np.sum(labelArray==5) f, axarr = plt.subplots(3, 2) axarr[0,0].imshow(image0) axarr[0,0].axis('off') axarr[0,0].set_title(str(num0)) axarr[0,1].imshow(image1) axarr[0,1].axis('off') axarr[0,1].set_title(str(num1)) axarr[1,0].imshow(image2) axarr[1,0].axis('off') axarr[1,0].set_title(str(num2)) axarr[1,1].imshow(image3) axarr[1,1].axis('off') axarr[1,1].set_title(str(num3)) axarr[2,0].imshow(image4) axarr[2,0].axis('off') axarr[2,0].set_title(str(num4)) axarr[2,1].imshow(image5) axarr[2,1].axis('off') axarr[2,1].set_title(str(num5)) plt.show() cv2.waitKey(0) cv2.destroyAllWindows() for i in range(kmc.n_clusters): print(np.sum(labelArray==i)) | 2.837708 | 3 |
Python/function.py | DarrentLearn/Rpi20181223 | 0 | 6618759 | <gh_stars>0
def fun():
print('fun')
pass;
fun()
def fun2():
return False
if fun2():
print ('True')
else:
print('False')
def fun3(param):
print(param)
fun3('This is fun3')
obj= None
if obj is None:
print("It's some thing")
else:
print("No thing")
def fun4(item1,item2,item3):
return{'a':item1,'B':item2,'C':item3}
print(fun4('aa','bb','cc'))
print('chang param location')
print(fun4(item2='bbb',item3='ccc',item1='aaa'))
def fun5 (p1,p2,p3='param3'):
'This is the function document'
return{'p1=':p1,'p2=':p2,'p3':p3}
print(fun5('param1','param2'))
print(fun5('param1a','param2a','param3a'))
print(fun5.__doc__)
| def fun():
print('fun')
pass;
fun()
def fun2():
return False
if fun2():
print ('True')
else:
print('False')
def fun3(param):
print(param)
fun3('This is fun3')
obj= None
if obj is None:
print("It's some thing")
else:
print("No thing")
def fun4(item1,item2,item3):
return{'a':item1,'B':item2,'C':item3}
print(fun4('aa','bb','cc'))
print('chang param location')
print(fun4(item2='bbb',item3='ccc',item1='aaa'))
def fun5 (p1,p2,p3='param3'):
'This is the function document'
return{'p1=':p1,'p2=':p2,'p3':p3}
print(fun5('param1','param2'))
print(fun5('param1a','param2a','param3a'))
print(fun5.__doc__) | none | 1 | 3.23707 | 3 | |
Bot/JsonEncoder.py | quickDESIGNnl/crypto-bot | 199 | 6618760 | from enum import Enum
from json import JSONEncoder
from datetime import datetime
from Bot.CustomSerializable import CustomSerializable
from Bot.Value import Value
class CustomJsonEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, Value):
return str(obj)
if isinstance(obj, CustomSerializable):
return obj.serializable_dict()
if isinstance(obj, float):
return format(obj, '.8f')
if isinstance(obj, Enum):
return obj.name
if isinstance(obj, datetime):
return obj.now().replace(microsecond=0).isoformat(' ')
return obj.__dict__
| from enum import Enum
from json import JSONEncoder
from datetime import datetime
from Bot.CustomSerializable import CustomSerializable
from Bot.Value import Value
class CustomJsonEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, Value):
return str(obj)
if isinstance(obj, CustomSerializable):
return obj.serializable_dict()
if isinstance(obj, float):
return format(obj, '.8f')
if isinstance(obj, Enum):
return obj.name
if isinstance(obj, datetime):
return obj.now().replace(microsecond=0).isoformat(' ')
return obj.__dict__
| none | 1 | 2.735563 | 3 | |
basiclive/auth/cas/apps.py | znarthur/basic-live | 0 | 6618761 | <gh_stars>0
from django.apps import AppConfig
class CASAuthConfig(AppConfig):
name = 'auth.cas'
verbose_name = 'CAS Authentication'
| from django.apps import AppConfig
class CASAuthConfig(AppConfig):
name = 'auth.cas'
verbose_name = 'CAS Authentication' | none | 1 | 1.128321 | 1 | |
lib/systems/fluorene.py | pulsar-chem/BPModule | 0 | 6618762 | import pulsar as psr
def load_ref_system():
""" Returns fluorene as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.00000 -1.85620 0.00000
C 0.00000 -0.87905 -1.15198
C 0.00000 0.43021 -0.71802
C -0.00000 0.43021 0.71802
C -0.00000 -0.87905 1.15198
C 0.00000 -1.17905 -2.51475
C 0.00000 -0.12014 -3.43941
C 0.00000 1.21645 -2.98978
C 0.00000 1.49951 -1.61401
C -0.00000 1.49951 1.61401
C -0.00000 1.21645 2.98978
C -0.00000 -0.12014 3.43941
C -0.00000 -1.17905 2.51475
H 0.91474 -2.48566 0.00000
H -0.91474 -2.48566 0.00000
H 0.00000 -2.20677 -2.85399
H 0.00000 -0.33175 -4.50072
H 0.00000 2.02688 -3.70702
H 0.00000 2.52267 -1.26186
H 0.00000 2.52267 1.26186
H -0.00000 2.02688 3.70702
H -0.00000 -0.33175 4.50072
H -0.00000 -2.20677 2.85399
""")
| import pulsar as psr
def load_ref_system():
""" Returns fluorene as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
C -0.00000 -1.85620 0.00000
C 0.00000 -0.87905 -1.15198
C 0.00000 0.43021 -0.71802
C -0.00000 0.43021 0.71802
C -0.00000 -0.87905 1.15198
C 0.00000 -1.17905 -2.51475
C 0.00000 -0.12014 -3.43941
C 0.00000 1.21645 -2.98978
C 0.00000 1.49951 -1.61401
C -0.00000 1.49951 1.61401
C -0.00000 1.21645 2.98978
C -0.00000 -0.12014 3.43941
C -0.00000 -1.17905 2.51475
H 0.91474 -2.48566 0.00000
H -0.91474 -2.48566 0.00000
H 0.00000 -2.20677 -2.85399
H 0.00000 -0.33175 -4.50072
H 0.00000 2.02688 -3.70702
H 0.00000 2.52267 -1.26186
H 0.00000 2.52267 1.26186
H -0.00000 2.02688 3.70702
H -0.00000 -0.33175 4.50072
H -0.00000 -2.20677 2.85399
""")
| en | 0.295424 | Returns fluorene as found in the IQMol fragment library. All credit to https://github.com/nutjunkie/IQmol C -0.00000 -1.85620 0.00000 C 0.00000 -0.87905 -1.15198 C 0.00000 0.43021 -0.71802 C -0.00000 0.43021 0.71802 C -0.00000 -0.87905 1.15198 C 0.00000 -1.17905 -2.51475 C 0.00000 -0.12014 -3.43941 C 0.00000 1.21645 -2.98978 C 0.00000 1.49951 -1.61401 C -0.00000 1.49951 1.61401 C -0.00000 1.21645 2.98978 C -0.00000 -0.12014 3.43941 C -0.00000 -1.17905 2.51475 H 0.91474 -2.48566 0.00000 H -0.91474 -2.48566 0.00000 H 0.00000 -2.20677 -2.85399 H 0.00000 -0.33175 -4.50072 H 0.00000 2.02688 -3.70702 H 0.00000 2.52267 -1.26186 H 0.00000 2.52267 1.26186 H -0.00000 2.02688 3.70702 H -0.00000 -0.33175 4.50072 H -0.00000 -2.20677 2.85399 | 2.052953 | 2 |
Project07, Beginner, caesar ciphone/main.py | lvrbanec/100DaysOfCode_Python | 0 | 6618763 | # 02.02.2021, Frollo
# project: create Casear Cipher
# level Beginner
from art import logo
print(logo)
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# define a function for encryption
def caesar(word, shift_ammount, direction):
encrypted_word = ''
if direction == 'decode':
shift_ammount *= -1 # substract the position if decoding
for char in word:
if char in alphabet:
position = alphabet.index(char) # gives only the first index
encrypted_letter = alphabet[position + shift_ammount]
encrypted_word += encrypted_letter
else:
encrypted_word += char
print(f"The {direction}d text is '{encrypted_word}'.")
should_continue = True # run again?
while should_continue:
# give input to encript
desired_direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
# if the user imputs huge number find the one that corresponds to alphabet, 26 letters our alphabet
shift = shift % 26
# call function
caesar(word = text, shift_ammount = shift, direction = desired_direction )
# run again?
run_again = input("Type 'yes' if you want to run again. Otherwise type 'no'\n.")
if run_again == "no":
should_continue = False
print("Goodbye") | # 02.02.2021, Frollo
# project: create Casear Cipher
# level Beginner
from art import logo
print(logo)
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# define a function for encryption
def caesar(word, shift_ammount, direction):
encrypted_word = ''
if direction == 'decode':
shift_ammount *= -1 # substract the position if decoding
for char in word:
if char in alphabet:
position = alphabet.index(char) # gives only the first index
encrypted_letter = alphabet[position + shift_ammount]
encrypted_word += encrypted_letter
else:
encrypted_word += char
print(f"The {direction}d text is '{encrypted_word}'.")
should_continue = True # run again?
while should_continue:
# give input to encript
desired_direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n")
text = input("Type your message:\n").lower()
shift = int(input("Type the shift number:\n"))
# if the user imputs huge number find the one that corresponds to alphabet, 26 letters our alphabet
shift = shift % 26
# call function
caesar(word = text, shift_ammount = shift, direction = desired_direction )
# run again?
run_again = input("Type 'yes' if you want to run again. Otherwise type 'no'\n.")
if run_again == "no":
should_continue = False
print("Goodbye") | en | 0.59918 | # 02.02.2021, Frollo # project: create Casear Cipher # level Beginner # define a function for encryption # substract the position if decoding # gives only the first index # run again? # give input to encript # if the user imputs huge number find the one that corresponds to alphabet, 26 letters our alphabet # call function # run again? | 4.158079 | 4 |
utils/dataset.py | dong-jf15/TransformerTranslationModel | 2 | 6618764 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.tokenizer import *
from mxnet.gluon.data import dataset
from six.moves import xrange
import mxnet as mx
import mxnet.ndarray as nd
import random
_FILE_SHUFFLE_BUFFER = 100
_READ_RECORD_BUFFER = 8 * 1000 * 1000
_MIN_BOUNDARY = 8
_BOUNDARY_SCALE = 1.1
class TranslationDataset(dataset.Dataset):
"""
A dataset which load sentences from files and decode them to int arrays.
"""
def __init__(self, dir_lang1, dir_lang2, subtokenizer):
self._dir_lang1 = dir_lang1
self._dir_lang2 = dir_lang2
self._list = []
self._subtokenizer = subtokenizer
self._current_idx = 0
with open(dir_lang1) as f:
self._lang1 = f.readlines()
f.close()
with open(dir_lang2) as f:
self._lang2 = f.readlines()
f.close()
for i in xrange(len(self._lang1)):
self._list.append({'input': self._subtokenizer.encode(self._lang1[i], add_eos=True),
'targets': self._subtokenizer.encode(self._lang2[i], add_eos=True)})
def __getitem__(self, idx):
if idx >= len(self._list):
print('Cant find the item, idx too big')
if idx < 0:
print('Cant find the item, idx less than zero')
return self._list[idx]
def __len__(self):
return len(self._list)
def get_mini_batch(self, batch_size=10):
"""Get a mini batch for training"""
if (self._current_idx + batch_size) > len(self._list):
random.shuffle(self._list)
self._current_idx = 0
max_length_input = 0
max_length_targets = 0
for i in xrange(0, batch_size, 1):
if len(self._list[self._current_idx+i]['input']) > max_length_input:
max_length_input = len(self._list[self._current_idx+i]['input'])
if len(self._list[self._current_idx+i]['targets']) > max_length_targets:
max_length_targets = len(self._list[self._current_idx+i]['targets'])
input = []
targets = []
for i in xrange(0, batch_size, 1):
length_input = len(self._list[self._current_idx + i]['input'])
add_input = np.append(np.array(self._list[self._current_idx + i]['input']),
np.zeros(max_length_input - length_input))
input.append(add_input)
length_targets = len(self._list[self._current_idx + i]['targets'])
add_targets = np.append(np.array(self._list[self._current_idx + i]['targets']),
np.zeros(max_length_targets - length_targets))
targets.append(add_targets)
self._current_idx = self._current_idx + batch_size
return {'input': nd.array(input), 'targets': nd.array(targets)}
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.tokenizer import *
from mxnet.gluon.data import dataset
from six.moves import xrange
import mxnet as mx
import mxnet.ndarray as nd
import random
_FILE_SHUFFLE_BUFFER = 100
_READ_RECORD_BUFFER = 8 * 1000 * 1000
_MIN_BOUNDARY = 8
_BOUNDARY_SCALE = 1.1
class TranslationDataset(dataset.Dataset):
"""
A dataset which load sentences from files and decode them to int arrays.
"""
def __init__(self, dir_lang1, dir_lang2, subtokenizer):
self._dir_lang1 = dir_lang1
self._dir_lang2 = dir_lang2
self._list = []
self._subtokenizer = subtokenizer
self._current_idx = 0
with open(dir_lang1) as f:
self._lang1 = f.readlines()
f.close()
with open(dir_lang2) as f:
self._lang2 = f.readlines()
f.close()
for i in xrange(len(self._lang1)):
self._list.append({'input': self._subtokenizer.encode(self._lang1[i], add_eos=True),
'targets': self._subtokenizer.encode(self._lang2[i], add_eos=True)})
def __getitem__(self, idx):
if idx >= len(self._list):
print('Cant find the item, idx too big')
if idx < 0:
print('Cant find the item, idx less than zero')
return self._list[idx]
def __len__(self):
return len(self._list)
def get_mini_batch(self, batch_size=10):
"""Get a mini batch for training"""
if (self._current_idx + batch_size) > len(self._list):
random.shuffle(self._list)
self._current_idx = 0
max_length_input = 0
max_length_targets = 0
for i in xrange(0, batch_size, 1):
if len(self._list[self._current_idx+i]['input']) > max_length_input:
max_length_input = len(self._list[self._current_idx+i]['input'])
if len(self._list[self._current_idx+i]['targets']) > max_length_targets:
max_length_targets = len(self._list[self._current_idx+i]['targets'])
input = []
targets = []
for i in xrange(0, batch_size, 1):
length_input = len(self._list[self._current_idx + i]['input'])
add_input = np.append(np.array(self._list[self._current_idx + i]['input']),
np.zeros(max_length_input - length_input))
input.append(add_input)
length_targets = len(self._list[self._current_idx + i]['targets'])
add_targets = np.append(np.array(self._list[self._current_idx + i]['targets']),
np.zeros(max_length_targets - length_targets))
targets.append(add_targets)
self._current_idx = self._current_idx + batch_size
return {'input': nd.array(input), 'targets': nd.array(targets)}
| en | 0.896101 | A dataset which load sentences from files and decode them to int arrays. Get a mini batch for training | 2.414693 | 2 |
matador/hull/hull_ensemble.py | dquigley-warwick/matador | 24 | 6618765 | <gh_stars>10-100
# coding: utf-8
# Distributed under the terms of the MIT License.
""" This submodule implements the base class for parameterised phase diagrams. """
import tqdm
from matador.hull import PhaseDiagram, QueryConvexHull
from matador.utils.cursor_utils import filter_cursor_by_chempots, recursive_get, recursive_set, set_cursor_from_array
from matador.utils.chem_utils import get_formation_energy, get_root_source, get_formula_from_stoich
class EnsembleHull(QueryConvexHull):
""" Class to create and store an ensemble of composition vs energy convex
hulls from cursor data. The variable energies must be stored under a given
key, e.g. `doc['_beef'][energy_key][beef_index]`, as specified by init.
Data must be stored in the following way under each document in cursor::
{...,
data_key: {
parameter_key: "<list of parameter values>",
energy_key: "<list of energies at parameter values>",
},
}
Hull data will be stored as arrays per document under
``doc[data_key]['hull_distance']``
and ``doc[data_key]['formation_' + energy_key]``.
Inherits the attributes of matador.hull.QueryConvexHull, with many set to
None.
Attributes:
phase_diagrams (list of :obj:`matador.hull.PhaseDiagram`): list of phase diagram
objects for each parameter value.
"""
def __init__(self, cursor, data_key, energy_key='enthalpy_per_atom', chempot_energy_key=None,
num_samples=None, parameter_key=None, species=None, voltage=False, verbosity=None, **kwargs):
""" Initialise EnsembleHull from a cursor, with other keywords
following QueryConvexHull.
Parameters:
cursor (list[dict]): list of matador documents containing
variable parameter data for energies.
data_key (str): the key under which all parameter data is
stored to the variable parameter, e.g. `_beef` or `_temperature`.
Keyword arguments:
energy_key (str): the key under `parameter_key` to use to create
the hulls.
chempot_energy_key (str): the key used to create the first convex hull.
parameter_key (str): the key pertaining to the variable parameter
itself, e.g. `temperature` or `thetas`.
num_samples (int): use up to this many samples in creating the hull.
species (list[str]): list of elements/chempots to use, in
the desired order.
voltage (bool): whether or not to compute voltage curves.
plot_kwargs (dict): arguments to pass to plot_hull function.
kwargs (dict): other arguments to pass to QueryConvexHull.
"""
# sometimes the first hull needs to be made with a different key
if chempot_energy_key is not None:
self.chempot_energy_key = chempot_energy_key
else:
self.chempot_energy_key = energy_key
super().__init__(cursor=cursor,
energy_key=self.chempot_energy_key,
species=species,
voltage=voltage,
no_plot=True,
lazy=False,
**kwargs)
self.energy_key = energy_key
if self.phase_diagram is None:
del self.phase_diagram
if self.hull_dist is None:
del self.hull_dist
self.from_cursor = True
self.verbosity = verbosity
# set up relative keys
self.formation_key = 'formation_' + self.energy_key
self.data_key = data_key
self.parameter_key = parameter_key
if self.parameter_key is None:
self._parameter_keys = None
else:
self._parameter_keys = [self.data_key] + [parameter_key]
self._formation_keys = [self.data_key] + [self.formation_key]
self._hulldist_keys = [self.data_key] + ['hull_distance']
self._energy_keys = [self.data_key] + [self.energy_key]
self.phase_diagrams = []
self.set_chempots(energy_key=self.chempot_energy_key)
self.cursor = filter_cursor_by_chempots(self.species, self.cursor)
self.cursor = sorted(self.cursor, key=lambda doc: (recursive_get(doc, self.chempot_energy_key), doc['concentration']))
if self.parameter_key is None:
parameter_iterable = recursive_get(self.chempot_cursor[0], self._energy_keys)
_keys = self._energy_keys
else:
parameter_iterable = recursive_get(self.chempot_cursor[0], self._parameter_keys)
_keys = self.parameter_key
if parameter_iterable is None:
raise RuntimeError(
f"Could not find any data for keys {_keys} in {self.chempot_cursor[0]}."
)
print(f"Found {len(parameter_iterable)} entries under data key: {self.data_key}.")
# allocate formation energy and hull distance arrays
for ind, doc in enumerate(self.cursor):
recursive_set(doc, self._formation_keys, [None] * len(recursive_get(doc, self._energy_keys)))
recursive_set(doc, self._hulldist_keys, [None] * len(recursive_get(doc, self._energy_keys)))
n_hulls = len(parameter_iterable)
if num_samples is not None:
parameter_iterable = parameter_iterable[:num_samples]
print(f"Using {num_samples} out of {n_hulls} possible phase diagrams.")
else:
num_samples = n_hulls
for param_ind, parameter in enumerate(tqdm.tqdm(parameter_iterable)):
for ind, doc in enumerate(self.cursor):
if self.parameter_key is not None:
assert recursive_get(doc, self._parameter_keys + [param_ind]) == parameter
formation_energy = get_formation_energy(self.chempot_cursor, doc,
energy_key=self._energy_keys + [param_ind])
recursive_set(self.cursor[ind], self._formation_keys + [param_ind], formation_energy)
self.phase_diagrams.append(PhaseDiagram(self.cursor,
self._formation_keys + [param_ind],
self._dimension))
set_cursor_from_array(self.cursor,
self.phase_diagrams[-1].hull_dist,
self._hulldist_keys + [param_ind])
self.stability_histogram = self.generate_stability_statistics()
def generate_stability_statistics(self, group_by='structure'):
""" Creates a histogram that counts how many times each structure
is found to be stable in the ensemble.
Keyword arguments:
group_by (str): either 'structure' or 'formula' for bar groupings.
"""
from collections import defaultdict
histogram = defaultdict(int)
for pd in self.phase_diagrams:
for doc in pd.stable_structures:
if group_by == 'formula':
histogram[get_formula_from_stoich(doc['stoichiometry'])] += 1
else:
histogram[get_root_source(doc)] += 1
return histogram
def plot_hull(self, **kwargs):
""" Hull plot helper function. """
from matador.plotting.hull_plotting import plot_ensemble_hull
return plot_ensemble_hull(self, self.data_key, formation_energy_key=self.formation_key, **kwargs)
| # coding: utf-8
# Distributed under the terms of the MIT License.
""" This submodule implements the base class for parameterised phase diagrams. """
import tqdm
from matador.hull import PhaseDiagram, QueryConvexHull
from matador.utils.cursor_utils import filter_cursor_by_chempots, recursive_get, recursive_set, set_cursor_from_array
from matador.utils.chem_utils import get_formation_energy, get_root_source, get_formula_from_stoich
class EnsembleHull(QueryConvexHull):
""" Class to create and store an ensemble of composition vs energy convex
hulls from cursor data. The variable energies must be stored under a given
key, e.g. `doc['_beef'][energy_key][beef_index]`, as specified by init.
Data must be stored in the following way under each document in cursor::
{...,
data_key: {
parameter_key: "<list of parameter values>",
energy_key: "<list of energies at parameter values>",
},
}
Hull data will be stored as arrays per document under
``doc[data_key]['hull_distance']``
and ``doc[data_key]['formation_' + energy_key]``.
Inherits the attributes of matador.hull.QueryConvexHull, with many set to
None.
Attributes:
phase_diagrams (list of :obj:`matador.hull.PhaseDiagram`): list of phase diagram
objects for each parameter value.
"""
def __init__(self, cursor, data_key, energy_key='enthalpy_per_atom', chempot_energy_key=None,
num_samples=None, parameter_key=None, species=None, voltage=False, verbosity=None, **kwargs):
""" Initialise EnsembleHull from a cursor, with other keywords
following QueryConvexHull.
Parameters:
cursor (list[dict]): list of matador documents containing
variable parameter data for energies.
data_key (str): the key under which all parameter data is
stored to the variable parameter, e.g. `_beef` or `_temperature`.
Keyword arguments:
energy_key (str): the key under `parameter_key` to use to create
the hulls.
chempot_energy_key (str): the key used to create the first convex hull.
parameter_key (str): the key pertaining to the variable parameter
itself, e.g. `temperature` or `thetas`.
num_samples (int): use up to this many samples in creating the hull.
species (list[str]): list of elements/chempots to use, in
the desired order.
voltage (bool): whether or not to compute voltage curves.
plot_kwargs (dict): arguments to pass to plot_hull function.
kwargs (dict): other arguments to pass to QueryConvexHull.
"""
# sometimes the first hull needs to be made with a different key
if chempot_energy_key is not None:
self.chempot_energy_key = chempot_energy_key
else:
self.chempot_energy_key = energy_key
super().__init__(cursor=cursor,
energy_key=self.chempot_energy_key,
species=species,
voltage=voltage,
no_plot=True,
lazy=False,
**kwargs)
self.energy_key = energy_key
if self.phase_diagram is None:
del self.phase_diagram
if self.hull_dist is None:
del self.hull_dist
self.from_cursor = True
self.verbosity = verbosity
# set up relative keys
self.formation_key = 'formation_' + self.energy_key
self.data_key = data_key
self.parameter_key = parameter_key
if self.parameter_key is None:
self._parameter_keys = None
else:
self._parameter_keys = [self.data_key] + [parameter_key]
self._formation_keys = [self.data_key] + [self.formation_key]
self._hulldist_keys = [self.data_key] + ['hull_distance']
self._energy_keys = [self.data_key] + [self.energy_key]
self.phase_diagrams = []
self.set_chempots(energy_key=self.chempot_energy_key)
self.cursor = filter_cursor_by_chempots(self.species, self.cursor)
self.cursor = sorted(self.cursor, key=lambda doc: (recursive_get(doc, self.chempot_energy_key), doc['concentration']))
if self.parameter_key is None:
parameter_iterable = recursive_get(self.chempot_cursor[0], self._energy_keys)
_keys = self._energy_keys
else:
parameter_iterable = recursive_get(self.chempot_cursor[0], self._parameter_keys)
_keys = self.parameter_key
if parameter_iterable is None:
raise RuntimeError(
f"Could not find any data for keys {_keys} in {self.chempot_cursor[0]}."
)
print(f"Found {len(parameter_iterable)} entries under data key: {self.data_key}.")
# allocate formation energy and hull distance arrays
for ind, doc in enumerate(self.cursor):
recursive_set(doc, self._formation_keys, [None] * len(recursive_get(doc, self._energy_keys)))
recursive_set(doc, self._hulldist_keys, [None] * len(recursive_get(doc, self._energy_keys)))
n_hulls = len(parameter_iterable)
if num_samples is not None:
parameter_iterable = parameter_iterable[:num_samples]
print(f"Using {num_samples} out of {n_hulls} possible phase diagrams.")
else:
num_samples = n_hulls
for param_ind, parameter in enumerate(tqdm.tqdm(parameter_iterable)):
for ind, doc in enumerate(self.cursor):
if self.parameter_key is not None:
assert recursive_get(doc, self._parameter_keys + [param_ind]) == parameter
formation_energy = get_formation_energy(self.chempot_cursor, doc,
energy_key=self._energy_keys + [param_ind])
recursive_set(self.cursor[ind], self._formation_keys + [param_ind], formation_energy)
self.phase_diagrams.append(PhaseDiagram(self.cursor,
self._formation_keys + [param_ind],
self._dimension))
set_cursor_from_array(self.cursor,
self.phase_diagrams[-1].hull_dist,
self._hulldist_keys + [param_ind])
self.stability_histogram = self.generate_stability_statistics()
def generate_stability_statistics(self, group_by='structure'):
""" Creates a histogram that counts how many times each structure
is found to be stable in the ensemble.
Keyword arguments:
group_by (str): either 'structure' or 'formula' for bar groupings.
"""
from collections import defaultdict
histogram = defaultdict(int)
for pd in self.phase_diagrams:
for doc in pd.stable_structures:
if group_by == 'formula':
histogram[get_formula_from_stoich(doc['stoichiometry'])] += 1
else:
histogram[get_root_source(doc)] += 1
return histogram
def plot_hull(self, **kwargs):
""" Hull plot helper function. """
from matador.plotting.hull_plotting import plot_ensemble_hull
return plot_ensemble_hull(self, self.data_key, formation_energy_key=self.formation_key, **kwargs) | en | 0.584811 | # coding: utf-8 # Distributed under the terms of the MIT License. This submodule implements the base class for parameterised phase diagrams. Class to create and store an ensemble of composition vs energy convex hulls from cursor data. The variable energies must be stored under a given key, e.g. `doc['_beef'][energy_key][beef_index]`, as specified by init. Data must be stored in the following way under each document in cursor:: {..., data_key: { parameter_key: "<list of parameter values>", energy_key: "<list of energies at parameter values>", }, } Hull data will be stored as arrays per document under ``doc[data_key]['hull_distance']`` and ``doc[data_key]['formation_' + energy_key]``. Inherits the attributes of matador.hull.QueryConvexHull, with many set to None. Attributes: phase_diagrams (list of :obj:`matador.hull.PhaseDiagram`): list of phase diagram objects for each parameter value. Initialise EnsembleHull from a cursor, with other keywords following QueryConvexHull. Parameters: cursor (list[dict]): list of matador documents containing variable parameter data for energies. data_key (str): the key under which all parameter data is stored to the variable parameter, e.g. `_beef` or `_temperature`. Keyword arguments: energy_key (str): the key under `parameter_key` to use to create the hulls. chempot_energy_key (str): the key used to create the first convex hull. parameter_key (str): the key pertaining to the variable parameter itself, e.g. `temperature` or `thetas`. num_samples (int): use up to this many samples in creating the hull. species (list[str]): list of elements/chempots to use, in the desired order. voltage (bool): whether or not to compute voltage curves. plot_kwargs (dict): arguments to pass to plot_hull function. kwargs (dict): other arguments to pass to QueryConvexHull. 
# sometimes the first hull needs to be made with a different key # set up relative keys # allocate formation energy and hull distance arrays Creates a histogram that counts how many times each structure is found to be stable in the ensemble. Keyword arguments: group_by (str): either 'structure' or 'formula' for bar groupings. Hull plot helper function. | 2.364652 | 2 |
objects/CSCG/_3d/mesh/domain/regions/region/types_wrt_metric/crazy.py | mathischeap/mifem | 1 | 6618766 |
import numpy as np
from objects.CSCG._3d.mesh.domain.regions.region.types_wrt_metric.base import TypeWr2MetricBase
from objects.CSCG._3d.mesh.elements.element.types_wrt_metric.chaotic import ChaoticElement
from objects.CSCG._3d.mesh.elements.element.types_wrt_metric.orthogonal import OrthogonalElement
from objects.CSCG._3d.mesh.trace.elements.element.types_wrt_metric.chaotic import ChaoticTraceElement
from objects.CSCG._3d.mesh.trace.elements.element.types_wrt_metric.orthogonal import OrthogonalTraceElement
from typing import Union
class Crazy(TypeWr2MetricBase):
"""The crazy regions is the regions the crazy mesh uses."""
def __init__(self, region):
super().__init__(region)
self._c_ = self._region_._domain_input_.c
bounds = self._region_._domain_input_.bounds
x0, x1 = bounds[0]
y0, y1 = bounds[1]
z0, z1 = bounds[2]
self._Lxyz_ = (x1-x0, y1-y0, z1-z0)
self._freeze_self_()
@property
def mark(self):
if self._mark_ is None:
self._mark_ = 'crazy:Lx{}_Ly{}_Lz{}~c{}'.format(
'%.8f' % self._Lxyz_[0], '%.8f' % self._Lxyz_[1], '%.8f' % self._Lxyz_[2], '%.5f' % self._c_
)
return self._mark_
def ___CLASSIFY_ELEMENT_of_spacing___(self, spacing: tuple) -> Union[ChaoticElement, OrthogonalElement]:
assert np.shape(spacing) == (3,2), "I need a spacing of shape (3,2) to represent an element in a regions."
assert all([0 <= spacing[i][0] < spacing[i][1] <= 1 for i in range(3)]), f"spacing={spacing} is wrong."
if self._c_ == 0:
LxLyLz = [(spacing[i][1] - spacing[i][0]) * self._Lxyz_[i] for i in range(3)]
return OrthogonalElement(LxLyLz)
else:
return ChaoticElement()
def ___CLASSIFY_TRACE_ELEMENT_of_spacing___(self, trace_spacing: tuple) -> Union[ChaoticTraceElement, OrthogonalTraceElement]:
"""
:param trace_spacing: the trace_spacing representing a trace element.
:return:
"""
assert len(trace_spacing) == 3, "It is not a trace spacing."
s0, s1, s2 = trace_spacing
if isinstance(s0, float):
perp_to = 'x'
d1, d2 = s1[1]-s1[0], s2[1]-s2[0]
elif isinstance(s1, float):
perp_to = 'y'
d1, d2 = s2[1]-s2[0], s0[1]-s0[0]
elif isinstance(s2, float):
perp_to = 'z'
d1, d2 = s0[1]-s0[0], s1[1]-s1[0]
else:
raise Exception()
if self._c_ == 0:
return OrthogonalTraceElement(perp_to, d1, d2)
else:
return ChaoticTraceElement()
|
import numpy as np
from objects.CSCG._3d.mesh.domain.regions.region.types_wrt_metric.base import TypeWr2MetricBase
from objects.CSCG._3d.mesh.elements.element.types_wrt_metric.chaotic import ChaoticElement
from objects.CSCG._3d.mesh.elements.element.types_wrt_metric.orthogonal import OrthogonalElement
from objects.CSCG._3d.mesh.trace.elements.element.types_wrt_metric.chaotic import ChaoticTraceElement
from objects.CSCG._3d.mesh.trace.elements.element.types_wrt_metric.orthogonal import OrthogonalTraceElement
from typing import Union
class Crazy(TypeWr2MetricBase):
"""The crazy regions is the regions the crazy mesh uses."""
def __init__(self, region):
super().__init__(region)
self._c_ = self._region_._domain_input_.c
bounds = self._region_._domain_input_.bounds
x0, x1 = bounds[0]
y0, y1 = bounds[1]
z0, z1 = bounds[2]
self._Lxyz_ = (x1-x0, y1-y0, z1-z0)
self._freeze_self_()
@property
def mark(self):
if self._mark_ is None:
self._mark_ = 'crazy:Lx{}_Ly{}_Lz{}~c{}'.format(
'%.8f' % self._Lxyz_[0], '%.8f' % self._Lxyz_[1], '%.8f' % self._Lxyz_[2], '%.5f' % self._c_
)
return self._mark_
def ___CLASSIFY_ELEMENT_of_spacing___(self, spacing: tuple) -> Union[ChaoticElement, OrthogonalElement]:
assert np.shape(spacing) == (3,2), "I need a spacing of shape (3,2) to represent an element in a regions."
assert all([0 <= spacing[i][0] < spacing[i][1] <= 1 for i in range(3)]), f"spacing={spacing} is wrong."
if self._c_ == 0:
LxLyLz = [(spacing[i][1] - spacing[i][0]) * self._Lxyz_[i] for i in range(3)]
return OrthogonalElement(LxLyLz)
else:
return ChaoticElement()
def ___CLASSIFY_TRACE_ELEMENT_of_spacing___(self, trace_spacing: tuple) -> Union[ChaoticTraceElement, OrthogonalTraceElement]:
"""
:param trace_spacing: the trace_spacing representing a trace element.
:return:
"""
assert len(trace_spacing) == 3, "It is not a trace spacing."
s0, s1, s2 = trace_spacing
if isinstance(s0, float):
perp_to = 'x'
d1, d2 = s1[1]-s1[0], s2[1]-s2[0]
elif isinstance(s1, float):
perp_to = 'y'
d1, d2 = s2[1]-s2[0], s0[1]-s0[0]
elif isinstance(s2, float):
perp_to = 'z'
d1, d2 = s0[1]-s0[0], s1[1]-s1[0]
else:
raise Exception()
if self._c_ == 0:
return OrthogonalTraceElement(perp_to, d1, d2)
else:
return ChaoticTraceElement()
| en | 0.724383 | The crazy regions is the regions the crazy mesh uses. :param trace_spacing: the trace_spacing representing a trace element. :return: | 2.390671 | 2 |
scenario_generation/OLD/conditional-GAN.py | ronamit/l5kit | 1 | 6618767 | <filename>scenario_generation/OLD/conditional-GAN.py
'''
https://github.com/TeeyoHuang/conditional-GAN/blob/master/conditional_gan.py
'''
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
img_save_path = 'images'
os.makedirs(img_save_path, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--beta1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--beta2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes for dataset')
parser.add_argument('--img_size', type=int, default=28, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=1, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=200, help='interval between image sampling')
args = parser.parse_args()
print(args)
C,H,W = args.channels, args.img_size, args.img_size
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal(m.weight, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal(m.weight, 1.0, 0.02)
torch.nn.init.constant(m.bias, 0.0)
class Generator(nn.Module):
# initializers
def __init__(self):
super(Generator, self).__init__()
self.fc1_1 = nn.Linear(100, 256)
self.fc1_1_bn = nn.BatchNorm1d(256)
self.fc1_2 = nn.Linear(10, 256)
self.fc1_2_bn = nn.BatchNorm1d(256)
self.fc2 = nn.Linear(512, 512)
self.fc2_bn = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, 1024)
self.fc3_bn = nn.BatchNorm1d(1024)
self.fc4 = nn.Linear(1024, H*W)
# forward method
def forward(self, input, label):
x = F.relu(self.fc1_1_bn(self.fc1_1(input)))
y = F.relu(self.fc1_2_bn(self.fc1_2(label)))
x = torch.cat([x, y], 1)
x = F.relu(self.fc2_bn(self.fc2(x)))
x = F.relu(self.fc3_bn(self.fc3(x)))
x = F.tanh(self.fc4(x))
return x
class Discriminator(nn.Module):
# initializers
def __init__(self):
super(Discriminator, self).__init__()
self.fc1_1 = nn.Linear(H*W, 1024)
self.fc1_2 = nn.Linear(10, 1024)
self.fc2 = nn.Linear(2048, 512)
self.fc2_bn = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, 256)
self.fc3_bn = nn.BatchNorm1d(256)
self.fc4 = nn.Linear(256, 1)
# forward method
def forward(self, input, label):
x = F.leaky_relu(self.fc1_1(input.view(input.size(0),-1)), 0.2)
y = F.leaky_relu(self.fc1_2(label), 0.2)
x = torch.cat([x, y], 1)
x = F.leaky_relu(self.fc2_bn(self.fc2(x)), 0.2)
x = F.leaky_relu(self.fc3_bn(self.fc3(x)), 0.2)
x = F.sigmoid(self.fc4(x))
return x
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Initialize Generator and discriminator
generator = Generator()
discriminator = Discriminator()
if torch.cuda.is_available():
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data', exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST('../../data', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(args.img_size),
transforms.ToTensor(),
transforms.Normalize(0.5, 0.5)
])),
batch_size=args.batch_size, shuffle=True, drop_last=True)
print('the data is ok')
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
batches_done=0
for epoch in range(args.n_epochs):
for i, (imgs, labels) in enumerate(dataloader):
Batch_Size = args.batch_size
N_Class = args.n_classes
# Adversarial ground truths
valid = Variable(torch.ones(Batch_Size).cuda(), requires_grad=False)
fake = Variable(torch.zeros(Batch_Size).cuda(), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(torch.FloatTensor).cuda())
real_y = torch.zeros(Batch_Size, N_Class)
real_y = Variable(real_y.scatter_(1, labels.view(Batch_Size, 1), 1).cuda())
#y = Variable(y.cuda())
# Sample noise and labels as generator input
noise = Variable(torch.randn((Batch_Size, args.latent_dim)).cuda())
gen_labels = (torch.rand(Batch_Size, 1) * N_Class).type(torch.LongTensor)
gen_y = torch.zeros(Batch_Size, N_Class)
gen_y = Variable(gen_y.scatter_(1, gen_labels.view(Batch_Size, 1), 1).cuda())
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Loss for real images
d_real_loss = adversarial_loss(discriminator(real_imgs, real_y).squeeze(), valid)
# Loss for fake images
gen_imgs = generator(noise, gen_y)
d_fake_loss = adversarial_loss(discriminator(gen_imgs.detach(),gen_y).squeeze(), fake)
# Total discriminator loss
d_loss = (d_real_loss + d_fake_loss)
d_loss.backward()
optimizer_D.step()
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
#gen_imgs = generator(noise, gen_y)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(discriminator(gen_imgs,gen_y).squeeze(), valid)
g_loss.backward()
optimizer_G.step()
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, args.n_epochs, i, len(dataloader),
d_loss.data.cpu(), g_loss.data.cpu()))
batches_done = epoch * len(dataloader) + i
if batches_done % args.sample_interval == 0:
noise = Variable(torch.FloatTensor(np.random.normal(0, 1, (N_Class**2, args.latent_dim))).cuda())
#fixed labels
y_ = torch.LongTensor(np.array([num for num in range(N_Class)])).view(N_Class,1).expand(-1,N_Class).contiguous()
y_fixed = torch.zeros(N_Class**2, N_Class)
y_fixed = Variable(y_fixed.scatter_(1,y_.view(N_Class**2,1),1).cuda())
gen_imgs = generator(noise, y_fixed).view(-1,C,H,W)
save_image(gen_imgs.data, img_save_path + '/%d-%d.png' % (epoch,batches_done), nrow=N_Class, normalize=True) | <filename>scenario_generation/OLD/conditional-GAN.py
'''
https://github.com/TeeyoHuang/conditional-GAN/blob/master/conditional_gan.py
'''
import argparse
import os
import numpy as np
import math
import torchvision.transforms as transforms
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
img_save_path = 'images'
os.makedirs(img_save_path, exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training')
parser.add_argument('--batch_size', type=int, default=64, help='size of the batches')
parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate')
parser.add_argument('--beta1', type=float, default=0.5, help='adam: decay of first order momentum of gradient')
parser.add_argument('--beta2', type=float, default=0.999, help='adam: decay of second order momentum of gradient')
parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation')
parser.add_argument('--latent_dim', type=int, default=100, help='dimensionality of the latent space')
parser.add_argument('--n_classes', type=int, default=10, help='number of classes for dataset')
parser.add_argument('--img_size', type=int, default=28, help='size of each image dimension')
parser.add_argument('--channels', type=int, default=1, help='number of image channels')
parser.add_argument('--sample_interval', type=int, default=200, help='interval between image sampling')
args = parser.parse_args()
print(args)
C,H,W = args.channels, args.img_size, args.img_size
def weights_init_normal(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
torch.nn.init.normal(m.weight, 0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
torch.nn.init.normal(m.weight, 1.0, 0.02)
torch.nn.init.constant(m.bias, 0.0)
class Generator(nn.Module):
# initializers
def __init__(self):
super(Generator, self).__init__()
self.fc1_1 = nn.Linear(100, 256)
self.fc1_1_bn = nn.BatchNorm1d(256)
self.fc1_2 = nn.Linear(10, 256)
self.fc1_2_bn = nn.BatchNorm1d(256)
self.fc2 = nn.Linear(512, 512)
self.fc2_bn = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, 1024)
self.fc3_bn = nn.BatchNorm1d(1024)
self.fc4 = nn.Linear(1024, H*W)
# forward method
def forward(self, input, label):
x = F.relu(self.fc1_1_bn(self.fc1_1(input)))
y = F.relu(self.fc1_2_bn(self.fc1_2(label)))
x = torch.cat([x, y], 1)
x = F.relu(self.fc2_bn(self.fc2(x)))
x = F.relu(self.fc3_bn(self.fc3(x)))
x = F.tanh(self.fc4(x))
return x
class Discriminator(nn.Module):
# initializers
def __init__(self):
super(Discriminator, self).__init__()
self.fc1_1 = nn.Linear(H*W, 1024)
self.fc1_2 = nn.Linear(10, 1024)
self.fc2 = nn.Linear(2048, 512)
self.fc2_bn = nn.BatchNorm1d(512)
self.fc3 = nn.Linear(512, 256)
self.fc3_bn = nn.BatchNorm1d(256)
self.fc4 = nn.Linear(256, 1)
# forward method
def forward(self, input, label):
x = F.leaky_relu(self.fc1_1(input.view(input.size(0),-1)), 0.2)
y = F.leaky_relu(self.fc1_2(label), 0.2)
x = torch.cat([x, y], 1)
x = F.leaky_relu(self.fc2_bn(self.fc2(x)), 0.2)
x = F.leaky_relu(self.fc3_bn(self.fc3(x)), 0.2)
x = F.sigmoid(self.fc4(x))
return x
# Loss function
adversarial_loss = torch.nn.BCELoss()
# Initialize Generator and discriminator
generator = Generator()
discriminator = Discriminator()
if torch.cuda.is_available():
generator.cuda()
discriminator.cuda()
adversarial_loss.cuda()
# Initialize weights
generator.apply(weights_init_normal)
discriminator.apply(weights_init_normal)
# Configure data loader
os.makedirs('../../data', exist_ok=True)
dataloader = torch.utils.data.DataLoader(
datasets.MNIST('../../data', train=True, download=True,
transform=transforms.Compose([
transforms.Resize(args.img_size),
transforms.ToTensor(),
transforms.Normalize(0.5, 0.5)
])),
batch_size=args.batch_size, shuffle=True, drop_last=True)
print('the data is ok')
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
batches_done=0
for epoch in range(args.n_epochs):
for i, (imgs, labels) in enumerate(dataloader):
Batch_Size = args.batch_size
N_Class = args.n_classes
# Adversarial ground truths
valid = Variable(torch.ones(Batch_Size).cuda(), requires_grad=False)
fake = Variable(torch.zeros(Batch_Size).cuda(), requires_grad=False)
# Configure input
real_imgs = Variable(imgs.type(torch.FloatTensor).cuda())
real_y = torch.zeros(Batch_Size, N_Class)
real_y = Variable(real_y.scatter_(1, labels.view(Batch_Size, 1), 1).cuda())
#y = Variable(y.cuda())
# Sample noise and labels as generator input
noise = Variable(torch.randn((Batch_Size, args.latent_dim)).cuda())
gen_labels = (torch.rand(Batch_Size, 1) * N_Class).type(torch.LongTensor)
gen_y = torch.zeros(Batch_Size, N_Class)
gen_y = Variable(gen_y.scatter_(1, gen_labels.view(Batch_Size, 1), 1).cuda())
# ---------------------
# Train Discriminator
# ---------------------
optimizer_D.zero_grad()
# Loss for real images
d_real_loss = adversarial_loss(discriminator(real_imgs, real_y).squeeze(), valid)
# Loss for fake images
gen_imgs = generator(noise, gen_y)
d_fake_loss = adversarial_loss(discriminator(gen_imgs.detach(),gen_y).squeeze(), fake)
# Total discriminator loss
d_loss = (d_real_loss + d_fake_loss)
d_loss.backward()
optimizer_D.step()
# -----------------
# Train Generator
# -----------------
optimizer_G.zero_grad()
# Generate a batch of images
#gen_imgs = generator(noise, gen_y)
# Loss measures generator's ability to fool the discriminator
g_loss = adversarial_loss(discriminator(gen_imgs,gen_y).squeeze(), valid)
g_loss.backward()
optimizer_G.step()
print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" % (epoch, args.n_epochs, i, len(dataloader),
d_loss.data.cpu(), g_loss.data.cpu()))
batches_done = epoch * len(dataloader) + i
if batches_done % args.sample_interval == 0:
noise = Variable(torch.FloatTensor(np.random.normal(0, 1, (N_Class**2, args.latent_dim))).cuda())
#fixed labels
y_ = torch.LongTensor(np.array([num for num in range(N_Class)])).view(N_Class,1).expand(-1,N_Class).contiguous()
y_fixed = torch.zeros(N_Class**2, N_Class)
y_fixed = Variable(y_fixed.scatter_(1,y_.view(N_Class**2,1),1).cuda())
gen_imgs = generator(noise, y_fixed).view(-1,C,H,W)
save_image(gen_imgs.data, img_save_path + '/%d-%d.png' % (epoch,batches_done), nrow=N_Class, normalize=True) | en | 0.606213 | https://github.com/TeeyoHuang/conditional-GAN/blob/master/conditional_gan.py # initializers # forward method # initializers # forward method # Loss function # Initialize Generator and discriminator # Initialize weights # Configure data loader # Optimizers # Adversarial ground truths # Configure input #y = Variable(y.cuda()) # Sample noise and labels as generator input # --------------------- # Train Discriminator # --------------------- # Loss for real images # Loss for fake images # Total discriminator loss # ----------------- # Train Generator # ----------------- # Generate a batch of images #gen_imgs = generator(noise, gen_y) # Loss measures generator's ability to fool the discriminator #fixed labels | 2.216072 | 2 |
setup.py | zjffdu/pyzeppelin | 1 | 6618768 | <gh_stars>1-10
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
VERSION = '0.1.0'
PACKAGE_NAME = 'pyzeppelin'
AUTHOR = 'You'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/zjffdu/pyzeppelin'
LICENSE = 'Apache License 2.0'
DESCRIPTION = 'Python API of Zeppelin SDK'
LONG_DESCRIPTION = (HERE / "README.md").read_text()
LONG_DESC_TYPE = "text/markdown"
INSTALL_REQUIRES = [
'requests'
]
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESC_TYPE,
author=AUTHOR,
license=LICENSE,
author_email=AUTHOR_EMAIL,
url=URL,
install_requires=INSTALL_REQUIRES,
packages=find_packages()
)
| #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from setuptools import setup, find_packages
HERE = pathlib.Path(__file__).parent
VERSION = '0.1.0'
PACKAGE_NAME = 'pyzeppelin'
AUTHOR = 'You'
AUTHOR_EMAIL = '<EMAIL>'
URL = 'https://github.com/zjffdu/pyzeppelin'
LICENSE = 'Apache License 2.0'
DESCRIPTION = 'Python API of Zeppelin SDK'
LONG_DESCRIPTION = (HERE / "README.md").read_text()
LONG_DESC_TYPE = "text/markdown"
INSTALL_REQUIRES = [
'requests'
]
setup(name=PACKAGE_NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESC_TYPE,
author=AUTHOR,
license=LICENSE,
author_email=AUTHOR_EMAIL,
url=URL,
install_requires=INSTALL_REQUIRES,
packages=find_packages()
) | en | 0.863441 | # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. | 1.306045 | 1 |
tests/test_hot_water.py | vit-/pymultiMATIC | 0 | 6618769 | """Test for hot water."""
import unittest
from pymultimatic.model import OperatingModes, constants
from tests.conftest import _hotwater
class HotWaterTest(unittest.TestCase):
"""Test class."""
def test_get_active_mode_on(self) -> None:
"""Test active mode on."""
hot_water = _hotwater()
hot_water.operating_mode = OperatingModes.ON
active_mode = hot_water.active_mode
self.assertEqual(OperatingModes.ON, active_mode.current)
self.assertEqual(hot_water.target_high, active_mode.target)
self.assertIsNone(active_mode.sub)
def test_get_active_mode_off(self) -> None:
"""Test active mode off."""
hot_water = _hotwater()
hot_water.operating_mode = OperatingModes.OFF
active_mode = hot_water.active_mode
self.assertEqual(OperatingModes.OFF, active_mode.current)
self.assertEqual(constants.FROST_PROTECTION_TEMP,
active_mode.target)
self.assertIsNone(active_mode.sub)
| """Test for hot water."""
import unittest
from pymultimatic.model import OperatingModes, constants
from tests.conftest import _hotwater
class HotWaterTest(unittest.TestCase):
"""Test class."""
def test_get_active_mode_on(self) -> None:
"""Test active mode on."""
hot_water = _hotwater()
hot_water.operating_mode = OperatingModes.ON
active_mode = hot_water.active_mode
self.assertEqual(OperatingModes.ON, active_mode.current)
self.assertEqual(hot_water.target_high, active_mode.target)
self.assertIsNone(active_mode.sub)
def test_get_active_mode_off(self) -> None:
"""Test active mode off."""
hot_water = _hotwater()
hot_water.operating_mode = OperatingModes.OFF
active_mode = hot_water.active_mode
self.assertEqual(OperatingModes.OFF, active_mode.current)
self.assertEqual(constants.FROST_PROTECTION_TEMP,
active_mode.target)
self.assertIsNone(active_mode.sub)
| en | 0.836976 | Test for hot water. Test class. Test active mode on. Test active mode off. | 3.089191 | 3 |
metadata-ingestion/src/datahub/ingestion/source/identity/azure_ad.py | chinmay-bhat/datahub | 1 | 6618770 | <gh_stars>1-10
import json
import logging
import re
import urllib
from dataclasses import dataclass, field
from typing import Dict, Iterable, List, Union
import click
import requests
from datahub.configuration import ConfigModel
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
CorpGroupSnapshot,
CorpUserSnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import (
CorpGroupInfoClass,
CorpUserInfoClass,
GroupMembershipClass,
)
logger = logging.getLogger(__name__)
class AzureADConfig(ConfigModel):
"""Config to create a token and connect to Azure AD instance"""
# Required
client_id: str
tenant_id: str
client_secret: str
redirect: str
authority: str
token_url: str
graph_url: str
# Optional: Customize the mapping to DataHub Username from an attribute in the REST API response
# Reference: https://docs.microsoft.com/en-us/graph/api/user-list?view=graph-rest-1.0&tabs=http#response-1
azure_ad_response_to_username_attr: str = "mail"
azure_ad_response_to_username_regex: str = "([^@]+)"
# Optional: Customize the mapping to DataHub Groupname from an attribute in the REST API response
# Reference: https://docs.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0&tabs=http#response-1
azure_ad_response_to_groupname_attr: str = "displayName"
azure_ad_response_to_groupname_regex: str = "(.*)"
# Optional: to ingest users, groups or both
ingest_users: bool = True
ingest_groups: bool = True
ingest_group_membership: bool = True
@dataclass
class AzureADSourceReport(SourceReport):
filtered: List[str] = field(default_factory=list)
def report_filtered(self, name: str) -> None:
self.filtered.append(name)
# Source that extracts Azure AD users, groups and group memberships using Microsoft Graph REST API
#
# Validated against load:
# - user count: 1000
# - group count: 100
# - group membership edges: 1000 (1 per user)
class AzureADSource(Source):
"""Ingest Azure AD Users and Groups into DataHub"""
@classmethod
def create(cls, config_dict, ctx):
config = AzureADConfig.parse_obj(config_dict)
return cls(config, ctx)
def __init__(self, config: AzureADConfig, ctx: PipelineContext):
super().__init__(ctx)
self.config = config
self.report = AzureADSourceReport()
self.token_data = {
"grant_type": "client_credentials",
"client_id": self.config.client_id,
"tenant_id": self.config.tenant_id,
"client_secret": self.config.client_secret,
"resource": "https://graph.microsoft.com",
"scope": "https://graph.microsoft.com/.default",
}
self.token = self.get_token()
def get_token(self):
token_response = requests.post(self.config.token_url, data=self.token_data)
if token_response.status_code == 200:
token = token_response.json().get("access_token")
return token
else:
error_str = f"Token response status code: {str(token_response.status_code)}. Token response content: {str(token_response.content)}"
logger.error(error_str)
self.report.report_failure("get_token", error_str)
click.echo("Error: Token response invalid")
exit()
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
# Create MetadataWorkUnits for CorpGroups
if self.config.ingest_groups:
azure_ad_groups = next(self._get_azure_ad_groups())
datahub_corp_group_snapshots = self._map_azure_ad_groups(azure_ad_groups)
for datahub_corp_group_snapshot in datahub_corp_group_snapshots:
mce = MetadataChangeEvent(proposedSnapshot=datahub_corp_group_snapshot)
wu = MetadataWorkUnit(id=datahub_corp_group_snapshot.urn, mce=mce)
self.report.report_workunit(wu)
yield wu
# Populate GroupMembership Aspects for CorpUsers
datahub_corp_user_urn_to_group_membership: Dict[str, GroupMembershipClass] = {}
if self.config.ingest_group_membership and azure_ad_groups:
# Fetch membership for each group
for azure_ad_group in azure_ad_groups:
datahub_corp_group_urn = self._map_azure_ad_group_to_urn(azure_ad_group)
if not datahub_corp_group_urn:
error_str = "Failed to extract DataHub Group Name from Azure AD Group named {}. Skipping...".format(
azure_ad_group.get("displayName")
)
self.report.report_failure("azure_ad_group_mapping", error_str)
continue
# Extract and map users for each group
azure_ad_group_users = next(
self._get_azure_ad_group_users(azure_ad_group)
)
# if group doesn't have any members, continue
if not azure_ad_group_users:
continue
for azure_ad_user in azure_ad_group_users:
datahub_corp_user_urn = self._map_azure_ad_user_to_urn(
azure_ad_user
)
if not datahub_corp_user_urn:
error_str = "Failed to extract DataHub Username from Azure ADUser {}. Skipping...".format(
azure_ad_user.get("displayName")
)
self.report.report_failure("azure_ad_user_mapping", error_str)
continue
# update/create the GroupMembership aspect for this group member.
if (
datahub_corp_user_urn
in datahub_corp_user_urn_to_group_membership
):
datahub_corp_user_urn_to_group_membership[
datahub_corp_user_urn
].groups.append(datahub_corp_group_urn)
else:
datahub_corp_user_urn_to_group_membership[
datahub_corp_user_urn
] = GroupMembershipClass(groups=[datahub_corp_group_urn])
# Create MetadatWorkUnits for CorpUsers
if self.config.ingest_users:
azure_ad_users = next(self._get_azure_ad_users())
datahub_corp_user_snapshots = self._map_azure_ad_users(azure_ad_users)
for datahub_corp_user_snapshot in datahub_corp_user_snapshots:
# Add GroupMembership if applicable
if (
datahub_corp_user_snapshot.urn
in datahub_corp_user_urn_to_group_membership
):
datahub_group_membership = (
datahub_corp_user_urn_to_group_membership.get(
datahub_corp_user_snapshot.urn
)
)
assert datahub_group_membership
datahub_corp_user_snapshot.aspects.append(datahub_group_membership)
mce = MetadataChangeEvent(proposedSnapshot=datahub_corp_user_snapshot)
wu = MetadataWorkUnit(id=datahub_corp_user_snapshot.urn, mce=mce)
self.report.report_workunit(wu)
yield wu
def get_report(self) -> SourceReport:
return self.report
def close(self) -> None:
pass
def _get_azure_ad_groups(self):
headers = {"Authorization": "Bearer {}".format(self.token)}
url = self.config.graph_url + "/groups"
while True:
if not url:
break
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = json.loads(response.text)
url = json_data.get("@odata.nextLink", None)
yield json_data["value"]
else:
error_str = f"Response status code: {str(response.status_code)}. Response content: {str(response.content)}"
logger.error(error_str)
self.report.report_failure("_get_azure_ad_groups", error_str)
continue
def _get_azure_ad_users(self):
headers = {"Authorization": "Bearer {}".format(self.token)}
url = self.config.graph_url + "/users"
while True:
if not url:
break
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = json.loads(response.text)
url = json_data.get("@odata.nextLink", None)
yield json_data["value"]
else:
error_str = f"Response status code: {str(response.status_code)}. Response content: {str(response.content)}"
logger.error(error_str)
self.report.report_failure("_get_azure_ad_groups", error_str)
continue
def _get_azure_ad_group_users(self, azure_ad_group):
headers = {"Authorization": "Bearer {}".format(self.token)}
url = "{0}/groups/{1}/members".format(
self.config.graph_url, azure_ad_group.get("id")
)
while True:
if not url:
break
response = requests.get(url, headers=headers)
if response.status_code == 200:
json_data = json.loads(response.text)
url = json_data.get("@odata.nextLink", None)
yield json_data["value"]
else:
error_str = f"Response status code: {str(response.status_code)}. Response content: {str(response.content)}"
logger.error(error_str)
self.report.report_failure("_get_azure_ad_groups", error_str)
continue
def _map_azure_ad_groups(self, azure_ad_groups):
for azure_ad_group in azure_ad_groups:
corp_group_urn = self._map_azure_ad_group_to_urn(azure_ad_group)
if not corp_group_urn:
error_str = "Failed to extract DataHub Group Name from Azure Group for group named {}. Skipping...".format(
azure_ad_group.get("displayName")
)
logger.error(error_str)
self.report.report_failure("azure_ad_group_mapping", error_str)
continue
corp_group_snapshot = CorpGroupSnapshot(
urn=corp_group_urn,
aspects=[],
)
corp_group_info = self._map_azure_ad_group_to_corp_group(azure_ad_group)
corp_group_snapshot.aspects.append(corp_group_info)
yield corp_group_snapshot
# Converts Azure group profile into DataHub CorpGroupInfoClass Aspect
def _map_azure_ad_group_to_corp_group(self, group):
return CorpGroupInfoClass(
displayName=self._map_azure_ad_group_to_group_name(group),
description=group.get("description"),
email=group.get("mail"),
members=[],
groups=[],
admins=[],
)
# Creates Datahub CorpGroup Urn from Azure AD Group object
def _map_azure_ad_group_to_urn(self, azure_ad_group):
group_name = self._map_azure_ad_group_to_group_name(azure_ad_group)
if not group_name:
return None
# URL encode the group name to deal with potential spaces
url_encoded_group_name = urllib.parse.quote(group_name)
return self._make_corp_group_urn(url_encoded_group_name)
def _map_azure_ad_group_to_group_name(self, azure_ad_group):
return self._extract_regex_match_from_dict_value(
azure_ad_group,
self.config.azure_ad_response_to_groupname_attr,
self.config.azure_ad_response_to_groupname_regex,
)
def _map_azure_ad_users(self, azure_ad_users):
for user in azure_ad_users:
corp_user_urn = self._map_azure_ad_user_to_urn(user)
if not corp_user_urn:
error_str = "Failed to extract DataHub Username from Azure AD User {}. Skipping...".format(
user.get("displayName")
)
logger.error(error_str)
self.report.report_failure("azure_ad_user_mapping", error_str)
continue
corp_user_snapshot = CorpUserSnapshot(
urn=corp_user_urn,
aspects=[],
)
corp_user_info = self._map_azure_ad_user_to_corp_user(user)
corp_user_snapshot.aspects.append(corp_user_info)
yield corp_user_snapshot
def _map_azure_ad_user_to_user_name(self, azure_ad_user):
return self._extract_regex_match_from_dict_value(
azure_ad_user,
self.config.azure_ad_response_to_username_attr,
self.config.azure_ad_response_to_username_regex,
)
# Creates DataHub CorpUser Urn from Azure AD User object
def _map_azure_ad_user_to_urn(self, azure_ad_user):
user_name = self._map_azure_ad_user_to_user_name(azure_ad_user)
if not user_name:
return None
return self._make_corp_user_urn(user_name)
def _map_azure_ad_user_to_corp_user(self, azure_ad_user):
full_name = (
str(azure_ad_user.get("givenName", ""))
+ " "
+ str(azure_ad_user.get("surname", ""))
)
return CorpUserInfoClass(
active=True,
displayName=azure_ad_user.get("displayName", full_name),
firstName=azure_ad_user.get("givenName", None),
lastName=azure_ad_user.get("surname", None),
fullName=full_name,
email=azure_ad_user.get("mail"),
title=azure_ad_user.get("jobTitle", None),
countryCode=azure_ad_user.get("mobilePhone", None),
)
def _make_corp_group_urn(self, groupname: str) -> str:
return f"urn:li:corpGroup:{groupname}"
def _make_corp_user_urn(self, username: str) -> str:
return f"urn:li:corpuser:{username}"
def _extract_regex_match_from_dict_value(
self, str_dict: Dict[str, str], key: str, pattern: str
) -> Union[str, None]:
raw_value = str_dict.get(key)
if raw_value is None:
return None
match = re.search(pattern, raw_value)
if match is None:
return None
return match.group()
| import json
import logging
import re
import sys
import urllib
from dataclasses import dataclass, field
from typing import Dict, Iterable, List, Union

import click
import requests

from datahub.configuration import ConfigModel
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
    CorpGroupSnapshot,
    CorpUserSnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import (
    CorpGroupInfoClass,
    CorpUserInfoClass,
    GroupMembershipClass,
)
logger = logging.getLogger(__name__)
class AzureADConfig(ConfigModel):
    """Config to create a token and connect to an Azure AD instance."""

    # Required OAuth2 / Microsoft Graph connection settings.
    client_id: str
    tenant_id: str
    client_secret: str
    redirect: str
    authority: str
    token_url: str
    graph_url: str

    # Optional: customize the mapping to the DataHub username from an
    # attribute in the REST API response.
    # Reference: https://docs.microsoft.com/en-us/graph/api/user-list?view=graph-rest-1.0&tabs=http#response-1
    azure_ad_response_to_username_attr: str = "mail"
    azure_ad_response_to_username_regex: str = "([^@]+)"

    # Optional: customize the mapping to the DataHub group name from an
    # attribute in the REST API response.
    # Reference: https://docs.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0&tabs=http#response-1
    azure_ad_response_to_groupname_attr: str = "displayName"
    azure_ad_response_to_groupname_regex: str = "(.*)"

    # Optional: toggles for which entity types to ingest.
    ingest_users: bool = True
    ingest_groups: bool = True
    ingest_group_membership: bool = True
@dataclass
class AzureADSourceReport(SourceReport):
    """Ingestion report that additionally tracks filtered entity names."""

    # Names of entities that were filtered out of the ingestion run.
    filtered: List[str] = field(default_factory=list)

    def report_filtered(self, name: str) -> None:
        """Record *name* as filtered out of this run."""
        self.filtered.append(name)
# Source that extracts Azure AD users, groups and group memberships using Microsoft Graph REST API
#
# Validated against load:
# - user count: 1000
# - group count: 100
# - group membership edges: 1000 (1 per user)
class AzureADSource(Source):
"""Ingest Azure AD Users and Groups into DataHub"""
@classmethod
def create(cls, config_dict, ctx):
    """Standard DataHub source factory: parse the raw recipe dict into an
    AzureADConfig and construct the source."""
    parsed_config = AzureADConfig.parse_obj(config_dict)
    return cls(parsed_config, ctx)
def __init__(self, config: AzureADConfig, ctx: PipelineContext):
    """Set up the source and eagerly fetch an OAuth2 access token.

    NOTE: ``get_token()`` performs a network call (and terminates the
    process on failure), so constructing this source requires connectivity
    to the configured token endpoint.
    """
    super().__init__(ctx)
    self.config = config
    self.report = AzureADSourceReport()
    # Client-credentials grant payload for the Microsoft identity platform.
    self.token_data = {
        "grant_type": "client_credentials",
        "client_id": self.config.client_id,
        "tenant_id": self.config.tenant_id,
        "client_secret": self.config.client_secret,
        "resource": "https://graph.microsoft.com",
        "scope": "https://graph.microsoft.com/.default",
    }
    self.token = self.get_token()
def get_token(self):
    """Fetch an OAuth2 access token via the client-credentials grant.

    Returns the bearer token string on success.  On failure the error is
    logged, reported, echoed, and the process exits with status 1 (the
    previous bare ``exit()`` exited with status 0, hiding the failure
    from callers such as CI pipelines, and relies on the ``site`` module
    being loaded).
    """
    token_response = requests.post(self.config.token_url, data=self.token_data)
    if token_response.status_code == 200:
        return token_response.json().get("access_token")
    error_str = f"Token response status code: {str(token_response.status_code)}. Token response content: {str(token_response.content)}"
    logger.error(error_str)
    self.report.report_failure("get_token", error_str)
    click.echo("Error: Token response invalid")
    sys.exit(1)
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
    """Emit MetadataWorkUnits for CorpGroups, GroupMembership and CorpUsers.

    Groups are emitted first (when enabled), then group membership is
    resolved per group, and finally users are emitted with their
    GroupMembership aspect attached.

    Fixes two defects of the earlier implementation:
    * ``azure_ad_groups`` was unbound (NameError) when ``ingest_groups``
      was disabled but ``ingest_group_membership`` enabled;
    * ``next(generator)`` consumed only the FIRST page of groups, users,
      and group members, silently dropping everything past page one.
    """
    # Collect every page of groups up front; both group ingestion and
    # membership resolution need the full list.
    azure_ad_groups: List[dict] = []
    if self.config.ingest_groups or self.config.ingest_group_membership:
        for group_page in self._get_azure_ad_groups():
            azure_ad_groups.extend(group_page)

    # Create MetadataWorkUnits for CorpGroups
    if self.config.ingest_groups:
        for corp_group_snapshot in self._map_azure_ad_groups(azure_ad_groups):
            mce = MetadataChangeEvent(proposedSnapshot=corp_group_snapshot)
            wu = MetadataWorkUnit(id=corp_group_snapshot.urn, mce=mce)
            self.report.report_workunit(wu)
            yield wu

    # Populate GroupMembership aspects for CorpUsers
    datahub_corp_user_urn_to_group_membership: Dict[str, GroupMembershipClass] = {}
    if self.config.ingest_group_membership:
        for azure_ad_group in azure_ad_groups:
            datahub_corp_group_urn = self._map_azure_ad_group_to_urn(azure_ad_group)
            if not datahub_corp_group_urn:
                error_str = "Failed to extract DataHub Group Name from Azure AD Group named {}. Skipping...".format(
                    azure_ad_group.get("displayName")
                )
                self.report.report_failure("azure_ad_group_mapping", error_str)
                continue
            # Walk every page of the group's member list.
            for members_page in self._get_azure_ad_group_users(azure_ad_group):
                for azure_ad_user in members_page:
                    datahub_corp_user_urn = self._map_azure_ad_user_to_urn(azure_ad_user)
                    if not datahub_corp_user_urn:
                        error_str = "Failed to extract DataHub Username from Azure AD User {}. Skipping...".format(
                            azure_ad_user.get("displayName")
                        )
                        self.report.report_failure("azure_ad_user_mapping", error_str)
                        continue
                    # Update/create the GroupMembership aspect for this member.
                    membership = datahub_corp_user_urn_to_group_membership.setdefault(
                        datahub_corp_user_urn, GroupMembershipClass(groups=[])
                    )
                    membership.groups.append(datahub_corp_group_urn)

    # Create MetadataWorkUnits for CorpUsers
    if self.config.ingest_users:
        azure_ad_users: List[dict] = []
        for user_page in self._get_azure_ad_users():
            azure_ad_users.extend(user_page)
        for corp_user_snapshot in self._map_azure_ad_users(azure_ad_users):
            # Attach GroupMembership if applicable
            membership = datahub_corp_user_urn_to_group_membership.get(
                corp_user_snapshot.urn
            )
            if membership is not None:
                corp_user_snapshot.aspects.append(membership)
            mce = MetadataChangeEvent(proposedSnapshot=corp_user_snapshot)
            wu = MetadataWorkUnit(id=corp_user_snapshot.urn, mce=mce)
            self.report.report_workunit(wu)
            yield wu
def get_report(self) -> SourceReport:
    """Return the accumulated ingestion report."""
    return self.report
def close(self) -> None:
    """Nothing persistent to release; HTTP calls are per-request."""
    pass
def _get_azure_ad_groups(self):
    """Yield pages (lists) of Azure AD groups from the Microsoft Graph API.

    Follows ``@odata.nextLink`` pagination.  On a non-200 response the
    error is logged and reported and iteration stops — the previous
    version did ``continue`` without changing the URL, retrying the same
    failing request forever.
    """
    headers = {"Authorization": "Bearer {}".format(self.token)}
    url = self.config.graph_url + "/groups"
    while url:
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            error_str = f"Response status code: {str(response.status_code)}. Response content: {str(response.content)}"
            logger.error(error_str)
            self.report.report_failure("_get_azure_ad_groups", error_str)
            break
        json_data = json.loads(response.text)
        # Absent nextLink ends the pagination loop.
        url = json_data.get("@odata.nextLink", None)
        yield json_data["value"]
def _get_azure_ad_users(self):
    """Yield pages (lists) of Azure AD users from the Microsoft Graph API.

    Follows ``@odata.nextLink`` pagination.  On a non-200 response the
    error is logged and reported and iteration stops (previously the same
    URL was retried forever).  Also fixes the copy-pasted failure key,
    which wrongly said ``_get_azure_ad_groups``.
    """
    headers = {"Authorization": "Bearer {}".format(self.token)}
    url = self.config.graph_url + "/users"
    while url:
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            error_str = f"Response status code: {str(response.status_code)}. Response content: {str(response.content)}"
            logger.error(error_str)
            self.report.report_failure("_get_azure_ad_users", error_str)
            break
        json_data = json.loads(response.text)
        url = json_data.get("@odata.nextLink", None)
        yield json_data["value"]
def _get_azure_ad_group_users(self, azure_ad_group):
    """Yield pages (lists) of members of *azure_ad_group*.

    Follows ``@odata.nextLink`` pagination.  On a non-200 response the
    error is logged and reported and iteration stops (previously the same
    URL was retried forever).  Also fixes the copy-pasted failure key,
    which wrongly said ``_get_azure_ad_groups``.
    """
    headers = {"Authorization": "Bearer {}".format(self.token)}
    url = "{0}/groups/{1}/members".format(
        self.config.graph_url, azure_ad_group.get("id")
    )
    while url:
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            error_str = f"Response status code: {str(response.status_code)}. Response content: {str(response.content)}"
            logger.error(error_str)
            self.report.report_failure("_get_azure_ad_group_users", error_str)
            break
        json_data = json.loads(response.text)
        url = json_data.get("@odata.nextLink", None)
        yield json_data["value"]
def _map_azure_ad_groups(self, azure_ad_groups):
    """Yield a CorpGroupSnapshot for every mappable Azure AD group.

    Groups whose DataHub name cannot be derived are reported as mapping
    failures and skipped.
    """
    for group in azure_ad_groups:
        group_urn = self._map_azure_ad_group_to_urn(group)
        if not group_urn:
            message = (
                "Failed to extract DataHub Group Name from Azure Group for "
                "group named {}. Skipping...".format(group.get("displayName"))
            )
            logger.error(message)
            self.report.report_failure("azure_ad_group_mapping", message)
            continue
        group_info = self._map_azure_ad_group_to_corp_group(group)
        yield CorpGroupSnapshot(urn=group_urn, aspects=[group_info])
# Converts Azure group profile into DataHub CorpGroupInfoClass Aspect
def _map_azure_ad_group_to_corp_group(self, group):
    """Build the CorpGroupInfo aspect for an Azure AD group payload."""
    display_name = self._map_azure_ad_group_to_group_name(group)
    return CorpGroupInfoClass(
        displayName=display_name,
        description=group.get("description"),
        email=group.get("mail"),
        # Member/group/admin edges are not derivable from this payload;
        # left empty here.
        members=[],
        groups=[],
        admins=[],
    )
# Creates Datahub CorpGroup Urn from Azure AD Group object
def _map_azure_ad_group_to_urn(self, azure_ad_group):
    """Return the CorpGroup URN for the group, or None when no group name
    can be extracted."""
    group_name = self._map_azure_ad_group_to_group_name(azure_ad_group)
    if group_name:
        # URL-encode to keep spaces/special characters URN-safe.
        return self._make_corp_group_urn(urllib.parse.quote(group_name))
    return None
def _map_azure_ad_group_to_group_name(self, azure_ad_group):
    """Derive the DataHub group name from the configured response attribute."""
    return self._extract_regex_match_from_dict_value(
        str_dict=azure_ad_group,
        key=self.config.azure_ad_response_to_groupname_attr,
        pattern=self.config.azure_ad_response_to_groupname_regex,
    )
def _map_azure_ad_users(self, azure_ad_users):
    """Translate Azure AD user payloads into DataHub CorpUserSnapshots,
    reporting and skipping users without a derivable username."""
    for user in azure_ad_users:
        user_urn = self._map_azure_ad_user_to_urn(user)
        if not user_urn:
            message = (
                "Failed to extract DataHub Username from Azure AD User {}. "
                "Skipping...".format(user.get("displayName"))
            )
            logger.error(message)
            self.report.report_failure("azure_ad_user_mapping", message)
            continue
        info = self._map_azure_ad_user_to_corp_user(user)
        yield CorpUserSnapshot(urn=user_urn, aspects=[info])
def _map_azure_ad_user_to_user_name(self, azure_ad_user):
    """Extract the DataHub username from an Azure AD user payload using
    the configured attribute/regex pair."""
    return self._extract_regex_match_from_dict_value(
        str_dict=azure_ad_user,
        key=self.config.azure_ad_response_to_username_attr,
        pattern=self.config.azure_ad_response_to_username_regex,
    )
# Creates DataHub CorpUser Urn from Azure AD User object
def _map_azure_ad_user_to_urn(self, azure_ad_user):
    """Build the DataHub CorpUser URN, or None when no username maps."""
    username = self._map_azure_ad_user_to_user_name(azure_ad_user)
    return None if not username else self._make_corp_user_urn(username)
def _map_azure_ad_user_to_corp_user(self, azure_ad_user):
    """Build the CorpUserInfo aspect from an Azure AD user payload."""
    first = azure_ad_user.get("givenName", None)
    last = azure_ad_user.get("surname", None)
    full_name = "{} {}".format(
        str(azure_ad_user.get("givenName", "")),
        str(azure_ad_user.get("surname", "")),
    )
    return CorpUserInfoClass(
        active=True,
        displayName=azure_ad_user.get("displayName", full_name),
        firstName=first,
        lastName=last,
        fullName=full_name,
        email=azure_ad_user.get("mail"),
        title=azure_ad_user.get("jobTitle", None),
        # NOTE(review): countryCode is filled from mobilePhone — verify
        # against the intended CorpUserInfo schema.
        countryCode=azure_ad_user.get("mobilePhone", None),
    )
def _make_corp_group_urn(self, groupname: str) -> str:
return f"urn:li:corpGroup:{groupname}"
def _make_corp_user_urn(self, username: str) -> str:
return f"urn:li:corpuser:{username}"
def _extract_regex_match_from_dict_value(
self, str_dict: Dict[str, str], key: str, pattern: str
) -> Union[str, None]:
raw_value = str_dict.get(key)
if raw_value is None:
return None
match = re.search(pattern, raw_value)
if match is None:
return None
return match.group() | en | 0.749391 | Config to create a token and connect to Azure AD instance # Required # Optional: Customize the mapping to DataHub Username from an attribute in the REST API response # Reference: https://docs.microsoft.com/en-us/graph/api/user-list?view=graph-rest-1.0&tabs=http#response-1 # Optional: Customize the mapping to DataHub Groupname from an attribute in the REST API response # Reference: https://docs.microsoft.com/en-us/graph/api/group-list?view=graph-rest-1.0&tabs=http#response-1 # Optional: to ingest users, groups or both # Source that extracts Azure AD users, groups and group memberships using Microsoft Graph REST API # # Validated against load: # - user count: 1000 # - group count: 100 # - group membership edges: 1000 (1 per user) Ingest Azure AD Users and Groups into DataHub # Create MetadataWorkUnits for CorpGroups # Populate GroupMembership Aspects for CorpUsers # Fetch membership for each group # Extract and map users for each group # if group doesn't have any members, continue # update/create the GroupMembership aspect for this group member. # Create MetadatWorkUnits for CorpUsers # Add GroupMembership if applicable # Converts Azure group profile into DataHub CorpGroupInfoClass Aspect # Creates Datahub CorpGroup Urn from Azure AD Group object # URL encode the group name to deal with potential spaces # Creates DataHub CorpUser Urn from Azure AD User object | 2.110269 | 2 |
2020/day2/1.py | darkterbear/advent-of-code-2015 | 0 | 6618771 | import re
file = open('./input', 'r')
lines = file.readlines()
def valid(line):
    """Return True when a password line satisfies its count policy.

    A line looks like ``"1-3 a: abcde"``: the policy letter must occur
    between the low and high bounds (inclusive) in the password.

    Fixes: the original shadowed the ``min``/``max`` builtins and fell
    off the end (returning None) for invalid lines; this returns an
    explicit bool.
    """
    fields = re.split(r'[\- :\n]', line)
    lo = int(fields[0])
    hi = int(fields[1])
    letter = fields[2]
    password = fields[4]
    return lo <= password.count(letter) <= hi
print(len(list(filter(valid, lines))))
| import re
file = open('./input', 'r')
lines = file.readlines()
def valid(line):
portions = re.split('[\- :\n]', line)
min = int(portions[0])
max = int(portions[1])
chars = list(filter(lambda c: c == portions[2], portions[4]))
if len(chars) >= min and len(chars) <= max:
return True
print(len(list(filter(valid, lines))))
| none | 1 | 3.671136 | 4 | |
SUAVE/SUAVE-2.5.0/regression/scripts/payload_range/mission_Embraer_E190_constThr_payload_range.py | Vinicius-Tanigawa/Undergraduate-Research-Project | 0 | 6618772 | <filename>SUAVE/SUAVE-2.5.0/regression/scripts/payload_range/mission_Embraer_E190_constThr_payload_range.py
# full_setup.py
#
# Created: <NAME>, Aug 2014
# Modified:
""" setup file for a mission with a E190
"""
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units
import numpy as np
import pylab as plt
import copy, time
from SUAVE.Core import (
Data, Container,
)
# the analysis functions
from SUAVE.Methods.Performance import payload_range
#from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing
import sys
sys.path.append('../Vehicles')
# the analysis functions
from Embraer_190 import vehicle_setup, configs_setup
from SUAVE.Methods.Performance import payload_range
from SUAVE.Input_Output.Results import print_parasite_drag, \
print_compress_drag, \
print_engine_data, \
print_mission_breakdown
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
    """Run the E190 regression: build configs/analyses, fly the mission,
    dump report files, plot, and compare against stored baseline results.

    Side effects: writes engine_data.dat, parasite_drag.dat,
    compress_drag.dat and mission_breakdown.dat in the working directory.
    """
    # define the problem
    configs, analyses = full_setup()
    configs.finalize()
    analyses.finalize()

    # weight analysis (breakdown retained for debugging/inspection)
    weights = analyses.configs.base.weights
    breakdown = weights.evaluate()

    # mission analysis
    mission = analyses.missions
    results = mission.evaluate()

    # print engine data into file
    print_engine_data(configs.base, filename='engine_data.dat')

    # print parasite drag data into file
    # define reference condition for parasite drag
    ref_condition = Data()
    ref_condition.mach_number = 0.3
    ref_condition.reynolds_number = 20e6
    print_parasite_drag(ref_condition, configs.cruise, analyses, 'parasite_drag.dat')

    # print compressibility drag data into file
    print_compress_drag(configs.cruise, analyses, filename='compress_drag.dat')

    # print mission breakdown
    print_mission_breakdown(results, filename='mission_breakdown.dat')

    # load older results (uncomment save_results to refresh the baseline)
    # save_results(results)
    old_results = load_results()

    # plot the new results (blue) over the stored baseline (black)
    plot_mission(results)
    plot_mission(old_results, 'k-')

    # check the results
    check_results(results, old_results)

    return
# ----------------------------------------------------------------------
# Analysis Setup
# ----------------------------------------------------------------------
def full_setup():
    """Build the E190 vehicle configurations and their matching analyses.

    Returns (configs, analyses): the configuration container and an
    analysis container holding per-config analyses plus the mission.
    """
    # vehicle data and derived configurations
    base_vehicle = vehicle_setup()
    configs = configs_setup(base_vehicle)

    # per-configuration vehicle analyses
    configs_analyses = analyses_setup(configs)

    # mission built on top of those analyses
    mission = mission_setup(configs_analyses)

    analyses = SUAVE.Analyses.Analysis.Container()
    analyses.configs = configs_analyses
    analyses.missions = mission

    return configs, analyses
# ----------------------------------------------------------------------
# Define the Vehicle Analyses
# ----------------------------------------------------------------------
def analyses_setup(configs):
    """Build one base analysis stack per vehicle configuration, then apply
    per-configuration adjustments."""
    analyses = SUAVE.Analyses.Analysis.Container()

    # one base analysis stack for each configuration
    for tag, config in configs.items():
        analyses[tag] = base_analysis(config)

    # takeoff: extra drag increment for this configuration
    analyses.takeoff.aerodynamics.drag_coefficient_increment = 0.1000

    # landing analysis — placeholder, nothing adjusted yet
    aerodynamics = analyses.landing.aerodynamics

    return analyses
def base_analysis(vehicle):
    """Assemble the standard analysis stack (sizing, weights, aerodynamics,
    stability, energy, planet, atmosphere) for one vehicle configuration.

    The atmosphere analysis references the planet's features, so the
    planet must be appended first.
    """
    # ------------------------------------------------------------------
    #   Initialize the Analyses
    # ------------------------------------------------------------------
    analyses = SUAVE.Analyses.Vehicle()

    # ------------------------------------------------------------------
    #   Basic Geometry Relations
    sizing = SUAVE.Analyses.Sizing.Sizing()
    sizing.features.vehicle = vehicle
    analyses.append(sizing)

    # ------------------------------------------------------------------
    #   Weights
    weights = SUAVE.Analyses.Weights.Weights_Transport()
    weights.vehicle = vehicle
    analyses.append(weights)

    # ------------------------------------------------------------------
    #   Aerodynamics Analysis
    aerodynamics = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()
    aerodynamics.geometry = vehicle
    aerodynamics.settings.drag_coefficient_increment = 0.0000
    aerodynamics.settings.aircraft_span_efficiency_factor = 1.0
    analyses.append(aerodynamics)

    # ------------------------------------------------------------------
    #   Stability Analysis
    stability = SUAVE.Analyses.Stability.Fidelity_Zero()
    stability.geometry = vehicle
    analyses.append(stability)

    # ------------------------------------------------------------------
    #   Energy Analysis
    energy = SUAVE.Analyses.Energy.Energy()
    energy.network = vehicle.networks
    analyses.append(energy)

    # ------------------------------------------------------------------
    #   Planet Analysis
    planet = SUAVE.Analyses.Planets.Planet()
    analyses.append(planet)

    # ------------------------------------------------------------------
    #   Atmosphere Analysis
    atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
    atmosphere.features.planet = planet.features
    analyses.append(atmosphere)

    # done!
    return analyses
# ----------------------------------------------------------------------
# Define the Mission
# ----------------------------------------------------------------------
def mission_setup(analyses):
    """Define the nominal E190 mission: three climb segments, one cruise
    leg, and three descent segments.

    *analyses* is the per-configuration container from ``analyses_setup``;
    the first climb uses the ``takeoff`` analyses, every other segment
    uses ``cruise``.  Returns a Sequential_Segments mission ready for
    evaluation.
    """
    # ------------------------------------------------------------------
    #   Initialize the Mission
    # ------------------------------------------------------------------
    mission = SUAVE.Analyses.Mission.Sequential_Segments()
    mission.tag = 'embraer_e190ar test mission'

    # atmospheric model
    atmosphere = SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()
    planet = SUAVE.Attributes.Planets.Earth()

    # airport
    airport = SUAVE.Attributes.Airports.Airport()
    airport.altitude = 0.0 * Units.ft
    airport.delta_isa = 0.0
    airport.atmosphere = SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()

    mission.airport = airport

    # unpack Segments module
    Segments = SUAVE.Analyses.Mission.Segments

    # ------------------------------------------------------------------
    #   First Climb Segment: Constant Speed, Constant Throttle
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Throttle_Constant_Speed()
    segment.tag = "climb_250kcas"

    # connect vehicle configuration
    segment.analyses.extend(analyses.takeoff)

    # define segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.altitude_start = 0.0 * Units.km
    segment.altitude_end = 3.048 * Units.km
    segment.air_speed = 250.0 * Units.knots
    segment.throttle = 1.0

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Second Climb Segment: Constant Speed, Constant Throttle
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Throttle_Constant_Speed()
    # NOTE(review): tag says 280 KCAS but air_speed below is 350 knots —
    # confirm which is intended.
    segment.tag = "climb_280kcas"

    # connect vehicle configuration
    segment.analyses.extend(analyses.cruise)

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.altitude_end = 32000. * Units.ft
    segment.air_speed = 350.0 * Units.knots
    segment.throttle = 1.0

    # dummy for post process script
    segment.climb_rate = 0.1

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Third Climb Segment: Constant Speed, Constant Climb Rate
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Throttle_Constant_Speed()
    segment.tag = "climb_final"

    # connect vehicle configuration
    segment.analyses.extend(analyses.cruise)

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.altitude_end = 35000. * Units.ft
    segment.air_speed = 380.0 * Units.knots
    segment.throttle = 1.0

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Cruise Segment: constant speed, constant altitude
    # ------------------------------------------------------------------
    segment = Segments.Cruise.Constant_Speed_Constant_Altitude()
    segment.tag = "cruise"

    # connect vehicle configuration
    segment.analyses.extend(analyses.cruise)

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.air_speed = 450. * Units.knots  # 230. * Units['m/s']
    ## 35kft:
    # 415. => M = 0.72
    # 450. => M = 0.78
    # 461. => M = 0.80
    ## 37kft:
    # 447. => M = 0.78
    segment.distance = 2100. * Units.nmi

    segment.state.numerics.number_control_points = 6

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   First Descent Segment: constant speed, constant descent rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_m0_77"

    # connect vehicle configuration
    segment.analyses.extend(analyses.cruise)

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.altitude_end = 9.31 * Units.km
    segment.air_speed = 440.0 * Units.knots
    segment.descent_rate = 2600. * Units['ft/min']

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Second Descent Segment: constant speed, constant descent rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_290kcas"

    # connect vehicle configuration
    segment.analyses.extend(analyses.cruise)

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.altitude_end = 3.657 * Units.km
    segment.air_speed = 365.0 * Units.knots
    segment.descent_rate = 2300. * Units['ft/min']

    # append to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Third Descent Segment: constant speed, constant descent rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_250kcas"

    # connect vehicle configuration
    segment.analyses.extend(analyses.cruise)

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet

    segment.altitude_end = 0.0 * Units.km
    segment.air_speed = 250.0 * Units.knots
    segment.descent_rate = 1500. * Units['ft/min']

    # append to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Mission definition complete
    # ------------------------------------------------------------------

    return mission
#: def define_mission()
# ----------------------------------------------------------------------
# Plot Mission
# ----------------------------------------------------------------------
def plot_mission(results, line_style='bo-'):
    """Plot the mission results into a set of named matplotlib figures.

    Parameters:
        results : evaluated mission results (per-segment conditions).
        line_style : matplotlib format string; the regression overlays
            two runs by calling this twice with different styles.

    Figures are created but not shown; the caller decides when (or
    whether) to call ``plt.show()``.
    """
    # ------------------------------------------------------------------
    #   Throttle
    # ------------------------------------------------------------------
    plt.figure("Throttle History")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:, 0] / Units.min
        eta = results.segments[i].conditions.propulsion.throttle[:, 0]
        axes.plot(time, eta, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Throttle')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Angle of Attack
    # ------------------------------------------------------------------
    plt.figure("Angle of Attack History")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:, 0] / Units.min
        aoa = results.segments[i].conditions.aerodynamics.angle_of_attack[:, 0] / Units.deg
        axes.plot(time, aoa, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Angle of Attack (deg)')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Fuel Burn Rate
    # ------------------------------------------------------------------
    plt.figure("Fuel Burn Rate")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:, 0] / Units.min
        mdot = results.segments[i].conditions.weights.vehicle_mass_rate[:, 0]
        axes.plot(time, mdot, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Fuel Burn Rate (kg/s)')
    axes.grid(True)

    ## # ------------------------------------------------------------------
    ## #   Engine SFC
    ## # ------------------------------------------------------------------
    ## plt.figure("Engine SFC")
    ## axes = plt.gca()
    ## for i in range(len(results.segments)):
    ##     time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
    ##     mdot = results.segments[i].conditions.weights.vehicle_mass_rate[:,0] * 360.
    ##     Thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,0] / 9.81
    ##     sfc = np.divide(mdot,Thrust)
    ##     axes.plot(time, sfc, line_style)
    ## axes.set_xlabel('Time (mins)')
    ## axes.set_ylabel('Engine SFC (kg/kg)')
    ## axes.grid(True)

    # ------------------------------------------------------------------
    #   Altitude
    # ------------------------------------------------------------------
    plt.figure("Altitude")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:, 0] / Units.min
        altitude = results.segments[i].conditions.freestream.altitude[:, 0] / Units.km
        axes.plot(time, altitude, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Altitude (km)')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Vehicle Mass
    # ------------------------------------------------------------------
    plt.figure("Vehicle Mass")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:, 0] / Units.min
        mass = results.segments[i].conditions.weights.total_mass[:, 0]
        axes.plot(time, mass, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Vehicle Mass (kg)')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Aerodynamics
    # ------------------------------------------------------------------
    fig = plt.figure("Aerodynamic Forces")
    for segment in list(results.segments.values()):

        time = segment.conditions.frames.inertial.time[:, 0] / Units.min
        Lift = -segment.conditions.frames.wind.lift_force_vector[:, 2]
        Drag = -segment.conditions.frames.wind.drag_force_vector[:, 0]
        Thrust = segment.conditions.frames.body.thrust_force_vector[:, 0]

        axes = fig.add_subplot(4, 1, 1)
        axes.plot(time, Lift, line_style)
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Lift (N)')
        axes.grid(True)

        axes = fig.add_subplot(4, 1, 2)
        axes.plot(time, Drag, line_style)
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Drag (N)')
        axes.grid(True)

        axes = fig.add_subplot(4, 1, 3)
        axes.plot(time, Thrust, line_style)
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Thrust (N)')
        axes.grid(True)

        # stability data may be absent for some segments; plot best-effort
        try:
            Pitching_moment = segment.conditions.stability.static.cm_alpha[:, 0]
            axes = fig.add_subplot(4, 1, 4)
            axes.plot(time, Pitching_moment, line_style)
            axes.set_xlabel('Time (min)')
            axes.set_ylabel('Pitching_moment (~)')
            axes.grid(True)
        except:
            pass

    # ------------------------------------------------------------------
    #   Aerodynamic Coefficients
    # ------------------------------------------------------------------
    fig = plt.figure("Aerodynamic Coefficients")
    for segment in list(results.segments.values()):

        time = segment.conditions.frames.inertial.time[:, 0] / Units.min
        CLift = segment.conditions.aerodynamics.lift_coefficient[:, 0]
        CDrag = segment.conditions.aerodynamics.drag_coefficient[:, 0]
        Drag = -segment.conditions.frames.wind.drag_force_vector[:, 0]
        Thrust = segment.conditions.frames.body.thrust_force_vector[:, 0]

        axes = fig.add_subplot(3, 1, 1)
        axes.plot(time, CLift, line_style)
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('CL')
        axes.grid(True)

        axes = fig.add_subplot(3, 1, 2)
        axes.plot(time, CDrag, line_style)
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('CD')
        axes.grid(True)

        axes = fig.add_subplot(3, 1, 3)
        axes.plot(time, Drag, line_style)
        axes.plot(time, Thrust, 'ro-')
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Drag and Thrust (N)')
        axes.grid(True)

    # ------------------------------------------------------------------
    #   Drag Components
    # ------------------------------------------------------------------
    fig = plt.figure("Drag Components")
    axes = plt.gca()
    for i, segment in enumerate(results.segments.values()):

        time = segment.conditions.frames.inertial.time[:, 0] / Units.min
        drag_breakdown = segment.conditions.aerodynamics.drag_breakdown
        cdp = drag_breakdown.parasite.total[:, 0]
        cdi = drag_breakdown.induced.total[:, 0]
        cdc = drag_breakdown.compressible.total[:, 0]
        cdm = drag_breakdown.miscellaneous.total[:, 0]
        cd = drag_breakdown.total[:, 0]

        # only the primary (blue) pass gets labeled components + legend
        if line_style == 'bo-':
            axes.plot(time, cdp, 'ko-', label='CD_P')
            axes.plot(time, cdi, 'bo-', label='CD_I')
            axes.plot(time, cdc, 'go-', label='CD_C')
            axes.plot(time, cdm, 'yo-', label='CD_M')
            axes.plot(time, cd, 'ro-', label='CD')
            if i == 0:
                axes.legend(loc='upper center')
        else:
            axes.plot(time, cdp, line_style)
            axes.plot(time, cdi, line_style)
            axes.plot(time, cdc, line_style)
            axes.plot(time, cdm, line_style)
            axes.plot(time, cd, line_style)

    axes.set_xlabel('Time (min)')
    axes.set_ylabel('CD')
    axes.grid(True)

    return
def check_results(new_results, old_results):
    """Regression-compare key mission outputs against stored results.

    For each monitored quantity, the max and min over the mission are
    compared; a relative error beyond 1e-6 fails the assertion.  The
    dead commented-out ``check_vals`` block (Python-2 syntax, unused)
    has been removed.

    Raises:
        AssertionError: when any quantity drifts beyond the tolerance.
    """
    # check segment values
    check_list = [
        'segments.cruise.conditions.aerodynamics.angle_of_attack',
        'segments.cruise.conditions.aerodynamics.drag_coefficient',
        'segments.cruise.conditions.aerodynamics.lift_coefficient',
        # 'segments.cruise.conditions.stability.static.Cm_alpha',
        'segments.cruise.conditions.stability.static.Cn_beta',
        'segments.cruise.conditions.propulsion.throttle',
        'segments.cruise.conditions.weights.vehicle_mass_rate',
    ]

    # do the check
    for k in check_list:
        print(k)

        # relative error at the mission maximum
        old_val = np.max(old_results.deep_get(k))
        new_val = np.max(new_results.deep_get(k))
        err = (new_val - old_val) / old_val
        print('Error at Max:', err)
        assert np.abs(err) < 1e-6, 'Max Check Failed : %s' % k

        # relative error at the mission minimum
        old_val = np.min(old_results.deep_get(k))
        new_val = np.min(new_results.deep_get(k))
        err = (new_val - old_val) / old_val
        print('Error at Min:', err)
        assert np.abs(err) < 1e-6, 'Min Check Failed : %s' % k

        print('')

    return
def load_results():
    """Load the archived regression baseline results from disk."""
    return SUAVE.Input_Output.SUAVE.load('results_mission_E190_constThr.res')
def save_results(results):
    """Archive *results* to disk as the new regression baseline."""
    SUAVE.Input_Output.SUAVE.archive(results,'results_mission_E190_constThr.res')
    return
if __name__ == '__main__':
    main()
    #plt.show()   # uncomment to display the comparison plots interactively
| <filename>SUAVE/SUAVE-2.5.0/regression/scripts/payload_range/mission_Embraer_E190_constThr_payload_range.py
# full_setup.py
#
# Created: <NAME>, Aug 2014
# Modified:
""" setup file for a mission with a E190
"""
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import SUAVE
from SUAVE.Core import Units
import numpy as np
import pylab as plt
import copy, time
from SUAVE.Core import (
Data, Container,
)
# the analysis functions
from SUAVE.Methods.Performance import payload_range
#from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing
import sys
sys.path.append('../Vehicles')
# the analysis functions
from Embraer_190 import vehicle_setup, configs_setup
from SUAVE.Methods.Performance import payload_range
from SUAVE.Input_Output.Results import print_parasite_drag, \
print_compress_drag, \
print_engine_data, \
print_mission_breakdown
# ----------------------------------------------------------------------
# Main
# ----------------------------------------------------------------------
def main():
    """Run the E190 constant-throttle regression case.

    Builds the vehicle configurations and analyses, evaluates weights
    and the mission, writes the report files, and checks the new
    results against the archived baseline.
    """
    # define the problem
    configs, analyses = full_setup()

    configs.finalize()
    analyses.finalize()

    # weight analysis
    weights = analyses.configs.base.weights
    breakdown = weights.evaluate()

    # mission analysis
    mission = analyses.missions
    results = mission.evaluate()

    # print engine data into file
    print_engine_data(configs.base,filename = 'engine_data.dat')

    # print parasite drag data into file
    # define reference condition for parasite drag
    ref_condition = Data()
    ref_condition.mach_number = 0.3
    ref_condition.reynolds_number = 20e6
    print_parasite_drag(ref_condition,configs.cruise,analyses,'parasite_drag.dat')

    # print compressibility drag data into file
    print_compress_drag(configs.cruise,analyses,filename = 'compress_drag.dat')

    # print mission breakdown
    print_mission_breakdown(results,filename='mission_breakdown.dat')

    # load older results
    #save_results(results)   # uncomment to refresh the regression baseline
    old_results = load_results()

    # plot the new results over the old (baseline drawn in black)
    plot_mission(results)
    plot_mission(old_results,'k-')

    # check the results
    check_results(results,old_results)

    return
# ----------------------------------------------------------------------
# Analysis Setup
# ----------------------------------------------------------------------
def full_setup():
    """Assemble the problem: configurations plus an analyses container.

    Returns the (configs, analyses) pair, where analyses holds the
    per-configuration analyses and the mission that uses them.
    """
    # vehicle data and derived configurations
    base_vehicle = vehicle_setup()
    configs = configs_setup(base_vehicle)

    # one analysis stack per configuration
    configs_analyses = analyses_setup(configs)

    # the mission evaluated with those analyses
    mission = mission_setup(configs_analyses)

    analyses = SUAVE.Analyses.Analysis.Container()
    analyses.configs = configs_analyses
    analyses.missions = mission

    return configs, analyses
# ----------------------------------------------------------------------
# Define the Vehicle Analyses
# ----------------------------------------------------------------------
def analyses_setup(configs):
    """Create a base analysis for every configuration, then adjust a few."""
    analyses = SUAVE.Analyses.Analysis.Container()

    # one standard analysis stack per configuration
    for tag, config in configs.items():
        analyses[tag] = base_analysis(config)

    # takeoff: extra drag increment on top of the base analysis
    analyses.takeoff.aerodynamics.drag_coefficient_increment = 0.1000

    # landing analysis
    aerodynamics = analyses.landing.aerodynamics
    # do something here eventually

    return analyses
def base_analysis(vehicle):
    """Build the standard analysis stack for one vehicle configuration.

    Stacks sizing, transport weights, fidelity-zero aerodynamics and
    stability, the energy network, the planet, and the 1976 US Standard
    Atmosphere, in that order.
    """
    # ------------------------------------------------------------------
    #   Initialize the Analyses
    # ------------------------------------------------------------------
    analyses = SUAVE.Analyses.Vehicle()

    # ------------------------------------------------------------------
    #  Basic Geometry Relations
    sizing = SUAVE.Analyses.Sizing.Sizing()
    sizing.features.vehicle = vehicle
    analyses.append(sizing)

    # ------------------------------------------------------------------
    #  Weights
    weights = SUAVE.Analyses.Weights.Weights_Transport()
    weights.vehicle = vehicle
    analyses.append(weights)

    # ------------------------------------------------------------------
    #  Aerodynamics Analysis
    aerodynamics = SUAVE.Analyses.Aerodynamics.Fidelity_Zero()
    aerodynamics.geometry = vehicle
    aerodynamics.settings.drag_coefficient_increment = 0.0000
    aerodynamics.settings.aircraft_span_efficiency_factor = 1.0
    analyses.append(aerodynamics)

    # ------------------------------------------------------------------
    #  Stability Analysis
    stability = SUAVE.Analyses.Stability.Fidelity_Zero()
    stability.geometry = vehicle
    analyses.append(stability)

    # ------------------------------------------------------------------
    #  Energy Analysis
    energy = SUAVE.Analyses.Energy.Energy()
    energy.network=vehicle.networks
    analyses.append(energy)

    # ------------------------------------------------------------------
    #  Planet Analysis
    planet = SUAVE.Analyses.Planets.Planet()
    analyses.append(planet)

    # ------------------------------------------------------------------
    #  Atmosphere Analysis
    atmosphere = SUAVE.Analyses.Atmospheric.US_Standard_1976()
    atmosphere.features.planet = planet.features
    analyses.append(atmosphere)

    # done!
    return analyses
# ----------------------------------------------------------------------
# Define the Mission
# ----------------------------------------------------------------------
def mission_setup(analyses):
    """Define the E190 test mission.

    Three constant-throttle climbs, one constant-speed/altitude cruise,
    and three constant-rate descents, evaluated sequentially.
    """
    # ------------------------------------------------------------------
    #   Initialize the Mission
    # ------------------------------------------------------------------
    mission = SUAVE.Analyses.Mission.Sequential_Segments()
    mission.tag = 'embraer_e190ar test mission'

    # atmospheric model
    atmosphere = SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()
    planet = SUAVE.Attributes.Planets.Earth()

    # airport (sea level, standard day)
    airport = SUAVE.Attributes.Airports.Airport()
    airport.altitude = 0.0 * Units.ft
    airport.delta_isa = 0.0
    airport.atmosphere = SUAVE.Attributes.Atmospheres.Earth.US_Standard_1976()
    mission.airport = airport

    # unpack Segments module
    Segments = SUAVE.Analyses.Mission.Segments

    # ------------------------------------------------------------------
    #   First Climb Segment: Constant Speed, Constant Throttle
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Throttle_Constant_Speed()
    segment.tag = "climb_250kcas"

    # connect vehicle configuration
    segment.analyses.extend( analyses.takeoff )

    # define segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.altitude_start = 0.0 * Units.km
    segment.altitude_end = 3.048 * Units.km
    segment.air_speed = 250.0 * Units.knots
    segment.throttle = 1.0

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Second Climb Segment: Constant Speed, Constant Throttle
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Throttle_Constant_Speed()
    # NOTE(review): tag says 280 KCAS but the speed below is 350 knots — confirm
    segment.tag = "climb_280kcas"

    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.altitude_end = 32000. * Units.ft
    segment.air_speed = 350.0 * Units.knots
    segment.throttle = 1.0

    # dummy for post process script
    segment.climb_rate = 0.1

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Third Climb Segment: Constant Speed, Constant Throttle
    # ------------------------------------------------------------------
    segment = Segments.Climb.Constant_Throttle_Constant_Speed()
    segment.tag = "climb_final"

    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.altitude_end = 35000. * Units.ft
    segment.air_speed = 380.0 * Units.knots
    segment.throttle = 1.0

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Cruise Segment: constant speed, constant altitude
    # ------------------------------------------------------------------
    segment = Segments.Cruise.Constant_Speed_Constant_Altitude()
    segment.tag = "cruise"

    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.air_speed = 450. * Units.knots #230. * Units['m/s']
    ## 35kft:
    # 415. => M = 0.72
    # 450. => M = 0.78
    # 461. => M = 0.80
    ## 37kft:
    # 447. => M = 0.78
    segment.distance = 2100. * Units.nmi
    segment.state.numerics.number_control_points = 6

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   First Descent Segment: constant speed, constant segment rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_m0_77"

    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.altitude_end = 9.31 * Units.km
    segment.air_speed = 440.0 * Units.knots
    segment.descent_rate = 2600. * Units['ft/min']

    # add to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Second Descent Segment: constant speed, constant segment rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_290kcas"

    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.altitude_end = 3.657 * Units.km
    segment.air_speed = 365.0 * Units.knots
    segment.descent_rate = 2300. * Units['ft/min']

    # append to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Third Descent Segment: constant speed, constant segment rate
    # ------------------------------------------------------------------
    segment = Segments.Descent.Constant_Speed_Constant_Rate()
    segment.tag = "descent_250kcas"

    # connect vehicle configuration
    segment.analyses.extend( analyses.cruise )

    # segment attributes
    segment.atmosphere = atmosphere
    segment.planet = planet
    segment.altitude_end = 0.0 * Units.km
    segment.air_speed = 250.0 * Units.knots
    segment.descent_rate = 1500. * Units['ft/min']

    # append to mission
    mission.append_segment(segment)

    # ------------------------------------------------------------------
    #   Mission definition complete
    # ------------------------------------------------------------------
    return mission
#: def define_mission()
# ----------------------------------------------------------------------
# Plot Mission
# ----------------------------------------------------------------------
def plot_mission(results,line_style='bo-'):
    """Plot throttle, angle of attack, fuel-burn rate, altitude, mass,
    aerodynamic force/coefficient and drag-component histories.

    *line_style* distinguishes runs: 'bo-' marks the new results (the
    drag-components plot then gets per-component colors and a legend);
    the baseline is typically drawn with 'k-'.
    """
    # ------------------------------------------------------------------
    #   Throttle
    # ------------------------------------------------------------------
    plt.figure("Throttle History")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        eta = results.segments[i].conditions.propulsion.throttle[:,0]
        axes.plot(time, eta, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Throttle')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Angle of Attack
    # ------------------------------------------------------------------
    plt.figure("Angle of Attack History")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        aoa = results.segments[i].conditions.aerodynamics.angle_of_attack[:,0] / Units.deg
        axes.plot(time, aoa, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Angle of Attack (deg)')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Fuel Burn Rate
    # ------------------------------------------------------------------
    plt.figure("Fuel Burn Rate")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        mdot = results.segments[i].conditions.weights.vehicle_mass_rate[:,0]
        axes.plot(time, mdot, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Fuel Burn Rate (kg/s)')
    axes.grid(True)

    ## # ------------------------------------------------------------------
    ## #   Engine SFC
    ## # ------------------------------------------------------------------
    ## plt.figure("Engine SFC")
    ## axes = plt.gca()
    ## for i in range(len(results.segments)):
    ##     time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
    ##     mdot = results.segments[i].conditions.weights.vehicle_mass_rate[:,0] * 360.
    ##     Thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,0] / 9.81
    ##     sfc = np.divide(mdot,Thrust)
    ##     axes.plot(time, sfc, line_style)
    ## axes.set_xlabel('Time (mins)')
    ## axes.set_ylabel('Engine SFC (kg/kg)')
    ## axes.grid(True)

    # ------------------------------------------------------------------
    #   Altitude
    # ------------------------------------------------------------------
    plt.figure("Altitude")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        altitude = results.segments[i].conditions.freestream.altitude[:,0] / Units.km
        axes.plot(time, altitude, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Altitude (km)')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Vehicle Mass
    # ------------------------------------------------------------------
    plt.figure("Vehicle Mass")
    axes = plt.gca()
    for i in range(len(results.segments)):
        time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
        mass = results.segments[i].conditions.weights.total_mass[:,0]
        axes.plot(time, mass, line_style)
    axes.set_xlabel('Time (mins)')
    axes.set_ylabel('Vehicle Mass (kg)')
    axes.grid(True)

    # ------------------------------------------------------------------
    #   Aerodynamics
    # ------------------------------------------------------------------
    fig = plt.figure("Aerodynamic Forces")
    for segment in list(results.segments.values()):
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        Lift = -segment.conditions.frames.wind.lift_force_vector[:,2]
        Drag = -segment.conditions.frames.wind.drag_force_vector[:,0]
        Thrust = segment.conditions.frames.body.thrust_force_vector[:,0]

        axes = fig.add_subplot(4,1,1)
        axes.plot( time , Lift , line_style )
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Lift (N)')
        axes.grid(True)

        axes = fig.add_subplot(4,1,2)
        axes.plot( time , Drag , line_style )
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Drag (N)')
        axes.grid(True)

        axes = fig.add_subplot(4,1,3)
        axes.plot( time , Thrust , line_style )
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Thrust (N)')
        axes.grid(True)

        # Cm_alpha is not available from every stability model
        try:
            Pitching_moment = segment.conditions.stability.static.cm_alpha[:,0]
            axes = fig.add_subplot(4,1,4)
            axes.plot( time , Pitching_moment , line_style )
            axes.set_xlabel('Time (min)')
            axes.set_ylabel('Pitching_moment (~)')
            axes.grid(True)
        except:
            pass

    # ------------------------------------------------------------------
    #   Aerodynamic Coefficients
    # ------------------------------------------------------------------
    fig = plt.figure("Aerodynamic Coefficients")
    for segment in list(results.segments.values()):
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        CLift = segment.conditions.aerodynamics.lift_coefficient[:,0]
        CDrag = segment.conditions.aerodynamics.drag_coefficient[:,0]
        Drag = -segment.conditions.frames.wind.drag_force_vector[:,0]
        Thrust = segment.conditions.frames.body.thrust_force_vector[:,0]

        axes = fig.add_subplot(3,1,1)
        axes.plot( time , CLift , line_style )
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('CL')
        axes.grid(True)

        axes = fig.add_subplot(3,1,2)
        axes.plot( time , CDrag , line_style )
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('CD')
        axes.grid(True)

        axes = fig.add_subplot(3,1,3)
        axes.plot( time , Drag , line_style )
        axes.plot( time , Thrust , 'ro-' )
        axes.set_xlabel('Time (min)')
        axes.set_ylabel('Drag and Thrust (N)')
        axes.grid(True)

    # ------------------------------------------------------------------
    #   Drag Components
    # ------------------------------------------------------------------
    fig = plt.figure("Drag Components")
    axes = plt.gca()
    for i, segment in enumerate(results.segments.values()):
        time = segment.conditions.frames.inertial.time[:,0] / Units.min
        drag_breakdown = segment.conditions.aerodynamics.drag_breakdown
        cdp = drag_breakdown.parasite.total[:,0]
        cdi = drag_breakdown.induced.total[:,0]
        cdc = drag_breakdown.compressible.total[:,0]
        cdm = drag_breakdown.miscellaneous.total[:,0]
        cd = drag_breakdown.total[:,0]

        # only the 'new results' style gets per-component colors + legend
        if line_style == 'bo-':
            axes.plot( time , cdp , 'ko-', label='CD_P' )
            axes.plot( time , cdi , 'bo-', label='CD_I' )
            axes.plot( time , cdc , 'go-', label='CD_C' )
            axes.plot( time , cdm , 'yo-', label='CD_M' )
            axes.plot( time , cd , 'ro-', label='CD' )
            if i == 0:
                axes.legend(loc='upper center')
        else:
            axes.plot( time , cdp , line_style )
            axes.plot( time , cdi , line_style )
            axes.plot( time , cdc , line_style )
            axes.plot( time , cdm , line_style )
            axes.plot( time , cd , line_style )

    axes.set_xlabel('Time (min)')
    axes.set_ylabel('CD')
    axes.grid(True)

    return
def check_results(new_results, old_results):
    """Regression check: compare cruise-segment channel extrema.

    For every monitored channel, assert that the maximum and minimum of
    the new results agree with the archived baseline to a relative
    tolerance of 1e-6.
    """
    monitored = [
        'segments.cruise.conditions.aerodynamics.angle_of_attack',
        'segments.cruise.conditions.aerodynamics.drag_coefficient',
        'segments.cruise.conditions.aerodynamics.lift_coefficient',
        #'segments.cruise.conditions.stability.static.Cm_alpha',
        'segments.cruise.conditions.stability.static.Cn_beta',
        'segments.cruise.conditions.propulsion.throttle',
        'segments.cruise.conditions.weights.vehicle_mass_rate',
    ]

    for key in monitored:
        print(key)
        # check both extrema with the same relative-error criterion
        for reducer, label in ((np.max, 'Max'), (np.min, 'Min')):
            reference = reducer(old_results.deep_get(key))
            candidate = reducer(new_results.deep_get(key))
            rel_err = (candidate - reference) / reference
            print('Error at %s:' % label, rel_err)
            assert np.abs(rel_err) < 1e-6, '%s Check Failed : %s' % (label, key)
        print('')

    return
def load_results():
    """Load the archived regression baseline results from disk."""
    return SUAVE.Input_Output.SUAVE.load('results_mission_E190_constThr.res')
def save_results(results):
    """Archive *results* to disk as the new regression baseline."""
    SUAVE.Input_Output.SUAVE.archive(results,'results_mission_E190_constThr.res')
    return
if __name__ == '__main__':
    main()
    #plt.show()   # uncomment to display the comparison plots interactively
| en | 0.292185 | # full_setup.py # # Created: <NAME>, Aug 2014 # Modified: setup file for a mission with a E190 # ---------------------------------------------------------------------- # Imports # ---------------------------------------------------------------------- # the analysis functions #from SUAVE.Methods.Propulsion.turbofan_sizing import turbofan_sizing # the analysis functions # ---------------------------------------------------------------------- # Main # ---------------------------------------------------------------------- # define the problem # weight analysis # mission analysis # print engine data into file # print parasite drag data into file # define reference condition for parasite drag # print compressibility drag data into file # print mission breakdown # load older results #save_results(results) # plt the old results # check the results # ---------------------------------------------------------------------- # Analysis Setup # ---------------------------------------------------------------------- # vehicle data # vehicle analyses # mission analyses # ---------------------------------------------------------------------- # Define the Vehicle Analyses # ---------------------------------------------------------------------- # build a base analysis for each config # adjust analyses for configs # takeoff_analysis # landing analysis # do something here eventually # ------------------------------------------------------------------ # Initialize the Analyses # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Basic Geometry Relations # ------------------------------------------------------------------ # Weights # ------------------------------------------------------------------ # Aerodynamics Analysis # ------------------------------------------------------------------ # Stability Analysis # ------------------------------------------------------------------ # 
Energy Analysis # ------------------------------------------------------------------ # Planet Analysis # ------------------------------------------------------------------ # Atmosphere Analysis # done! # ---------------------------------------------------------------------- # Define the Mission # ---------------------------------------------------------------------- # ------------------------------------------------------------------ # Initialize the Mission # ------------------------------------------------------------------ # atmospheric model #airport # unpack Segments module # ------------------------------------------------------------------ # First Climb Segment: Constant Speed, Constant Throttle # ------------------------------------------------------------------ # connect vehicle configuration # define segment attributes # add to misison # ------------------------------------------------------------------ # Second Climb Segment: Constant Speed, Constant Throttle # ------------------------------------------------------------------ # connect vehicle configuration # segment attributes # dummy for post process script # add to mission # ------------------------------------------------------------------ # Third Climb Segment: Constant Speed, Constant Climb Rate # ------------------------------------------------------------------ # connect vehicle configuration # segment attributes # add to mission # ------------------------------------------------------------------ # Cruise Segment: constant speed, constant altitude # ------------------------------------------------------------------ # connect vehicle configuration # segment attributes #230. * Units['m/s'] ## 35kft: # 415. => M = 0.72 # 450. => M = 0.78 # 461. => M = 0.80 ## 37kft: # 447. 
=> M = 0.78 # add to mission # ------------------------------------------------------------------ # First Descent Segment: consant speed, constant segment rate # ------------------------------------------------------------------ # connect vehicle configuration # segment attributes # add to mission # ------------------------------------------------------------------ # Second Descent Segment: consant speed, constant segment rate # ------------------------------------------------------------------ # connect vehicle configuration # segment attributes # append to mission # ------------------------------------------------------------------ # Third Descent Segment: consant speed, constant segment rate # ------------------------------------------------------------------ # connect vehicle configuration # segment attributes # append to mission # ------------------------------------------------------------------ # Mission definition complete # ------------------------------------------------------------------ #: def define_mission() # ---------------------------------------------------------------------- # Plot Mission # ---------------------------------------------------------------------- # ------------------------------------------------------------------ # Throttle # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Angle of Attack # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Fuel Burn Rate # ------------------------------------------------------------------ ## # ------------------------------------------------------------------ ## # Engine SFC ## # ------------------------------------------------------------------ ## plt.figure("Engine SFC") ## axes = plt.gca() ## for i in range(len(results.segments)): ## time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min ## mdot = 
results.segments[i].conditions.weights.vehicle_mass_rate[:,0] * 360. ## Thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,0] / 9.81 ## sfc = np.divide(mdot,Thrust) ## axes.plot(time, sfc, line_style) ## axes.set_xlabel('Time (mins)') ## axes.set_ylabel('Engine SFC (kg/kg)') ## axes.grid(True) # ------------------------------------------------------------------ # Altitude # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Vehicle Mass # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Aerodynamics # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Aerodynamics 2 # ------------------------------------------------------------------ # ------------------------------------------------------------------ # Aerodynamics 2 # ------------------------------------------------------------------ # check segment values #'segments.cruise.conditions.stability.static.Cm_alpha', # do the check ## check high level outputs #def check_vals(a,b): #if isinstance(a,Data): #for k in a.keys(): #err = check_vals(a[k],b[k]) #if err is None: continue #print 'outputs' , k #print 'Error:' , err #print '' #assert np.abs(err) < 1e-6 , 'Outputs Check Failed : %s' % k #else: #return (a-b)/a ## do the check #check_vals(old_results.output,new_results.output) #plt.show() | 1.471014 | 1 |
dockers/API/restV1.py | FlorisFok/DataVisualisation | 0 | 6618773 | <reponame>FlorisFok/DataVisualisation<gh_stars>0
from flask import Flask, jsonify, request
from flask_reverse_proxy_fix.middleware import ReverseProxyPrefixFix
from flask_cors import CORS
import mysql.connector
from mysql.connector import Error
import json
from sklearn.svm import SVR
import pandas as pd
import numpy as np
import datetime
# Set up the API to work cross-origin and behind a reverse proxy
app = Flask(__name__)
# path prefix handled by the reverse proxy in front of this service
app.config['REVERSE_PROXY_PATH'] = '/foo'
ReverseProxyPrefixFix(app)
CORS(app)  # allow cross-origin requests from the front-end

# (lat, lon) of the center of Amsterdam, used by the /loc radius filter
CENTER = (52.367612, 4.893884)
@app.route('/', methods=['GET', 'POST'])
def index():
    """Echo posted JSON back (POST) or return the whole table (GET)."""
    if request.method == 'POST':
        payload = request.get_json()
        return jsonify({"you sent": payload}), 201
    return jsonify({"data": get_full_json()}), 200
# Return the last <num> uploads — NOT IN USE FOR THE PROJECT
@app.route('/last/<int:num>', methods=["GET"])
def choose_random(num):
    """Return the last *num* rows of the table."""
    rows = get_full_json()
    return jsonify({"data": rows[-num:]}), 200
# Project the table onto selected columns — NOT IN USE FOR THE PROJECT
@app.route('/<column>', methods=["GET"])
def roll_dice(column):
    """Return only the '+'-separated columns named in *column*.

    Unknown column names yield {"data": False, "wrong": <name>}.
    Fix: previously crashed with IndexError when the table was empty
    (it validated columns against data[0]); an empty table now simply
    returns no rows.
    """
    cols = [c for c in column.split('+') if c != '']
    data = get_full_json()
    if not data:
        # nothing to validate against and nothing to project
        return jsonify({"data": []}), 200
    valid = set(data[0].keys())
    for col in cols:
        if col not in valid:
            return jsonify({'data': False, 'wrong': col})
    projected = [{col: row[col] for col in cols} for row in data]
    return jsonify({"data": projected}), 200
@app.route('/where/<commands>', methods=["GET"])
def where_command(commands):
    '''
    Execute WHERE commands using a home-grown URL syntax.
    '''
    # NOTE(review): commands_parser is not defined anywhere in this file —
    # presumably it should exist elsewhere; verify, or this route 500s.
    calls = commands_parser(commands)
    data = get_where_json(calls)
    return jsonify({"data": data}), 200
@app.route('/number/<commands>', methods=["GET"])
def Number_signup(commands):
    """Register a "<name>+<number>" pair for SMS alerts.

    Fix: the INSERT is now parameterized — the previous str.format
    interpolation let URL-supplied values break out of the SQL string
    (injection).
    """
    info = commands.split("+")
    conn = connect()
    mycursor = conn.cursor()
    mycursor.execute(
        "INSERT INTO kamernet_sms (name, number, price_min, price_max) "
        "VALUES (%s, %s, 0, 750);",
        (info[0], info[1]))
    conn.commit()
    return jsonify({"data": True}), 200
@app.route('/removenumber/<commands>', methods=["GET"])
def Number_remove(commands):
    """Remove a "<name>+<number>" pair from the SMS alert table.

    Fix: the DELETE is now parameterized — the previous str.format
    interpolation let URL-supplied values break out of the SQL string
    (injection).
    """
    info = commands.split("+")
    conn = connect()
    mycursor = conn.cursor()
    mycursor.execute(
        "DELETE FROM kamernet_sms WHERE name=%s AND number=%s",
        (info[0], info[1]))
    conn.commit()
    return jsonify({"data": True}), 200
@app.route('/loc/<range>/<commands>', methods=["GET"])
def location(range, commands):
    """Return matching rooms within *range* km (straight line) of CENTER."""
    calls = commands_parser(commands)
    rooms = get_where_json(calls)

    # ~111 km per degree: crude flat-earth approximation around Amsterdam
    KM_PER_DEGREE = 111
    selected = []
    for room in rooms:
        degrees = ((room['lat'] - CENTER[0]) ** 2 +
                   (room['lon'] - CENTER[1]) ** 2) ** 0.5
        if degrees * KM_PER_DEGREE < int(range):
            selected.append(room)

    return jsonify({"data": selected}), 200
@app.route('/predict/<commands>', methods=["GET"])
def price_predict(commands):
    """Predict a room price from "lat&lon&size&time" ('+' encodes the
    decimal point inside each field)."""
    fields = [float(v.replace('+', '.')) for v in commands.split("&")]
    # field order: lat, lon, size, time
    estimate = predict_price(fields[0], fields[1], fields[2], fields[3])
    return jsonify({"price": list(estimate)[0]}), 200
@app.route('/due_date', methods=["GET"])
def due_dates():
    """Per-day counts of room due dates (2019 only) for the calendar widget."""
    conn = connect()
    mycursor = conn.cursor()
    mycursor.execute("Select count(*), due_date from kamernet group by due_date")
    rows = mycursor.fetchall()

    data = []
    for count, due in rows:
        # the front-end calendar only supports 2019
        if due.strftime("%Y") != '2019':
            continue
        # dd/mm/YYYY with the year shortened to '19'
        data.append({'date': due.strftime("%d/%m/%Y")[:-4] + '19',
                     'value': count})

    return jsonify({"data": data}), 200
@app.route('/hourly', methods=["GET"])
def hour_histogram():
    '''
    Returns the data for the hour histogram: average upload count per
    hour of day over the whole log, plus the most recent measurement.
    '''
    conn = connect()
    mycursor = conn.cursor()
    mycursor.execute("SELECT * FROM kamernet_log")
    all_data = mycursor.fetchall()

    # hourly average: d_hour[hour] = [sum of counts, number of samples]
    d_hour = {}
    for i in all_data:
        # outlier guard: ignore counts above 30 — TODO confirm threshold
        if i[1] > 30:
            continue
        hour = i[2].hour
        if hour in d_hour:
            d_hour[hour][0] += i[1]
            d_hour[hour][1] += 1
        else:
            d_hour[hour] = [i[1], 1]

    # midnight of today, as the lower bound for "today's" log entries
    now = datetime.datetime.now()
    date_time = now.strftime("%Y-%m-%d 00:00:00")

    # Get this days data --> currently using only last one
    mycursor.execute("SELECT * FROM kamernet_log WHERE date>'{}'"
                     .format(date_time))
    last = mycursor.fetchall()
    count = last[-1][1]
    hour = last[-1][2].hour

    hist_data = []
    # Parse data and return
    for i in d_hour:
        # server clock is 2 hours behind local time; shift hours to compensate
        HOURS_BIHIND = 2
        hist_data.append({'hours':(i+HOURS_BIHIND)%24,
                          'values':round((d_hour[i][0]/d_hour[i][1]), 2)})

    return jsonify({"data":
                    {'histogram': hist_data,
                     "current":
                     {"hour":hour,"value":count}
                    }}), 200
def connect():
    """Connect to the MySQL database and return the open connection.

    Raises mysql.connector.Error when the connection cannot be made.
    Fix: the failure path previously printed the error and then hit
    'return conn' with conn unbound, raising a confusing
    UnboundLocalError; the original error is now re-raised.
    NOTE(review): credentials are hard-coded — move to config/env.
    """
    try:
        conn = mysql.connector.connect(host='192.168.127.12',
                                       database='first',
                                       user='root',
                                       password='<PASSWORD>')
        if conn.is_connected():
            print('Connected to MySQL database')
    except Error as e:
        print(e)
        raise
    return conn
def get_full_json(col='*'):
    """Fetch the requested columns (default: all) from kamernet as dicts."""
    conn = connect()
    cursor = conn.cursor()
    cursor.execute('''SELECT ''' + col + ''' FROM kamernet''')
    headers = [desc[0] for desc in cursor.description]
    rows = cursor.fetchall()
    return make_json(headers, rows)
def get_where_json(commands):
'''
Fetch every thing where ....
'''
conn = connect()
mycursor = conn.cursor()
call = '''SELECT * FROM kamernet'''
call = call + " WHERE"
for com in commands:
call = call + com + 'AND'
call = call[:-4]
mycursor.execute(call)
row_headers=[x[0] for x in mycursor.description]
rv = mycursor.fetchall()
json_data = make_json(row_headers, rv)
return json_data
def predict_price(lat, lon, size, tijd):
'''
Price prediction machine learning magic!
'''
# Get data from mysql
conn = connect()
mycursor = conn.cursor()
mycursor.execute('''SELECT * FROM kamernet''')
# PArse data into json
row_headers=[x[0] for x in mycursor.description]
rv = mycursor.fetchall()
json_data = make_json(row_headers, rv)
# prepare data
df = pd.DataFrame(json_data)
df_dropped = df.drop(['loc', 'url', 'start_date', 'id', 'due_date'], 1)
x = np.array(df_dropped.drop(['price'],1))
df_dropped.dropna(inplace=True)
y = np.array(df_dropped['price'])
# Make Regression model
clf = SVR(kernel='rbf',
C=100, gamma='auto',
degree=3,
epsilon=.1,
coef0=1)
# Train Regression model
clf.fit(x, y)
# Predict with the new data
price = clf.predict(np.array([[lat, lon, size, tijd]]))
return price
def make_json(row_headers, data):
'''
Make a json from the sql list in list
'''
json_data=[]
for result in data:
result = list(result)
result[2] = str(result[2])
result[3] = str(result[3])
json_data.append(dict(zip(row_headers,result)))
return json_data
def from_date(arg, year, month, day):
'''
Date format function
'''
date_time = "{Y}-{m}-{d} 00:00:00".format(Y=year, m=month, d=day)
call = " {} >='{}' ".format(arg, date_time)
return call
def till_date(arg, year, month, day):
'''
Date format function
'''
date_time = "{Y}-{m}-{d} 00:00:00".format(Y=year, m=month, d=day)
call = " {} <='{}' ".format(arg, date_time)
return call
def periods(arg, year, month, day, year2, month2, day2):
'''
Date format function
'''
date_time = "{Y}-{m}-{d} 00:00:00".format(Y=year, m=month, d=day)
date_time2 = "{Y}-{m}-{d} 00:00:00".format(Y=year2, m=month2, d=day2)
call = " {0} >='{1}' AND {0} <= '{2}' ".format(arg, date_time, date_time2)
return call
def commands_parser(raw_url):
'''
Universal, but unique command parser :D
returns list of calls - no string
'''
commands = raw_url.split("&")
calls = []
for com in commands:
if 'from' in com and 'till' in com:
period = com.split("+")
period0 = period[0].split('=')
period1 = period[1].split('=')
t0 = period0[1][4:].split("$")
t1 = period1[1][4:].split("$")
call = periods(period0[0], t0[0], t0[1], t0[2],
t1[0], t1[1], t1[2])
elif 'from' in com:
print(1, com)
period0 = com.split('=')
t0 = period0[1][4:].split("$")
call = from_date(period0[0], t0[0], t0[1], t0[2])
elif 'till' in com:
period0 = com.split('=')
t0 = period0[1][4:].split("$")
call = till_date(period0[0], t0[0], t0[1], t0[2])
else:
arg = [i for i in com if not i.isdigit()]
value = [i for i in com if i.isdigit()]
operator = arg[-1].replace('+','>').replace('-','<')
arg = ''.join(arg[:-1])
value = ''.join(value)
call = " {} {} {} ".format(arg, operator, value)
calls.append(call)
return calls
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0')
| from flask import Flask, jsonify, request
from flask_reverse_proxy_fix.middleware import ReverseProxyPrefixFix
from flask_cors import CORS
import mysql.connector
from mysql.connector import Error
import json
from sklearn.svm import SVR
import pandas as pd
import numpy as np
import datetime
# Setup the API to work Cross orgin and behind a proxy
app = Flask(__name__)
app.config['REVERSE_PROXY_PATH'] = '/foo'
ReverseProxyPrefixFix(app)
CORS(app)
# Center of Amsterdam
CENTER = (52.367612, 4.893884)
@app.route('/', methods=['GET', 'POST'])
def index():
'''
Just return allllll data and test with jsons
'''
if (request.method == 'POST'):
some_json = request.get_json()
return jsonify({"you sent":some_json}), 201
else:
return jsonify({"data":get_full_json()}), 200
# Get last <num> uploads, NOT IN USE FOR PROJECT
@app.route('/last/<int:num>',methods=["GET"])
def choose_random(num):
data = get_full_json()
return jsonify({"data": data[-num:]}), 200
# Get certain columns, NOT IN USE FOR PROJECT
@app.route('/<column>',methods=["GET"])
def roll_dice(column):
cols = column.split('+')
cols = [i for i in cols if i != '']
data = get_full_json()
for col in cols:
if not col in list(data[0].keys()):
return jsonify({'data':False, 'wrong':col})
new_data = []
for piece in data:
ed = {}
for col in cols:
ed[col] = piece[col]
new_data.append(ed)
return jsonify({"data": new_data}), 200
@app.route('/where/<commands>', methods=["GET"])
def where_command(commands):
'''
Execute Where commands using my own made up syntax
'''
calls = commands_parser(commands)
data = get_where_json(calls)
return jsonify({"data": data}), 200
@app.route('/number/<commands>', methods=["GET"])
def Number_signup(commands):
'''
Add number to text database
'''
info = commands.split("+")
conn = connect()
mycursor = conn.cursor()
mycursor.execute("INSERT INTO kamernet_sms (name, number, price_min, price_max) VALUES ('{}', '{}', 0, 750);"
.format(info[0], info[1]))
conn.commit()
return jsonify({"data": True}), 200
@app.route('/removenumber/<commands>', methods=["GET"])
def Number_remove(commands):
'''
Remove number from text database
'''
info = commands.split("+")
conn = connect()
mycursor = conn.cursor()
mycursor.execute("DELETE FROM kamernet_sms WHERE name='{}' AND number='{}'"
.format(info[0], info[1]))
conn.commit()
return jsonify({"data": True}), 200
@app.route('/loc/<range>/<commands>', methods=["GET"])
def location(range, commands):
'''
returns only rooms that are within a certain range (km) circle from CENTER
'''
calls = commands_parser(commands)
data = get_where_json(calls)
new_data = []
for piece in data:
dinstance = ((abs(piece['lat'] - CENTER[0]))**2 +
(abs(piece['lon']-CENTER[1]))**2)**0.5
# 111 is the differnence between 1 km and 1 degree +-
if (int(range) > dinstance*111):
new_data.append(piece)
return jsonify({"data": new_data}), 200
@app.route('/predict/<commands>', methods=["GET"])
def price_predict(commands):
'''
Very bad prediction of the price, but fun to make
'''
vars = commands.split("&")
vars = [float(i.replace('+','.')) for i in vars]
# help: vars = [lat, lon, size, tijd]
# this makes a prediction
price = predict_price(vars[0], vars[1], vars[2], vars[3])
return jsonify({"price": list(price)[0]}), 200
@app.route('/due_date', methods=["GET"])
def due_dates():
'''
Returns all the due dates for the calander
'''
conn = connect()
mycursor = conn.cursor()
mycursor.execute("Select count(*), due_date from kamernet group by due_date")
raw_data = mycursor.fetchall()
data = []
for raw in raw_data:
# Only 2019 support
if raw[1].strftime("%Y") != '2019':
continue
# Parse data in usable format
data.append({'date':raw[1].strftime("%d/%m/%Y")[:-4]+'19',
'value':raw[0]})
return jsonify({"data":data}), 200
@app.route('/hourly', methods=["GET"])
def hour_histogram():
'''
Returns the data for the hour histogram
'''
conn = connect()
mycursor = conn.cursor()
mycursor.execute("SELECT * FROM kamernet_log")
all_data = mycursor.fetchall()
# Make hour avarage
d_hour = {}
for i in all_data:
if i[1] > 30:
continue
hour = i[2].hour
if hour in d_hour:
d_hour[hour][0] += i[1]
d_hour[hour][1] += 1
else:
d_hour[hour] = [i[1], 1]
# Time stamp
now = datetime.datetime.now()
date_time = now.strftime("%Y-%m-%d 00:00:00")
# Get this days data --> currently using only last one
mycursor.execute("SELECT * FROM kamernet_log WHERE date>'{}'"
.format(date_time))
last = mycursor.fetchall()
count = last[-1][1]
hour = last[-1][2].hour
hist_data = []
# Parse data and return
for i in d_hour:
# I recently found out the date on my server is 2 hours behind....
HOURS_BIHIND = 2
hist_data.append({'hours':(i+HOURS_BIHIND)%24,
'values':round((d_hour[i][0]/d_hour[i][1]), 2)})
return jsonify({"data":
{'histogram': hist_data,
"current":
{"hour":hour,"value":count}
}}), 200
def connect():
""" Connect to MySQL database """
try:
conn = mysql.connector.connect(host='192.168.127.12',
database='first',
user='root',
password='<PASSWORD>')
if conn.is_connected():
print('Connected to MySQL database')
except Error as e:
print(e)
return conn
def get_full_json(col = '*'):
'''
Just fetch everything
'''
conn = connect()
mycursor = conn.cursor()
mycursor.execute('''SELECT '''+ col +''' FROM kamernet''')
row_headers=[x[0] for x in mycursor.description]
rv = mycursor.fetchall()
json_data = make_json(row_headers, rv)
return json_data
def get_where_json(commands):
'''
Fetch every thing where ....
'''
conn = connect()
mycursor = conn.cursor()
call = '''SELECT * FROM kamernet'''
call = call + " WHERE"
for com in commands:
call = call + com + 'AND'
call = call[:-4]
mycursor.execute(call)
row_headers=[x[0] for x in mycursor.description]
rv = mycursor.fetchall()
json_data = make_json(row_headers, rv)
return json_data
def predict_price(lat, lon, size, tijd):
'''
Price prediction machine learning magic!
'''
# Get data from mysql
conn = connect()
mycursor = conn.cursor()
mycursor.execute('''SELECT * FROM kamernet''')
# PArse data into json
row_headers=[x[0] for x in mycursor.description]
rv = mycursor.fetchall()
json_data = make_json(row_headers, rv)
# prepare data
df = pd.DataFrame(json_data)
df_dropped = df.drop(['loc', 'url', 'start_date', 'id', 'due_date'], 1)
x = np.array(df_dropped.drop(['price'],1))
df_dropped.dropna(inplace=True)
y = np.array(df_dropped['price'])
# Make Regression model
clf = SVR(kernel='rbf',
C=100, gamma='auto',
degree=3,
epsilon=.1,
coef0=1)
# Train Regression model
clf.fit(x, y)
# Predict with the new data
price = clf.predict(np.array([[lat, lon, size, tijd]]))
return price
def make_json(row_headers, data):
'''
Make a json from the sql list in list
'''
json_data=[]
for result in data:
result = list(result)
result[2] = str(result[2])
result[3] = str(result[3])
json_data.append(dict(zip(row_headers,result)))
return json_data
def from_date(arg, year, month, day):
'''
Date format function
'''
date_time = "{Y}-{m}-{d} 00:00:00".format(Y=year, m=month, d=day)
call = " {} >='{}' ".format(arg, date_time)
return call
def till_date(arg, year, month, day):
'''
Date format function
'''
date_time = "{Y}-{m}-{d} 00:00:00".format(Y=year, m=month, d=day)
call = " {} <='{}' ".format(arg, date_time)
return call
def periods(arg, year, month, day, year2, month2, day2):
'''
Date format function
'''
date_time = "{Y}-{m}-{d} 00:00:00".format(Y=year, m=month, d=day)
date_time2 = "{Y}-{m}-{d} 00:00:00".format(Y=year2, m=month2, d=day2)
call = " {0} >='{1}' AND {0} <= '{2}' ".format(arg, date_time, date_time2)
return call
def commands_parser(raw_url):
'''
Universal, but unique command parser :D
returns list of calls - no string
'''
commands = raw_url.split("&")
calls = []
for com in commands:
if 'from' in com and 'till' in com:
period = com.split("+")
period0 = period[0].split('=')
period1 = period[1].split('=')
t0 = period0[1][4:].split("$")
t1 = period1[1][4:].split("$")
call = periods(period0[0], t0[0], t0[1], t0[2],
t1[0], t1[1], t1[2])
elif 'from' in com:
print(1, com)
period0 = com.split('=')
t0 = period0[1][4:].split("$")
call = from_date(period0[0], t0[0], t0[1], t0[2])
elif 'till' in com:
period0 = com.split('=')
t0 = period0[1][4:].split("$")
call = till_date(period0[0], t0[0], t0[1], t0[2])
else:
arg = [i for i in com if not i.isdigit()]
value = [i for i in com if i.isdigit()]
operator = arg[-1].replace('+','>').replace('-','<')
arg = ''.join(arg[:-1])
value = ''.join(value)
call = " {} {} {} ".format(arg, operator, value)
calls.append(call)
return calls
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0') | en | 0.688102 | # Setup the API to work Cross orgin and behind a proxy # Center of Amsterdam Just return allllll data and test with jsons # Get last <num> uploads, NOT IN USE FOR PROJECT # Get certain columns, NOT IN USE FOR PROJECT Execute Where commands using my own made up syntax Add number to text database Remove number from text database returns only rooms that are within a certain range (km) circle from CENTER # 111 is the differnence between 1 km and 1 degree +- Very bad prediction of the price, but fun to make # help: vars = [lat, lon, size, tijd] # this makes a prediction Returns all the due dates for the calander # Only 2019 support # Parse data in usable format Returns the data for the hour histogram # Make hour avarage # Time stamp # Get this days data --> currently using only last one # Parse data and return # I recently found out the date on my server is 2 hours behind.... Connect to MySQL database Just fetch everything SELECT FROM kamernet Fetch every thing where .... SELECT * FROM kamernet Price prediction machine learning magic! # Get data from mysql SELECT * FROM kamernet # PArse data into json # prepare data # Make Regression model # Train Regression model # Predict with the new data Make a json from the sql list in list Date format function Date format function Date format function Universal, but unique command parser :D returns list of calls - no string | 2.503364 | 3 |
hotpot_km/tests/test_limited_manager.py | datalayer-externals/jupyter-kernel-manager-pool | 4 | 6618774 | <reponame>datalayer-externals/jupyter-kernel-manager-pool
from contextlib import asynccontextmanager
from subprocess import PIPE
from tornado.testing import gen_test
from traitlets.config.loader import Config
try:
from .. import (
LimitedKernelManager,
MaximumKernelsException,
)
except ImportError:
pass
from .utils import TestAsyncKernelManager
# Test that it works as normal with default config
class TestLimitedKernelManager(TestAsyncKernelManager):
__test__ = True
# static so picklable for multiprocessing on Windows
@staticmethod
@asynccontextmanager
async def _get_tcp_km():
c = Config()
km = LimitedKernelManager(config=c)
try:
yield km
finally:
await km.shutdown_all(now=True)
# Test that it works with a max of 4
class TestLimitedKernelManagerApplied(TestAsyncKernelManager):
__test__ = True
# static so picklable for multiprocessing on Windows
@staticmethod
@asynccontextmanager
async def _get_tcp_km():
c = Config()
c.LimitedKernelManager.max_kernels = 4
km = LimitedKernelManager(config=c)
try:
yield km
finally:
await km.shutdown_all()
@gen_test(timeout=60)
async def test_touch_max(self):
async with self._get_tcp_km() as km:
kids = []
for i in range(4):
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
# Remove and add one to make sure we don't count closed kernels
await km.shutdown_kernel(kid)
self.assertNotIn(kid, km)
kids.pop()
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
await km.shutdown_all()
for kid in kids:
self.assertNotIn(kid, km)
@gen_test(timeout=60)
async def test_breach_max(self):
async with self._get_tcp_km() as km:
kids = []
for i in range(4):
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
with self.assertRaises(MaximumKernelsException):
await km.start_kernel(stdout=PIPE, stderr=PIPE)
# Remove and add one to make sure we correctly recovered
await km.shutdown_kernel(kid)
self.assertNotIn(kid, km)
kids.pop()
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
await km.shutdown_all()
for kid in kids:
self.assertNotIn(kid, km)
| from contextlib import asynccontextmanager
from subprocess import PIPE
from tornado.testing import gen_test
from traitlets.config.loader import Config
try:
from .. import (
LimitedKernelManager,
MaximumKernelsException,
)
except ImportError:
pass
from .utils import TestAsyncKernelManager
# Test that it works as normal with default config
class TestLimitedKernelManager(TestAsyncKernelManager):
__test__ = True
# static so picklable for multiprocessing on Windows
@staticmethod
@asynccontextmanager
async def _get_tcp_km():
c = Config()
km = LimitedKernelManager(config=c)
try:
yield km
finally:
await km.shutdown_all(now=True)
# Test that it works with a max of 4
class TestLimitedKernelManagerApplied(TestAsyncKernelManager):
__test__ = True
# static so picklable for multiprocessing on Windows
@staticmethod
@asynccontextmanager
async def _get_tcp_km():
c = Config()
c.LimitedKernelManager.max_kernels = 4
km = LimitedKernelManager(config=c)
try:
yield km
finally:
await km.shutdown_all()
@gen_test(timeout=60)
async def test_touch_max(self):
async with self._get_tcp_km() as km:
kids = []
for i in range(4):
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
# Remove and add one to make sure we don't count closed kernels
await km.shutdown_kernel(kid)
self.assertNotIn(kid, km)
kids.pop()
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
await km.shutdown_all()
for kid in kids:
self.assertNotIn(kid, km)
@gen_test(timeout=60)
async def test_breach_max(self):
async with self._get_tcp_km() as km:
kids = []
for i in range(4):
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
with self.assertRaises(MaximumKernelsException):
await km.start_kernel(stdout=PIPE, stderr=PIPE)
# Remove and add one to make sure we correctly recovered
await km.shutdown_kernel(kid)
self.assertNotIn(kid, km)
kids.pop()
kid = await km.start_kernel(stdout=PIPE, stderr=PIPE)
self.assertIn(kid, km)
kids.append(kid)
await km.shutdown_all()
for kid in kids:
self.assertNotIn(kid, km) | en | 0.900127 | # Test that it works as normal with default config # static so picklable for multiprocessing on Windows # Test that it works with a max of 4 # static so picklable for multiprocessing on Windows # Remove and add one to make sure we don't count closed kernels # Remove and add one to make sure we correctly recovered | 1.954423 | 2 |
apps/odoo/lib/odoo-10.0.post20170615-py2.7.egg/odoo/addons/l10n_be_hr_payroll/models/l10n_be_hr_payroll.py | gtfarng/Odoo_migrade | 1 | 6618775 | # -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
from odoo.addons import decimal_precision as dp
class HrContract(models.Model):
_inherit = 'hr.contract'
travel_reimbursement_amount = fields.Float(string='Reimbursement of travel expenses', digits=dp.get_precision('Payroll'))
car_company_amount = fields.Float(string='Company car employer', digits=dp.get_precision('Payroll'))
car_employee_deduction = fields.Float(string='Company Car Deduction for Worker', digits=dp.get_precision('Payroll'))
misc_onss_deduction = fields.Float(string='Miscellaneous exempt ONSS', digits=dp.get_precision('Payroll'))
meal_voucher_amount = fields.Float(string='Check Value Meal', digits=dp.get_precision('Payroll'))
meal_voucher_employee_deduction = fields.Float(string='Check Value Meal - by worker', digits=dp.get_precision('Payroll'))
insurance_employee_deduction = fields.Float(string='Insurance Group - by worker', digits=dp.get_precision('Payroll'))
misc_advantage_amount = fields.Float(string='Benefits of various nature', digits=dp.get_precision('Payroll'))
additional_net_amount = fields.Float(string='Net supplements', digits=dp.get_precision('Payroll'))
retained_net_amount = fields.Float('Net retained ', digits=dp.get_precision('Payroll'))
class HrEmployee(models.Model):
_inherit = 'hr.employee'
spouse_fiscal_status = fields.Selection([('without income','Without Income'),('with income','With Income')], string='Tax status for spouse')
disabled_spouse_bool = fields.Boolean(string='Disabled Spouse', help='if recipient spouse is declared disabled by law')
disabled_children_bool = fields.Boolean(string='Disabled Children', help='if recipient children is/are declared disabled by law')
resident_bool = fields.Boolean(string='Nonresident', help='if recipient lives in a foreign country')
disabled_children_number = fields.Integer('Number of disabled children')
| # -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import fields, models
from odoo.addons import decimal_precision as dp
class HrContract(models.Model):
_inherit = 'hr.contract'
travel_reimbursement_amount = fields.Float(string='Reimbursement of travel expenses', digits=dp.get_precision('Payroll'))
car_company_amount = fields.Float(string='Company car employer', digits=dp.get_precision('Payroll'))
car_employee_deduction = fields.Float(string='Company Car Deduction for Worker', digits=dp.get_precision('Payroll'))
misc_onss_deduction = fields.Float(string='Miscellaneous exempt ONSS', digits=dp.get_precision('Payroll'))
meal_voucher_amount = fields.Float(string='Check Value Meal', digits=dp.get_precision('Payroll'))
meal_voucher_employee_deduction = fields.Float(string='Check Value Meal - by worker', digits=dp.get_precision('Payroll'))
insurance_employee_deduction = fields.Float(string='Insurance Group - by worker', digits=dp.get_precision('Payroll'))
misc_advantage_amount = fields.Float(string='Benefits of various nature', digits=dp.get_precision('Payroll'))
additional_net_amount = fields.Float(string='Net supplements', digits=dp.get_precision('Payroll'))
retained_net_amount = fields.Float('Net retained ', digits=dp.get_precision('Payroll'))
class HrEmployee(models.Model):
_inherit = 'hr.employee'
spouse_fiscal_status = fields.Selection([('without income','Without Income'),('with income','With Income')], string='Tax status for spouse')
disabled_spouse_bool = fields.Boolean(string='Disabled Spouse', help='if recipient spouse is declared disabled by law')
disabled_children_bool = fields.Boolean(string='Disabled Children', help='if recipient children is/are declared disabled by law')
resident_bool = fields.Boolean(string='Nonresident', help='if recipient lives in a foreign country')
disabled_children_number = fields.Integer('Number of disabled children')
| en | 0.861637 | # -*- coding:utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. | 1.993769 | 2 |
src/waldur_azure/models.py | opennode/nodeconductor-azure | 2 | 6618776 | <filename>src/waldur_azure/models.py<gh_stars>1-10
from __future__ import unicode_literals
from django.core.validators import MaxValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from waldur_core.core import models as core_models
from waldur_core.core.fields import JSONField
from waldur_core.quotas.fields import CounterQuotaField
from waldur_core.quotas.models import QuotaModelMixin
from waldur_core.structure import models as structure_models
class AzureService(structure_models.Service):
Locations = (('Central US', 'Central US'),
('East US 2', 'East US 2'),
('South Central US', 'South Central US'),
('North Europe', 'North Europe'),
('East Asia', 'East Asia'),
('Southeast Asia', 'Southeast Asia'),
('Japan West', 'Japan West'))
projects = models.ManyToManyField(
structure_models.Project, related_name='azure_services', through='AzureServiceProjectLink')
@classmethod
def get_url_name(cls):
return 'azure'
class Quotas(QuotaModelMixin.Quotas):
vm_count = CounterQuotaField(
target_models=lambda: [VirtualMachine],
path_to_scope='service_project_link.service'
)
class AzureServiceProjectLink(structure_models.ServiceProjectLink):
service = models.ForeignKey(AzureService)
cloud_service_name = models.CharField(max_length=255, blank=True)
def get_backend(self):
return super(AzureServiceProjectLink, self).get_backend(
cloud_service_name=self.cloud_service_name)
@classmethod
def get_url_name(cls):
return 'azure-spl'
class Image(structure_models.GeneralServiceProperty):
@classmethod
def get_url_name(cls):
return 'azure-image'
class Size(object):
_meta = 'size'
@classmethod
def get_url_name(cls):
return 'azure-size'
class InstanceEndpoint(core_models.BackendModelMixin, models.Model):
class Protocol(object):
TCP = 'tcp'
UDP = 'udp'
CHOICES = (
(TCP, 'tcp'),
(UDP, 'udp'),
)
class Name(object):
SSH = 'SSH'
RDP = 'Remote Desktop'
CHOICES = (
(SSH, 'SSH'),
(RDP, 'Remote Desktop'),
)
local_port = models.IntegerField(validators=[MaxValueValidator(65535)])
public_port = models.IntegerField(validators=[MaxValueValidator(65535)])
protocol = models.CharField(max_length=3, blank=True, choices=Protocol.CHOICES)
name = models.CharField(max_length=255, blank=True, choices=Name.CHOICES)
instance = models.ForeignKey('VirtualMachine', related_name='endpoints', on_delete=models.PROTECT)
@classmethod
def get_backend_fields(cls):
return super(InstanceEndpoint, cls).get_backend_fields() + (
'local_port', 'public_port', 'protocol', 'name', 'vm',
)
class VirtualMachine(structure_models.VirtualMachine):
service_project_link = models.ForeignKey(
AzureServiceProjectLink, related_name='virtualmachines', on_delete=models.PROTECT)
public_ips = JSONField(default=list, help_text=_('List of public IP addresses'), blank=True)
private_ips = JSONField(default=list, help_text=_('List of private IP addresses'), blank=True)
user_username = models.CharField(max_length=50)
user_password = models.CharField(max_length=50)
@classmethod
def get_url_name(cls):
return 'azure-virtualmachine'
def get_access_url_name(self):
return 'azure-virtualmachine-rdp'
@property
def external_ips(self):
return self.public_ips
@property
def internal_ips(self):
return self.private_ips
@classmethod
def get_backend_fields(cls):
return super(VirtualMachine, cls).get_backend_fields() + ('public_ips', 'private_ips', 'endpoints')
| <filename>src/waldur_azure/models.py<gh_stars>1-10
from __future__ import unicode_literals
from django.core.validators import MaxValueValidator
from django.db import models
from django.utils.translation import ugettext_lazy as _
from waldur_core.core import models as core_models
from waldur_core.core.fields import JSONField
from waldur_core.quotas.fields import CounterQuotaField
from waldur_core.quotas.models import QuotaModelMixin
from waldur_core.structure import models as structure_models
class AzureService(structure_models.Service):
Locations = (('Central US', 'Central US'),
('East US 2', 'East US 2'),
('South Central US', 'South Central US'),
('North Europe', 'North Europe'),
('East Asia', 'East Asia'),
('Southeast Asia', 'Southeast Asia'),
('Japan West', 'Japan West'))
projects = models.ManyToManyField(
structure_models.Project, related_name='azure_services', through='AzureServiceProjectLink')
@classmethod
def get_url_name(cls):
return 'azure'
class Quotas(QuotaModelMixin.Quotas):
vm_count = CounterQuotaField(
target_models=lambda: [VirtualMachine],
path_to_scope='service_project_link.service'
)
class AzureServiceProjectLink(structure_models.ServiceProjectLink):
service = models.ForeignKey(AzureService)
cloud_service_name = models.CharField(max_length=255, blank=True)
def get_backend(self):
return super(AzureServiceProjectLink, self).get_backend(
cloud_service_name=self.cloud_service_name)
@classmethod
def get_url_name(cls):
return 'azure-spl'
class Image(structure_models.GeneralServiceProperty):
@classmethod
def get_url_name(cls):
return 'azure-image'
class Size(object):
_meta = 'size'
@classmethod
def get_url_name(cls):
return 'azure-size'
class InstanceEndpoint(core_models.BackendModelMixin, models.Model):
class Protocol(object):
TCP = 'tcp'
UDP = 'udp'
CHOICES = (
(TCP, 'tcp'),
(UDP, 'udp'),
)
class Name(object):
SSH = 'SSH'
RDP = 'Remote Desktop'
CHOICES = (
(SSH, 'SSH'),
(RDP, 'Remote Desktop'),
)
local_port = models.IntegerField(validators=[MaxValueValidator(65535)])
public_port = models.IntegerField(validators=[MaxValueValidator(65535)])
protocol = models.CharField(max_length=3, blank=True, choices=Protocol.CHOICES)
name = models.CharField(max_length=255, blank=True, choices=Name.CHOICES)
instance = models.ForeignKey('VirtualMachine', related_name='endpoints', on_delete=models.PROTECT)
@classmethod
def get_backend_fields(cls):
return super(InstanceEndpoint, cls).get_backend_fields() + (
'local_port', 'public_port', 'protocol', 'name', 'vm',
)
class VirtualMachine(structure_models.VirtualMachine):
service_project_link = models.ForeignKey(
AzureServiceProjectLink, related_name='virtualmachines', on_delete=models.PROTECT)
public_ips = JSONField(default=list, help_text=_('List of public IP addresses'), blank=True)
private_ips = JSONField(default=list, help_text=_('List of private IP addresses'), blank=True)
user_username = models.CharField(max_length=50)
user_password = models.CharField(max_length=50)
@classmethod
def get_url_name(cls):
return 'azure-virtualmachine'
def get_access_url_name(self):
return 'azure-virtualmachine-rdp'
@property
def external_ips(self):
return self.public_ips
@property
def internal_ips(self):
return self.private_ips
@classmethod
def get_backend_fields(cls):
return super(VirtualMachine, cls).get_backend_fields() + ('public_ips', 'private_ips', 'endpoints')
| none | 1 | 1.958351 | 2 | |
shop/views.py | inoks138/django-shop | 0 | 6618777 | <filename>shop/views.py
import json
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.views import View
from django.views.generic import DetailView, ListView
from mptt.querysets import TreeQuerySet
from shop.forms import AddCommentForm
from shop.models import Comment, CommentDislike, CommentLike
from .models import Product, Category, Brand
from cart.forms import CartAddProductForm
from .tasks import send_notification_mail
from .templatetags.comments_tags import calc_date
def index(request):
    """Render the shop landing page."""
    template_name = 'shop/index.html'
    return render(request, template_name)
class ViewProduct(DetailView):
    """Product detail page with cart/comment forms and a threaded,
    rating-annotated comment tree (paginated by root comment)."""

    model = Product
    context_object_name = 'product'
    # 404 instead of an empty page when the product does not exist.
    allow_empty = False

    def get_context_data(self, *, object_list=None, **kwargs):
        """Assemble the product-page context.

        Adds to the default DetailView context:
        - ``cart_product_form`` / ``add_form``: forms rendered by the template.
        - ``liked`` / ``disliked`` (authenticated users only): ids of comments
          the current user has already liked / disliked.
        - ``page_obj``: a page of root-comment subtrees, every comment carrying
          a ``rating`` attribute equal to likes minus dislikes.

        NOTE(review): the raw SQL below is built with f-strings. The values
        interpolated (``request.user.id``, ``product.id``) are integer primary
        keys produced by the ORM, so injection looks unlikely here, but
        parameterized queries would be safer. ``COUNT()`` with no argument and
        ``IFNULL`` are SQLite-specific — presumably the project runs on SQLite;
        confirm before switching databases.
        """
        context = super().get_context_data(**kwargs)
        context['cart_product_form'] = CartAddProductForm()
        context['add_form'] = AddCommentForm()
        if self.request.user.is_authenticated:
            # Ids of comments the current user liked: join the like rows to
            # the like<->user m2m table filtered to this user.
            query_for_liked_comments = f"""
                SELECT comment_id as id
                FROM shop_commentlike
                JOIN
                    (SELECT commentlike_id
                    FROM shop_commentlike_users
                    WHERE account_id = {self.request.user.id})
                ON(id=commentlike_id)"""
            # Same idea for dislikes.
            query_for_disliked_comments = f"""
                SELECT comment_id as id
                FROM shop_commentdislike
                JOIN
                    (SELECT commentdislike_id
                    FROM shop_commentdislike_users
                    WHERE account_id = {self.request.user.id})
                ON(id=commentdislike_id)"""
            context['liked'] = [comment.id for comment in Comment.objects.raw(query_for_liked_comments)]
            context['disliked'] = [comment.id for comment in Comment.objects.raw(query_for_disliked_comments)]
        # Root comments (MPTT level 0) of this product, each with its net
        # rating (likes - dislikes), in MPTT tree order.
        query_for_root = f"""
            SELECT id, lft, rght, rating FROM shop_comment
            JOIN
                (SELECT id, likes - dislikes as rating
                FROM
                    (SELECT id, IFNULL(likes, 0) as likes
                    FROM shop_comment
                    LEFT JOIN
                        (SELECT shop_comment.id as id, COUNT() as likes FROM shop_comment
                        JOIN shop_commentlike ON shop_commentlike.comment_id = shop_comment.id
                        JOIN shop_commentlike_users ON shop_commentlike.id = shop_commentlike_users.commentlike_id
                        GROUP BY shop_comment.id)
                    USING(id))
                LEFT JOIN
                    (SELECT id, IFNULL(dislikes, 0) as dislikes
                    FROM shop_comment
                    LEFT JOIN
                        (SELECT shop_comment.id as id, COUNT() as dislikes FROM shop_comment
                        JOIN shop_commentdislike ON shop_commentdislike.comment_id = shop_comment.id
                        JOIN shop_commentdislike_users ON shop_commentdislike.id = shop_commentdislike_users.commentdislike_id
                        GROUP BY shop_comment.id)
                    USING(id))
                USING(id))
            USING(id)
            WHERE level = 0 and product_id = {context['product'].id}
            ORDER BY tree_id ASC, lft ASC, id ASC"""
        # Ratings for *every* comment of this product (all levels), in the
        # same tree order as the flattened traversal of the subtrees below.
        query_for_all = f"""
            SELECT id, rating FROM shop_comment
            JOIN
                (SELECT id, likes - dislikes as rating
                FROM
                    (SELECT id, IFNULL(likes, 0) as likes
                    FROM shop_comment
                    LEFT JOIN
                        (SELECT shop_comment.id as id, COUNT() as likes FROM shop_comment
                        JOIN shop_commentlike ON shop_commentlike.comment_id = shop_comment.id
                        JOIN shop_commentlike_users ON shop_commentlike.id = shop_commentlike_users.commentlike_id
                        GROUP BY shop_comment.id)
                    USING(id))
                LEFT JOIN
                    (SELECT id, IFNULL(dislikes, 0) as dislikes
                    FROM shop_comment
                    LEFT JOIN
                        (SELECT shop_comment.id as id, COUNT() as dislikes FROM shop_comment
                        JOIN shop_commentdislike ON shop_commentdislike.comment_id = shop_comment.id
                        JOIN shop_commentdislike_users ON shop_commentdislike.id = shop_commentdislike_users.commentdislike_id
                        GROUP BY shop_comment.id)
                    USING(id))
                USING(id))
            USING(id)
            WHERE product_id = {context['product'].id}
            ORDER BY tree_id ASC, lft ASC, id ASC"""
        all_comments = Comment.objects.raw(query_for_all)
        root_comments = Comment.objects.raw(query_for_root)
        # One fully loaded subtree (root + all descendants) per root comment,
        # with the related rows the template touches pre-selected.
        nodes = [node.get_descendants(include_self=True).select_related('user').select_related('comment_like')
                 .select_related('comment_dislike') for node in root_comments]
        # Copy the precomputed ratings onto the ORM objects. This relies on
        # query_for_all returning rows in exactly the same order as the
        # flattened `nodes` traversal (both ordered by tree_id, lft, id) —
        # keep the two ORDER BY clauses in sync.
        index = 0
        for comment_tree in nodes:
            for comment in comment_tree:
                comment.rating = all_comments[index].rating
                index += 1
        # Paginate by root-comment subtree, 6 subtrees per page.
        paginator = Paginator(nodes, 6)
        page_number = self.request.GET.get('page')
        context['page_obj'] = paginator.get_page(page_number)
        return context
class ProductsByCategory(DetailView):
model = Category
template_name = 'shop/catalog_category.html'
context_object_name = 'category'
allow_empty = False
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
context['brands'] = Brand.objects.all()
context['ancestors_tree'] = context['category'].get_ancestors(include_self=True)
context['products'] = context['category'].get_products()
if 'brands' in self.request.GET:
brands = self.request.GET['brands'].split(',')
context['choosen_brands'] = Brand.objects.filter(slug__in=brands)
context['products'] = context['products'].filter(brand__slug__in=brands)
paginator = Paginator(context['products'], 6)
page_number = self.request.GET.get('page')
context['page_obj'] = paginator.get_page(page_number)
return context
class ProductsCatalog(ListView):
template_name = 'shop/catalog.html'
context_object_name = 'products'
def get_queryset(self):
products = Product.objects.select_related('brand').all()
return products
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.filter(level__in=[0, 1])
context['brands'] = Brand.objects.all()
if 'brands' in self.request.GET:
brands = self.request.GET['brands'].split(',')
context['choosen_brands'] = Brand.objects.filter(slug__in=brands)
context['products'] = context['products'].filter(brand__slug__in=brands)
if 'search' in self.request.GET:
search = self.request.GET['search']
context['products'] = context['products'].filter(
Q(title__iregex=search) | Q(category__title__iregex=search) | Q(brand__title__iregex=search))
paginator = Paginator(context['products'], 6)
page_number = self.request.GET.get('page')
context['page_obj'] = paginator.get_page(page_number)
return context
def get_brands(request):
return render(request, 'shop/brands.html')
class AddComment(View):
def post(self, request):
user = request.user
if not user.is_authenticated:
return JsonResponse({'message': 'Need authentication'})
data = request.POST if request.POST.get('content') else json.loads(request.body.decode("utf-8"))
product = Product.objects.get(id=data['product_id'])
content = data['content']
parent_id = data.get('parent_id')
parent = Comment.objects.get(id=parent_id) if parent_id else None
parent_is_leaf_node = parent.is_leaf_node() if parent else None
comment = Comment.objects.create(user=user, product=product, content=content, parent=parent)
comment_is_root_node = comment.is_root_node()
if parent_id:
parent_comment = Comment.objects.select_related('user').get(id=parent_id)
receiver = Comment.objects.get(id=parent_id).user
send_notification_mail.delay(
user_mail=receiver.email,
user_comment_text=str(parent_comment),
user_comment_date=parent_comment.created_at.strftime("%H:%M - %d %B %Y"),
sender_username=user.username,
product_title=product.title,
url=request.build_absolute_uri(reverse('product', kwargs={'slug': product.slug}))
)
return JsonResponse({
'id': comment.id,
'content': content,
'username': user.username,
'calculated_date': calc_date(comment.created_at),
'parent_id': parent_id,
'parent_was_leaf_node': parent_is_leaf_node,
'comment_is_root_node': comment_is_root_node,
'rating': comment.count_rating(),
'message': 'Отзыв успешно отправлен',
})
class ToggleCommentVote(View):
def post(self, request):
user = request.user
if not user.is_authenticated:
return JsonResponse({'message': 'Need authentication'})
data = request.POST if request.POST.get('comment_id') else json.loads(request.body.decode("utf-8"))
comment_id = data['comment_id']
comment = get_object_or_404(Comment, id=comment_id)
opinion = data['opinion']
try:
comment.comment_like
except Comment.comment_like.RelatedObjectDoesNotExist as identifier:
CommentLike.objects.create(comment=comment)
try:
comment.comment_dislike
except Comment.comment_dislike.RelatedObjectDoesNotExist as identifier:
CommentDislike.objects.create(comment=comment)
if opinion == 'like':
if request.user in comment.comment_like.users.all():
comment.comment_like.users.remove(request.user)
else:
comment.comment_like.users.add(request.user)
comment.comment_dislike.users.remove(request.user)
elif opinion == 'dislike':
if request.user in comment.comment_dislike.users.all():
comment.comment_dislike.users.remove(request.user)
else:
comment.comment_dislike.users.add(request.user)
comment.comment_like.users.remove(request.user)
return JsonResponse({
'rating': comment.count_rating(),
})
| <filename>shop/views.py
import json
from django.core.paginator import Paginator
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.views import View
from django.views.generic import DetailView, ListView
from mptt.querysets import TreeQuerySet
from shop.forms import AddCommentForm
from shop.models import Comment, CommentDislike, CommentLike
from .models import Product, Category, Brand
from cart.forms import CartAddProductForm
from .tasks import send_notification_mail
from .templatetags.comments_tags import calc_date
def index(request):
return render(request, 'shop/index.html')
class ViewProduct(DetailView):
model = Product
context_object_name = 'product'
allow_empty = False
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['cart_product_form'] = CartAddProductForm()
context['add_form'] = AddCommentForm()
if self.request.user.is_authenticated:
query_for_liked_comments = f"""
SELECT comment_id as id
FROM shop_commentlike
JOIN
(SELECT commentlike_id
FROM shop_commentlike_users
WHERE account_id = {self.request.user.id})
ON(id=commentlike_id)"""
query_for_disliked_comments = f"""
SELECT comment_id as id
FROM shop_commentdislike
JOIN
(SELECT commentdislike_id
FROM shop_commentdislike_users
WHERE account_id = {self.request.user.id})
ON(id=commentdislike_id)"""
context['liked'] = [comment.id for comment in Comment.objects.raw(query_for_liked_comments)]
context['disliked'] = [comment.id for comment in Comment.objects.raw(query_for_disliked_comments)]
query_for_root = f"""
SELECT id, lft, rght, rating FROM shop_comment
JOIN
(SELECT id, likes - dislikes as rating
FROM
(SELECT id, IFNULL(likes, 0) as likes
FROM shop_comment
LEFT JOIN
(SELECT shop_comment.id as id, COUNT() as likes FROM shop_comment
JOIN shop_commentlike ON shop_commentlike.comment_id = shop_comment.id
JOIN shop_commentlike_users ON shop_commentlike.id = shop_commentlike_users.commentlike_id
GROUP BY shop_comment.id)
USING(id))
LEFT JOIN
(SELECT id, IFNULL(dislikes, 0) as dislikes
FROM shop_comment
LEFT JOIN
(SELECT shop_comment.id as id, COUNT() as dislikes FROM shop_comment
JOIN shop_commentdislike ON shop_commentdislike.comment_id = shop_comment.id
JOIN shop_commentdislike_users ON shop_commentdislike.id = shop_commentdislike_users.commentdislike_id
GROUP BY shop_comment.id)
USING(id))
USING(id))
USING(id)
WHERE level = 0 and product_id = {context['product'].id}
ORDER BY tree_id ASC, lft ASC, id ASC"""
query_for_all = f"""
SELECT id, rating FROM shop_comment
JOIN
(SELECT id, likes - dislikes as rating
FROM
(SELECT id, IFNULL(likes, 0) as likes
FROM shop_comment
LEFT JOIN
(SELECT shop_comment.id as id, COUNT() as likes FROM shop_comment
JOIN shop_commentlike ON shop_commentlike.comment_id = shop_comment.id
JOIN shop_commentlike_users ON shop_commentlike.id = shop_commentlike_users.commentlike_id
GROUP BY shop_comment.id)
USING(id))
LEFT JOIN
(SELECT id, IFNULL(dislikes, 0) as dislikes
FROM shop_comment
LEFT JOIN
(SELECT shop_comment.id as id, COUNT() as dislikes FROM shop_comment
JOIN shop_commentdislike ON shop_commentdislike.comment_id = shop_comment.id
JOIN shop_commentdislike_users ON shop_commentdislike.id = shop_commentdislike_users.commentdislike_id
GROUP BY shop_comment.id)
USING(id))
USING(id))
USING(id)
WHERE product_id = {context['product'].id}
ORDER BY tree_id ASC, lft ASC, id ASC"""
all_comments = Comment.objects.raw(query_for_all)
root_comments = Comment.objects.raw(query_for_root)
nodes = [node.get_descendants(include_self=True).select_related('user').select_related('comment_like')
.select_related('comment_dislike') for node in root_comments]
index = 0
for comment_tree in nodes:
for comment in comment_tree:
comment.rating = all_comments[index].rating
index += 1
paginator = Paginator(nodes, 6)
page_number = self.request.GET.get('page')
context['page_obj'] = paginator.get_page(page_number)
return context
class ProductsByCategory(DetailView):
model = Category
template_name = 'shop/catalog_category.html'
context_object_name = 'category'
allow_empty = False
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.all()
context['brands'] = Brand.objects.all()
context['ancestors_tree'] = context['category'].get_ancestors(include_self=True)
context['products'] = context['category'].get_products()
if 'brands' in self.request.GET:
brands = self.request.GET['brands'].split(',')
context['choosen_brands'] = Brand.objects.filter(slug__in=brands)
context['products'] = context['products'].filter(brand__slug__in=brands)
paginator = Paginator(context['products'], 6)
page_number = self.request.GET.get('page')
context['page_obj'] = paginator.get_page(page_number)
return context
class ProductsCatalog(ListView):
template_name = 'shop/catalog.html'
context_object_name = 'products'
def get_queryset(self):
products = Product.objects.select_related('brand').all()
return products
def get_context_data(self, *, object_list=None, **kwargs):
context = super().get_context_data(**kwargs)
context['categories'] = Category.objects.filter(level__in=[0, 1])
context['brands'] = Brand.objects.all()
if 'brands' in self.request.GET:
brands = self.request.GET['brands'].split(',')
context['choosen_brands'] = Brand.objects.filter(slug__in=brands)
context['products'] = context['products'].filter(brand__slug__in=brands)
if 'search' in self.request.GET:
search = self.request.GET['search']
context['products'] = context['products'].filter(
Q(title__iregex=search) | Q(category__title__iregex=search) | Q(brand__title__iregex=search))
paginator = Paginator(context['products'], 6)
page_number = self.request.GET.get('page')
context['page_obj'] = paginator.get_page(page_number)
return context
def get_brands(request):
return render(request, 'shop/brands.html')
class AddComment(View):
def post(self, request):
user = request.user
if not user.is_authenticated:
return JsonResponse({'message': 'Need authentication'})
data = request.POST if request.POST.get('content') else json.loads(request.body.decode("utf-8"))
product = Product.objects.get(id=data['product_id'])
content = data['content']
parent_id = data.get('parent_id')
parent = Comment.objects.get(id=parent_id) if parent_id else None
parent_is_leaf_node = parent.is_leaf_node() if parent else None
comment = Comment.objects.create(user=user, product=product, content=content, parent=parent)
comment_is_root_node = comment.is_root_node()
if parent_id:
parent_comment = Comment.objects.select_related('user').get(id=parent_id)
receiver = Comment.objects.get(id=parent_id).user
send_notification_mail.delay(
user_mail=receiver.email,
user_comment_text=str(parent_comment),
user_comment_date=parent_comment.created_at.strftime("%H:%M - %d %B %Y"),
sender_username=user.username,
product_title=product.title,
url=request.build_absolute_uri(reverse('product', kwargs={'slug': product.slug}))
)
return JsonResponse({
'id': comment.id,
'content': content,
'username': user.username,
'calculated_date': calc_date(comment.created_at),
'parent_id': parent_id,
'parent_was_leaf_node': parent_is_leaf_node,
'comment_is_root_node': comment_is_root_node,
'rating': comment.count_rating(),
'message': 'Отзыв успешно отправлен',
})
class ToggleCommentVote(View):
def post(self, request):
user = request.user
if not user.is_authenticated:
return JsonResponse({'message': 'Need authentication'})
data = request.POST if request.POST.get('comment_id') else json.loads(request.body.decode("utf-8"))
comment_id = data['comment_id']
comment = get_object_or_404(Comment, id=comment_id)
opinion = data['opinion']
try:
comment.comment_like
except Comment.comment_like.RelatedObjectDoesNotExist as identifier:
CommentLike.objects.create(comment=comment)
try:
comment.comment_dislike
except Comment.comment_dislike.RelatedObjectDoesNotExist as identifier:
CommentDislike.objects.create(comment=comment)
if opinion == 'like':
if request.user in comment.comment_like.users.all():
comment.comment_like.users.remove(request.user)
else:
comment.comment_like.users.add(request.user)
comment.comment_dislike.users.remove(request.user)
elif opinion == 'dislike':
if request.user in comment.comment_dislike.users.all():
comment.comment_dislike.users.remove(request.user)
else:
comment.comment_dislike.users.add(request.user)
comment.comment_like.users.remove(request.user)
return JsonResponse({
'rating': comment.count_rating(),
})
| en | 0.653097 | SELECT comment_id as id FROM shop_commentlike JOIN (SELECT commentlike_id FROM shop_commentlike_users WHERE account_id = {self.request.user.id}) ON(id=commentlike_id) SELECT comment_id as id FROM shop_commentdislike JOIN (SELECT commentdislike_id FROM shop_commentdislike_users WHERE account_id = {self.request.user.id}) ON(id=commentdislike_id) SELECT id, lft, rght, rating FROM shop_comment JOIN (SELECT id, likes - dislikes as rating FROM (SELECT id, IFNULL(likes, 0) as likes FROM shop_comment LEFT JOIN (SELECT shop_comment.id as id, COUNT() as likes FROM shop_comment JOIN shop_commentlike ON shop_commentlike.comment_id = shop_comment.id JOIN shop_commentlike_users ON shop_commentlike.id = shop_commentlike_users.commentlike_id GROUP BY shop_comment.id) USING(id)) LEFT JOIN (SELECT id, IFNULL(dislikes, 0) as dislikes FROM shop_comment LEFT JOIN (SELECT shop_comment.id as id, COUNT() as dislikes FROM shop_comment JOIN shop_commentdislike ON shop_commentdislike.comment_id = shop_comment.id JOIN shop_commentdislike_users ON shop_commentdislike.id = shop_commentdislike_users.commentdislike_id GROUP BY shop_comment.id) USING(id)) USING(id)) USING(id) WHERE level = 0 and product_id = {context['product'].id} ORDER BY tree_id ASC, lft ASC, id ASC SELECT id, rating FROM shop_comment JOIN (SELECT id, likes - dislikes as rating FROM (SELECT id, IFNULL(likes, 0) as likes FROM shop_comment LEFT JOIN (SELECT shop_comment.id as id, COUNT() as likes FROM shop_comment JOIN shop_commentlike ON shop_commentlike.comment_id = shop_comment.id JOIN shop_commentlike_users ON shop_commentlike.id = shop_commentlike_users.commentlike_id GROUP BY shop_comment.id) USING(id)) LEFT JOIN (SELECT id, IFNULL(dislikes, 0) as dislikes FROM shop_comment LEFT JOIN (SELECT shop_comment.id as id, COUNT() as dislikes FROM shop_comment JOIN shop_commentdislike ON shop_commentdislike.comment_id = shop_comment.id JOIN shop_commentdislike_users ON shop_commentdislike.id = 
shop_commentdislike_users.commentdislike_id GROUP BY shop_comment.id) USING(id)) USING(id)) USING(id) WHERE product_id = {context['product'].id} ORDER BY tree_id ASC, lft ASC, id ASC | 2.072498 | 2 |
examples/gunshots/gunshots_app.py | AssembleSoftware/IoTPy | 28 | 6618778 | <reponame>AssembleSoftware/IoTPy<filename>examples/gunshots/gunshots_app.py<gh_stars>10-100
"""
Creates a multiprocess, multithread application to detect high
readings.
https://www.assemblesoftware.com/accelerometerexample
"""
import sys
import os
import math
import time
sys.path.append(os.path.abspath("../../IoTPy/multiprocessing"))
sys.path.append(os.path.abspath("../../IoTPy/core"))
sys.path.append(os.path.abspath("../../IoTPy/agent_types"))
sys.path.append(os.path.abspath("../../IoTPy/helper_functions"))
# multicore is in multiprocessing
from multicore import multicore, copy_data_to_stream
# stream is in core
from stream import Stream
# op, merge, source, sink are in agent_types
from op import map_element, map_window
from merge import zip_map, merge_window
from sink import stream_to_file
# constants related to reading data from file
TIME_INTERVAL = 0.0005
WINDOW_SIZE = 1500
# threshold for determining anomaly
ANOMALY_THRESHOLD = 0.01
# COMPUTE FUNCTION f FOR THE PROCESS THAT GENERATES LOCAL ANOMALIES
def f(in_streams, out_streams):
"""
Parameters
----------
in_streams: list of Stream
in_streams is usually a list of 3 streams indicating
measurements in x, y and z directions or equivalently
in e, n, and z (for east, north, vertical) directions.
Each triaxial sensor generates x, y and z streams.
out_streams: list of Stream
out_streams has only one element, which is a
Stream of int. An element of this stream is either
1.0 or 0.0. An element is 1.0 to indicate that an
anomaly was detected and is 0.0 otherwise.
"""
# DECLARE STREAMS
# 1. zero_means
# Array of stream with one stream for each stream in in_streams
# zero_means[0, 1, 2] usually represent streams in the x, y, z direction generated by a single triaxial sensor.
zero_means = [Stream('zero_means_' + str(i)) for i in range(len(in_streams))]
# magnitudes is a stream of magnitudes of a vector from its x, y, z values
magnitudes = Stream('magnitudes')
# CREATE AGENTS
# 1. subtract_mean agent
# Define the terminating function
def subtract_mean(window):
return window[-1] - sum(window) / float(len(window))
# Wrap the terminating function to create an agent
for i in range(len(in_streams)):
map_window(
func=subtract_mean,
in_stream=in_streams[i],
out_stream=zero_means[i],
window_size=500, step_size=1,
initial_value=0.0
)
# 2. magnitude agent
# Define the terminating function
def magnitude_of_vector(coordinates):
return math.sqrt(sum([v * v for v in coordinates]))
# Wrap the terminating function to create an agent
zip_map(
func=magnitude_of_vector,
in_streams=zero_means,
out_stream=magnitudes
)
# 3. local anomaly agent
# Define the terminating function
def simple_anomaly(value):
if value > ANOMALY_THRESHOLD:
return 1.0
else:
return 0.0
# Wrap the terminating function to create an agent
map_element(
func=simple_anomaly,
in_stream=magnitudes,
out_stream=out_streams[0]
)
# THE COMPUTE FUNCTION g FOR THE AGGREGATION PROCESS
def g(in_streams, out_streams):
"""
Parameters
----------
in_streams: list of Stream
in_streams is a list of anomaly streams with one stream from
each sensor. An anomaly stream is a sequence of 0.0 and 1.0
where 0.0 indicates no anomaly and 1.0 indicates an anomaly.
out_streams: list of Stream
This list consists of a single stream that contains 0.0
when no global anomaly across all sensors is detected and 1.0
when a global anomaly is detected.
"""
# DECLARE STREAMS
# Internal steam used in g
regional_anomalies = Stream('Regional anomalies')
# CREATE AGENTS
# 1. aggregation agent
# Define the terminating function
def aggregate(windows):
number_local_anomalies = [any(window) for window in windows].count(True)
if number_local_anomalies > 1:
return 1.0
else:
return 0.0
# Wrap the terminating function to create an agent
merge_window(
func=aggregate,
in_streams=in_streams, out_stream=regional_anomalies,
window_size=250, step_size=1, initial_value=0.0
)
# 2. agent that copies stream to file
for i in range(len(in_streams)):
stream_to_file(in_streams[i], 'Anomalies_' + str(i + 1) + '_.txt')
stream_to_file(regional_anomalies, 'regional_anomalies.txt')
if __name__ == '__main__':
# dictionary of sensors and their source files
sensors = \
{
'S1':
{
'e': 'S1.e.txt',
'n': 'S1.n.txt',
'z': 'S1.z.txt'
},
'S2':
{
'e': 'S2.e.txt',
'n': 'S2.n.txt',
'z': 'S2.z.txt'
},
'S3':
{
'e': 'S3.e.txt',
'n': 'S3.n.txt',
'z': 'S3.z.txt'
}
}
# 'e' for east, 'n' for north, 'z' for vertical
directions = ['e', 'n', 'z']
# source_sensor_direction is a dict {sensor name: {direction: source function}}
# where source function is a function that copies data to stream
sensor_source_dict = {}
for sensor_name in sensors.keys():
sensor_source_dict[sensor_name] = {}
for direction in directions:
# function for source must have two arguments: process and name of the stream
def source_thread_target(proc, stream_name, filename=sensors[sensor_name][direction]):
with open(filename, 'r') as fpin:
data = list(map(float, fpin))
for i in range(0, len(data), WINDOW_SIZE):
window = data[i:i+WINDOW_SIZE]
copy_data_to_stream(window, proc, stream_name)
time.sleep(TIME_INTERVAL)
return
sensor_source_dict[sensor_name][direction] = source_thread_target
processes = {}
# define processes for all the sensors
for sensor_name in sensors.keys():
processes[sensor_name + '_process'] = \
{
'in_stream_names_types': [('in_' + direction, 'f') for direction in directions],
'out_stream_names_types': [('out', 'f')],
'compute_func': f,
'sources':
{
'source_' + direction:
{
'type': 'f',
'func': sensor_source_dict[sensor_name][direction]
}
for direction in directions
},
'actuators': {}
}
# define the aggregation process
processes['aggregation_process'] = \
{
'in_stream_names_types': [('in_' + sensor_name, 'f') for sensor_name in sensors.keys()],
'out_stream_names_types': [],
'compute_func': g,
'sources': {},
'actuators': {}
}
# make connections between processes
connections = {}
for sensor_name in sensors.keys():
process_name = sensor_name + '_process'
connections[process_name] = \
{
'out': [('aggregation_process', 'in_' + sensor_name)]
}
for direction in directions:
connections[process_name]['source_' + direction] = [(process_name, 'in_' + direction)]
connections['aggregation_process'] = {}
multicore(processes, connections)
| """
Creates a multiprocess, multithread application to detect high
readings.
https://www.assemblesoftware.com/accelerometerexample
"""
import sys
import os
import math
import time
sys.path.append(os.path.abspath("../../IoTPy/multiprocessing"))
sys.path.append(os.path.abspath("../../IoTPy/core"))
sys.path.append(os.path.abspath("../../IoTPy/agent_types"))
sys.path.append(os.path.abspath("../../IoTPy/helper_functions"))
# multicore is in multiprocessing
from multicore import multicore, copy_data_to_stream
# stream is in core
from stream import Stream
# op, merge, source, sink are in agent_types
from op import map_element, map_window
from merge import zip_map, merge_window
from sink import stream_to_file
# constants related to reading data from file
TIME_INTERVAL = 0.0005
WINDOW_SIZE = 1500
# threshold for determining anomaly
ANOMALY_THRESHOLD = 0.01
# COMPUTE FUNCTION f FOR THE PROCESS THAT GENERATES LOCAL ANOMALIES
def f(in_streams, out_streams):
"""
Parameters
----------
in_streams: list of Stream
in_streams is usually a list of 3 streams indicating
measurements in x, y and z directions or equivalently
in e, n, and z (for east, north, vertical) directions.
Each triaxial sensor generates x, y and z streams.
out_streams: list of Stream
out_streams has only one element, which is a
Stream of int. An element of this stream is either
1.0 or 0.0. An element is 1.0 to indicate that an
anomaly was detected and is 0.0 otherwise.
"""
# DECLARE STREAMS
# 1. zero_means
# Array of stream with one stream for each stream in in_streams
# zero_means[0, 1, 2] usually represent streams in the x, y, z direction generated by a single triaxial sensor.
zero_means = [Stream('zero_means_' + str(i)) for i in range(len(in_streams))]
# magnitudes is a stream of magnitudes of a vector from its x, y, z values
magnitudes = Stream('magnitudes')
# CREATE AGENTS
# 1. subtract_mean agent
# Define the terminating function
def subtract_mean(window):
return window[-1] - sum(window) / float(len(window))
# Wrap the terminating function to create an agent
for i in range(len(in_streams)):
map_window(
func=subtract_mean,
in_stream=in_streams[i],
out_stream=zero_means[i],
window_size=500, step_size=1,
initial_value=0.0
)
# 2. magnitude agent
# Define the terminating function
def magnitude_of_vector(coordinates):
return math.sqrt(sum([v * v for v in coordinates]))
# Wrap the terminating function to create an agent
zip_map(
func=magnitude_of_vector,
in_streams=zero_means,
out_stream=magnitudes
)
# 3. local anomaly agent
# Define the terminating function
def simple_anomaly(value):
if value > ANOMALY_THRESHOLD:
return 1.0
else:
return 0.0
# Wrap the terminating function to create an agent
map_element(
func=simple_anomaly,
in_stream=magnitudes,
out_stream=out_streams[0]
)
# THE COMPUTE FUNCTION g FOR THE AGGREGATION PROCESS
def g(in_streams, out_streams):
"""
Parameters
----------
in_streams: list of Stream
in_streams is a list of anomaly streams with one stream from
each sensor. An anomaly stream is a sequence of 0.0 and 1.0
where 0.0 indicates no anomaly and 1.0 indicates an anomaly.
out_streams: list of Stream
This list consists of a single stream that contains 0.0
when no global anomaly across all sensors is detected and 1.0
when a global anomaly is detected.
"""
# DECLARE STREAMS
# Internal steam used in g
regional_anomalies = Stream('Regional anomalies')
# CREATE AGENTS
# 1. aggregation agent
# Define the terminating function
def aggregate(windows):
number_local_anomalies = [any(window) for window in windows].count(True)
if number_local_anomalies > 1:
return 1.0
else:
return 0.0
# Wrap the terminating function to create an agent
merge_window(
func=aggregate,
in_streams=in_streams, out_stream=regional_anomalies,
window_size=250, step_size=1, initial_value=0.0
)
# 2. agent that copies stream to file
for i in range(len(in_streams)):
stream_to_file(in_streams[i], 'Anomalies_' + str(i + 1) + '_.txt')
stream_to_file(regional_anomalies, 'regional_anomalies.txt')
if __name__ == '__main__':
# dictionary of sensors and their source files
sensors = \
{
'S1':
{
'e': 'S1.e.txt',
'n': 'S1.n.txt',
'z': 'S1.z.txt'
},
'S2':
{
'e': 'S2.e.txt',
'n': 'S2.n.txt',
'z': 'S2.z.txt'
},
'S3':
{
'e': 'S3.e.txt',
'n': 'S3.n.txt',
'z': 'S3.z.txt'
}
}
# 'e' for east, 'n' for north, 'z' for vertical
directions = ['e', 'n', 'z']
# source_sensor_direction is a dict {sensor name: {direction: source function}}
# where source function is a function that copies data to stream
sensor_source_dict = {}
for sensor_name in sensors.keys():
sensor_source_dict[sensor_name] = {}
for direction in directions:
# function for source must have two arguments: process and name of the stream
def source_thread_target(proc, stream_name, filename=sensors[sensor_name][direction]):
with open(filename, 'r') as fpin:
data = list(map(float, fpin))
for i in range(0, len(data), WINDOW_SIZE):
window = data[i:i+WINDOW_SIZE]
copy_data_to_stream(window, proc, stream_name)
time.sleep(TIME_INTERVAL)
return
sensor_source_dict[sensor_name][direction] = source_thread_target
processes = {}
# define processes for all the sensors
for sensor_name in sensors.keys():
processes[sensor_name + '_process'] = \
{
'in_stream_names_types': [('in_' + direction, 'f') for direction in directions],
'out_stream_names_types': [('out', 'f')],
'compute_func': f,
'sources':
{
'source_' + direction:
{
'type': 'f',
'func': sensor_source_dict[sensor_name][direction]
}
for direction in directions
},
'actuators': {}
}
# define the aggregation process
processes['aggregation_process'] = \
{
'in_stream_names_types': [('in_' + sensor_name, 'f') for sensor_name in sensors.keys()],
'out_stream_names_types': [],
'compute_func': g,
'sources': {},
'actuators': {}
}
# make connections between processes
connections = {}
for sensor_name in sensors.keys():
process_name = sensor_name + '_process'
connections[process_name] = \
{
'out': [('aggregation_process', 'in_' + sensor_name)]
}
for direction in directions:
connections[process_name]['source_' + direction] = [(process_name, 'in_' + direction)]
connections['aggregation_process'] = {}
multicore(processes, connections) | en | 0.780929 | Creates a multiprocess, multithread application to detect high readings. https://www.assemblesoftware.com/accelerometerexample # multicore is in multiprocessing # stream is in core # op, merge, source, sink are in agent_types # constants related to reading data from file # threshold for determining anomaly # COMPUTE FUNCTION f FOR THE PROCESS THAT GENERATES LOCAL ANOMALIES Parameters ---------- in_streams: list of Stream in_streams is usually a list of 3 streams indicating measurements in x, y and z directions or equivalently in e, n, and z (for east, north, vertical) directions. Each triaxial sensor generates x, y and z streams. out_streams: list of Stream out_streams has only one element, which is a Stream of int. An element of this stream is either 1.0 or 0.0. An element is 1.0 to indicate that an anomaly was detected and is 0.0 otherwise. # DECLARE STREAMS # 1. zero_means # Array of stream with one stream for each stream in in_streams # zero_means[0, 1, 2] usually represent streams in the x, y, z direction generated by a single triaxial sensor. # magnitudes is a stream of magnitudes of a vector from its x, y, z values # CREATE AGENTS # 1. subtract_mean agent # Define the terminating function # Wrap the terminating function to create an agent # 2. magnitude agent # Define the terminating function # Wrap the terminating function to create an agent # 3. local anomaly agent # Define the terminating function # Wrap the terminating function to create an agent # THE COMPUTE FUNCTION g FOR THE AGGREGATION PROCESS Parameters ---------- in_streams: list of Stream in_streams is a list of anomaly streams with one stream from each sensor. An anomaly stream is a sequence of 0.0 and 1.0 where 0.0 indicates no anomaly and 1.0 indicates an anomaly. out_streams: list of Stream This list consists of a single stream that contains 0.0 when no global anomaly across all sensors is detected and 1.0 when a global anomaly is detected. 
# DECLARE STREAMS # Internal steam used in g # CREATE AGENTS # 1. aggregation agent # Define the terminating function # Wrap the terminating function to create an agent # 2. agent that copies stream to file # dictionary of sensors and their source files # 'e' for east, 'n' for north, 'z' for vertical # source_sensor_direction is a dict {sensor name: {direction: source function}} # where source function is a function that copies data to stream # function for source must have two arguments: process and name of the stream # define processes for all the sensors # define the aggregation process # make connections between processes | 3.345968 | 3 |
src/appenlight_demo/models/user.py | AppEnlight/demo-application | 0 | 6618779 | <filename>src/appenlight_demo/models/user.py
from sqlalchemy import (
Column,
Index,
Integer,
Unicode,
)
from sqlalchemy.orm import (
relationship
)
from .meta import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(Unicode)
addresses = relationship('Address')
| <filename>src/appenlight_demo/models/user.py
from sqlalchemy import (
Column,
Index,
Integer,
Unicode,
)
from sqlalchemy.orm import (
relationship
)
from .meta import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
username = Column(Unicode)
addresses = relationship('Address')
| none | 1 | 2.203376 | 2 | |
controllers/eid.py | ecohealthalliance/sicki | 0 | 6618780 | @require_logged_in
def model():
return json.dumps(eidEvents.model)
@require_role(admin_role)
def create():
    # Admin-only endpoint; event creation is not implemented yet.
    pass
@require_logged_in
def read():
    """Return one EID event (id taken from the first URL arg) as JSON."""
    event_id = request.args(0)
    event = eidEvents.read(event_id)
    return json.dumps(event)
@require_logged_in
def read_all():
    """Return every EID event as JSON, optionally limited to selected fields.

    The optional ``fields`` request var is itself a JSON-encoded list and
    is decoded before being handed to the event store.
    """
    raw_fields = request.vars.get('fields')
    selected = json.loads(raw_fields) if raw_fields else raw_fields
    return json.dumps(eidEvents.read_all(selected))
@require_role(admin_role)
def update():
    """Update the EID event named by the first URL arg.

    Every request var arrives JSON-encoded; the decoded key/value mapping
    is passed to the event store as the new attribute set.  Returns a
    JSON-encoded ``True`` on completion.
    """
    eid_id = request.args(0)
    # Decode each JSON-encoded request var.  Using .items() (instead of
    # the Python-2-only .iteritems()) is behaviourally identical here and
    # keeps the code portable across Python versions.
    attributes = {key: json.loads(value)
                  for key, value in request.vars.items()}
    eidEvents.update(eid_id, attributes)
    return json.dumps(True)
@require_role(admin_role)
def delete():
    # Admin-only endpoint; event deletion is not implemented yet.
    pass
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
## Everything Below this line is suspect, may need to be refactored
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# GET /sicki/eid/
@require_logged_in
def index():
    """List all EID events, sorted by the requested field (default: name)."""
    sort_key = request.vars.get('sort')
    if not sort_key:
        sort_key = 'event_name'
    return dict(events=get_all_events(sort_key), sort=sort_key)
# GET /sicki/eid/events api call returns all events as JSON string.
def events():
    # JSON API: every event, sorted by name.  NOTE(review): unlike the
    # other endpoints here, this one has no @require_logged_in decorator.
    return get_all_events_json('event_name')
@require_logged_in
def view():
    """Render one event together with its reference names and map geodata."""
    event_id = request.args(0)
    event = get_event(event_id)
    references = get_ref_names(event_id)
    geodata = get_map(event.get('map'))
    return {'event': event, 'references': references, 'geodata': geodata}
@require_logged_in
def fields():
    # Expose the module-level event field definitions to the template.
    return {'event_fields': event_fields}
@require_logged_in
def stats():
    """Show an event plus the current user's edit proposals for it."""
    event_id = request.args(0)
    return {
        'event': get_event(event_id),
        'proposals': get_proposals(event_id, auth.user.id),
    }
@require_logged_in
def event_map():
    """Render an event alongside the geodata document named by its 'map' field."""
    event_id = request.args(0)
    event = get_event(event_id)
    map_doc = mongo.maps.find_one({'name': event.get('map')})
    return {'event': event, 'geodata': map_doc}
@require_logged_in
def wiki():
    """Show the wiki page for an event, creating the page on first visit."""
    event_id = request.args(0)
    page = load_page(event_id)
    if not page:
        # No page yet: insert an empty one, then reload it.
        insert_page(event_id)
        page = load_page(event_id)
    return {'event': get_event(event_id), 'page': page}
@require_logged_in
def event_refs():
    """Show an event together with the names of its references."""
    event_id = request.args(0)
    return {'event': get_event(event_id), 'refs': get_ref_names(event_id)}
# GET /sicki/eid/proposals/<eid_id>
@require_logged_in
def proposals():
    # Alias: proposals and stats render the same event/proposals payload.
    return stats()
# POST /sicki/eid/propose/<eid_id>/<field>
# Propose a change the field of an EID event. The body of the request contains the proposed value.
# return the proposed value's id
@require_logged_in
def propose():
    """Propose a new value for one field of an EID event.

    URL args: <eid_id>/<field>.  Request vars 'value', 'refs', 'user' and
    'date' are JSON-encoded.  Proposals by admins are applied immediately;
    everyone else's are stored for review.  Returns a JSON object with the
    proposal id and the (possibly hook-transformed) value.
    """
    eid_id = request.args(0)
    field = request.args(1)
    value = json.loads(request.vars.get('value'))
    refs = json.loads(request.vars.get('refs'))
    user = json.loads(request.vars.get('user'))
    date = json.loads(request.vars.get('date'))
    event_field = get_field(field)
    # Field-specific upload hooks, looked up once instead of three times.
    hooks = upload_hooks.get(event_field['name']) or {}
    pre_hook = hooks.get('pre')
    if pre_hook:
        # Run the 'pre' hook before the value is stored.
        value = pre_hook(value)
    if has_role(admin_role):
        # Admin proposals are accepted on the spot.
        edit_field(eid_id, field, value)
        add_refs(eid_id, refs)
        prop_id = 1
    else:
        prop_id = propose_edit(eid_id, field, value, refs, user, date)
    post_hook = hooks.get('post')
    if post_hook:
        # Run the 'post' hook on the value that is echoed back to the client.
        value = post_hook(value)
    return json.dumps({
        'id': prop_id,
        'value': value
    })
# POST /sicki/eid/reject/<prop_id>
# The proposal gets status REJECTED, and modification does not happen
@require_role (admin_role)
def reject():
    """Admin-only: mark a proposal as REJECTED without applying it."""
    prop_id = request.args(0)
    if not get_proposal(prop_id):
        # Unknown proposal id.
        raise HTTP(400)
    update_proposal_status(prop_id, REJECTED)
    return 1
# POST /sicki/eid/accept/<prop_id>
# Accept a proposal. The proposal get status ACCEPTED and the corresponding field is modified.
@require_role (admin_role)
def accept():
    """Admin-only: apply a proposal's value and references, then mark it ACCEPTED."""
    prop_id = request.args(0)
    prop = get_proposal(prop_id)
    if not prop:
        # Unknown proposal id.
        raise HTTP(400)
    eid = str(prop['eid'])
    edit_field(eid, prop['field'], prop['value'])
    add_refs(eid, prop['refs'])
    update_proposal_status(prop_id, ACCEPTED)
    return 1
# POST /sicki/eid/upvote/<prop_id>
@require_logged_in
def upvote():
    """Record a positive vote by the current user on a proposal."""
    prop_id = request.args(0)
    vote(prop_id, auth.user.id, True)
    return True
# POST /sicki/eid/downvote/<prop_id>
@require_logged_in
def downvote():
    """Record a negative vote by the current user on a proposal."""
    prop_id = request.args(0)
    vote(prop_id, auth.user.id, False)
    return True
@require_logged_in
def unvote():
    """Remove the current user's vote from a proposal."""
    prop_id = request.args(0)
    remove_vote(prop_id, auth.user.id)
    return True
@require_logged_in
def refs():
    # Reference names across all events (get_ref_names called without an id).
    return {'refs': get_ref_names ()}
@require_logged_in
def ref():
    """Return one reference document as JSON, minus its Mongo '_id' key."""
    ref_id = request.args(0)  # renamed: 'id' shadowed the builtin
    result = mongo.refs.find_one({'_id': ref_id})
    # Strip the ObjectId so the document is JSON-serializable.
    del result['_id']
    return json.dumps(result)
def load_map():
    """Return the geodata document whose name matches the 'name' request var."""
    map_name = request.vars.get('name')
    document = mongo.maps.find_one({'name': map_name})
    return json.dumps(document)
@require_logged_in
def eid_map():
    # Bare view: all content comes from the template / client side.
    return {}
| @require_logged_in
def model():
return json.dumps(eidEvents.model)
@require_role(admin_role)
def create():
pass
@require_logged_in
def read():
eid_id = request.args(0)
return json.dumps(eidEvents.read(eid_id))
@require_logged_in
def read_all():
fields = request.vars.get('fields')
if fields:
fields = json.loads(fields)
return json.dumps(eidEvents.read_all(fields))
@require_role(admin_role)
def update():
eid_id = request.args(0)
attributes = {}
for key, value in request.vars.iteritems():
attributes[key] = json.loads(value)
eidEvents.update(eid_id, attributes)
return json.dumps(True)
@require_role(admin_role)
def delete():
pass
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
## Everything Below this line is suspect, may need to be refactored
## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# GET /sicki/eid/
@require_logged_in
def index():
sort = request.vars.get ('sort') or 'event_name'
return {'events': get_all_events (sort), 'sort': sort}
# GET /sicki/eid/events api call returns all events as JSON string.
def events():
return get_all_events_json('event_name')
@require_logged_in
def view():
eid_id = request.args (0)
event = get_event (eid_id)
return {'event': event, 'references': get_ref_names (eid_id), 'geodata': get_map (event.get ('map'))}
@require_logged_in
def fields():
return {'event_fields': event_fields}
@require_logged_in
def stats():
eid_id = request.args (0)
return {'event': get_event (eid_id), 'proposals': get_proposals (eid_id, auth.user.id)}
@require_logged_in
def event_map():
eid_id = request.args (0)
event = get_event (eid_id)
geodata = mongo.maps.find_one ({'name': event.get ('map')})
return {'event': event, 'geodata': geodata}
@require_logged_in
def wiki():
eid_id = request.args (0)
page = load_page (eid_id)
if not page:
insert_page (eid_id)
page = load_page (eid_id)
return {'event': get_event (eid_id), 'page': page}
@require_logged_in
def event_refs():
eid_id = request.args (0)
return {'event': get_event (eid_id), 'refs': get_ref_names (eid_id)}
# GET /sicki/eid/proposals/<eid_id>
@require_logged_in
def proposals():
return stats()
# POST /sicki/eid/propose/<eid_id>/<field>
# Propose a change the field of an EID event. The body of the request contains the proposed value.
# return the proposed value's id
@require_logged_in
def propose():
eid_id = request.args (0)
field = request.args (1)
value = json.loads (request.vars.get ('value'))
refs = json.loads (request.vars.get ('refs'))
user = json.loads (request.vars.get('user'))
date = json.loads (request.vars.get('date'))
event_field = get_field (field)
# Perform any pre-hooks or mappings before adding the value to the db
if upload_hooks.get (event_field['name']):
if upload_hooks.get (event_field['name']).get ('pre'):
value = upload_hooks.get (event_field['name']).get ('pre') (value)
# Automatically accept proposals by admins
if has_role (admin_role):
edit_field (eid_id, field, value)
add_refs (eid_id, refs)
prop_id = 1
else:
prop_id = propose_edit (eid_id, field, value, refs, user, date)
if upload_hooks.get (event_field['name']):
if upload_hooks.get (event_field['name']).get ('post'):
value = upload_hooks.get (event_field['name']).get ('post') (value)
return json.dumps ({
'id': prop_id,
'value': value
})
# POST /sicki/eid/reject/<prop_id>
# The proposal gets status REJECTED, and modification does not happen
@require_role (admin_role)
def reject():
prop_id = request.args (0)
prop = get_proposal (prop_id)
if not prop:
raise HTTP (400)
update_proposal_status(prop_id,REJECTED)
return 1
# POST /sicki/eid/accept/<prop_id>
# Accept a proposal. The proposal get status ACCEPTED and the corresponding field is modified.
@require_role (admin_role)
def accept():
prop_id = request.args (0)
prop = get_proposal (prop_id)
if not prop:
raise HTTP (400)
edit_field (str (prop['eid']), prop['field'], prop['value'])
add_refs (str (prop['eid']), prop['refs'])
update_proposal_status(prop_id,ACCEPTED)
#redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])]))
return 1
# POST /sicki/eid/upvote/<prop_id>
@require_logged_in
def upvote():
prop_id = request.args (0)
vote (prop_id, auth.user.id, True)
prop = get_proposal (prop_id)
#redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])]))
return True
# POST /sicki/eid/downvote/<prop_id>
@require_logged_in
def downvote():
prop_id = request.args (0)
vote (prop_id, auth.user.id, False)
prop = get_proposal (prop_id)
#redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])]))
return True
@require_logged_in
def unvote():
prop_id = request.args (0)
remove_vote (prop_id, auth.user.id)
prop = get_proposal (prop_id)
#redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])]))
return True
@require_logged_in
def refs():
return {'refs': get_ref_names ()}
@require_logged_in
def ref():
id = request.args (0);
result = mongo.refs.find_one ({'_id': id})
del result['_id']
return json.dumps (result)
def load_map():
name = request.vars.get ('name')
geodata = mongo.maps.find_one ({'name': name})
return json.dumps (geodata)
@require_logged_in
def eid_map():
return {}
| en | 0.732776 | ## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! ## Everything Below this line is suspect, may need to be refactored ## !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # GET /sicki/eid/ # GET /sicki/eid/events api call returns all events as JSON string. # GET /sicki/eid/proposals/<eid_id> # POST /sicki/eid/propose/<eid_id>/<field> # Propose a change the field of an EID event. The body of the request contains the proposed value. # return the proposed value's id # Perform any pre-hooks or mappings before adding the value to the db # Automatically accept proposals by admins # POST /sicki/eid/reject/<prop_id> # The proposal gets status REJECTED, and modification does not happen # POST /sicki/eid/accept/<prop_id> # Accept a proposal. The proposal get status ACCEPTED and the corresponding field is modified. #redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])])) # POST /sicki/eid/upvote/<prop_id> #redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])])) # POST /sicki/eid/downvote/<prop_id> #redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])])) #redirect (URL (r = request, c = 'eid', f = 'view', args = [str (prop['eid'])])) | 2.026114 | 2 |
Scripts/Resources/sap_resources.py | illives/R_FATMAN_J1 | 0 | 6618781 | <gh_stars>0
import sys, win32com.client, subprocess, time, os
import pandas as pd
import numpy as np
from time import sleep
from .gui_resources import gui
class sap_tools:
    def __init__(self, user, password, data, sap_path, file_path):
        """Keep SAP credentials, payload data and filesystem paths.

        NOTE(review): the J1B1N/J1BNFE/J1B3N methods below are defined
        without ``self`` and take these values as plain arguments, so the
        attributes stored here are not read by them.
        """
        self.user = user          # SAP logon user
        self.password = password  # SAP logon password
        self.data = data          # extra payload; unused by the methods below
        self.sap_path = sap_path  # path to the saplogon executable
        self.file_path = file_path  # directory holding the spreadsheets
    def J1B1N(user, password, sap_path, file_path):
        """Drive SAP transaction J1B1N to create one nota fiscal per row.

        Reads ``Base_baixa.xlsx`` from *file_path*, fills the J1B1N screens
        for each row, saves the NF, and writes every row plus the generated
        SAP document number to ``Base_baixa.csv``.

        user      -- SAP username
        password  -- SAP password
        sap_path  -- path to the saplogon executable
        file_path -- directory holding Base_baixa.xlsx / Base_baixa.csv

        NOTE(review): defined without ``self`` -- call it on the class
        (``sap_tools.J1B1N(...)``), not on an instance.
        """
        try:
            # Launch SAP GUI and attach via the COM scripting interface.
            subprocess.Popen(sap_path)
            sleep(2)
            SapGuiAuto = win32com.client.GetObject('SAPGUI')
            if not type(SapGuiAuto) == win32com.client.CDispatch:
                return
            application = SapGuiAuto.GetScriptingEngine
            if not type(application) == win32com.client.CDispatch:
                SapGuiAuto = None
                return
            connection = application.OpenConnection("001 - SAP PRODUÇÃO CLARO BRASIL – CLICAR AQUI")
            if not type(connection) == win32com.client.CDispatch:
                application = None
                SapGuiAuto = None
                return
            session = connection.Children(0)
            if not type(session) == win32com.client.CDispatch:
                connection = None
                application = None
                SapGuiAuto = None
                return
            try:
                # Input spreadsheet; columns 0-27 are read positionally below
                # (assumed layout -- confirm against the spreadsheet).
                df = pd.read_excel(file_path + 'Base_baixa.xlsx')
                ultima = len(df)
                session.findById("wnd[0]").maximize
                session.findById("wnd[0]/usr/txtRSYST-BNAME").text = (user)
                session.findById("wnd[0]/usr/pwdRSYST-BCODE").text = (password)
                session.findById("wnd[0]").sendVKey(0)
                # Accumulates the output CSV, starting with the header line.
                lista = []
                texto = ('ctg_nf ; empresa ; loc_neg ; func_parceiro ; id_parceiro ; t_item_nf ; material ; centro ; qtd ; preco ; cfop ; dir_fisc_icm ; dir_fisc_ipi ; confins ; pis ; tipo_imposto_01 ; tipo_imposto_02 ; tipo Imposto_03 ; mont_basico_ICM3 ; mont_basico_ICS3 ; taxa_imposto_0102 ; taxa_imposto_03 ; outra_base ; msg ; doc_num')
                lista.append(texto)
                for k in range(0,ultima):
                    # Unpack the row by column position.
                    ctg_nf = df.iloc[k,0]
                    empresa = df.iloc[k,1]
                    if len(str(empresa)) <= 1:
                        # Zero-pad single-digit company codes, e.g. 1 -> '001'.
                        empresa = str(f'00{empresa}')
                    loc_neg = df.iloc[k,2]
                    func_parceiro = df.iloc[k,3]
                    id_parceiro = df.iloc[k,4]
                    t_item_nf = df.iloc[k,5]
                    material = df.iloc[k,6]
                    centro = df.iloc[k,7]
                    qtd = df.iloc[k,8]
                    preco = df.iloc[k,9]
                    cfop = df.iloc[k,10]
                    dir_fisc_icm = df.iloc[k,11]
                    dir_fisc_ipi = df.iloc[k,12]
                    confins = df.iloc[k,13]
                    pis = df.iloc[k,14]
                    tipo_imposto_01 = df.iloc[k,15]
                    tipo_imposto_02 = df.iloc[k,16]
                    try:
                        tipo_imposto_03 = df.iloc[k,17]
                    except:
                        # Column 17 may be absent; treat as empty.
                        tipo_imposto_03 = ''
                    mont_basico_ICM3 = df.iloc[k,18]
                    mont_basico_ICS3 = df.iloc[k,19]
                    taxa_imposto_0102 = df.iloc[k,20]
                    taxa_imposto_03 = df.iloc[k,21]
                    outra_base = df.iloc[k,22]
                    msg = df.iloc[k,23]
                    Calculo_ICM3 = df.iloc[k,26]
                    Calculo_ICS3 = df.iloc[k,27]
                    # Open transaction J1B1N and fill the header screen.
                    session.findById("wnd[0]/tbar[0]/okcd").text = "J1B1N"
                    session.findById("wnd[0]").sendVKey(0)
                    session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-NFTYPE").text = ctg_nf
                    session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-BUKRS").text = empresa
                    session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-BRANCH").text = loc_neg
                    session.findById("wnd[0]/usr/cmbJ_1BDYDOC-PARVW").key = "WE"
                    session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-PARID").text = id_parceiro
                    session.findById("wnd[0]").sendVKey(0)
                    # Tab 4: header message text.
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB4").select()
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB4/ssubHEADER_TAB:SAPLJ1BB2:2400/tblSAPLJ1BB2MESSAGE_CONTROL/txtJ_1BDYFTX-MESSAGE[0,0]").text = msg
                    # Tab 5: transport data (fixed incoterm/weights).
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5").select()
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/ctxtJ_1BDYDOC-INCO1").text = "CIF"
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/txtJ_1BDYDOC-ANZPK").text = "1"
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/txtJ_1BDYDOC-NTGEW").text = "100"
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/txtJ_1BDYDOC-BRGEW").text = "100"
                    # Tab 1: the single item line.
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1").select()
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-ITMTYP[1,0]").text = t_item_nf
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-MATNR[2,0]").text = material
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-WERKS[3,0]").text = centro
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/txtJ_1BDYLIN-MENGE[6,0]").text = qtd
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/txtJ_1BDYLIN-NETPR[9,0]").text = preco
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-CFOP[13,0]").text = cfop
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW1[14,0]").text = dir_fisc_icm
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW2[15,0]").text = dir_fisc_ipi
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW4[16,0]").text = confins
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW5[17,0]").text = pis
                    session.findById("wnd[0]").sendVKey(0)
                    session.findById("wnd[0]").sendVKey(0)
                    # Drill into the item to edit its tax lines.
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL").getAbsoluteRow(0).selected = True
                    session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/btn%#AUTOTEXT002").press()
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/ctxtJ_1BDYSTX-TAXTYP[0,0]").text = tipo_imposto_01
                    # The second/third tax rows may not exist on every screen;
                    # each write is wrapped so a missing row is simply skipped.
                    try:
                        session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/ctxtJ_1BDYSTX-TAXTYP[0,2]").text = tipo_imposto_02
                    except:
                        pass
                    try:
                        session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/ctxtJ_1BDYSTX-TAXTYP[0,1]").text = tipo_imposto_03
                        session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,1]").text = taxa_imposto_03
                        session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-TAXVAL[5,1]").text = Calculo_ICS3
                    except:
                        pass
                    try:
                        session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-BASE[3,1]").text = mont_basico_ICS3
                    except:
                        pass
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-BASE[3,0]").text = mont_basico_ICM3
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-TAXVAL[5,0]").text = Calculo_ICM3
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,0]").text = taxa_imposto_0102
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-OTHBAS[7,0]").text = ""
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-OTHBAS[7,2]").text = outra_base
                    #session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,2]").text = taxa_imposto_03
                    session.findById("wnd[0]").sendVKey (0)
                    session.findById("wnd[0]").sendVKey (0)
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL").getAbsoluteRow(0).selected = True
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL").getAbsoluteRow(1).selected = True
                    session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL").getAbsoluteRow(2).selected = True
                    #session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/btnPB_CALCULATOR").press()
                    # Save (btn[11]) and scrape the new document number from
                    # the status bar (third whitespace-separated token).
                    session.findById("wnd[0]/tbar[0]/btn[11]").press()
                    doc_num = session.findById("wnd[0]/sbar").text
                    doc_num = doc_num.split()
                    doc_num = str(doc_num[2])
                    #taxa_imposto = df.iloc[k,18]
                    # NOTE(review): '{material} ;{centro}' is missing a space,
                    # unlike every other field in this record line.
                    texto = (f'{ctg_nf} ; {empresa} ; {loc_neg} ; {func_parceiro} ; {id_parceiro} ; {t_item_nf} ; {material} ;{centro} ; {qtd} ; {preco} ; {cfop} ; {dir_fisc_icm} ; {dir_fisc_ipi} ; {confins} ; {pis} ; {tipo_imposto_01} ; {tipo_imposto_02} ; {tipo_imposto_03} ; {mont_basico_ICM3} ; {mont_basico_ICS3} ; {taxa_imposto_0102} ; {taxa_imposto_03} ; {outra_base} ; {msg} ; {doc_num}')
                    lista.append(texto)
                    session.findById("wnd[0]").sendVKey (3)
                df = pd.DataFrame(lista)
                df.to_csv(file_path + 'Base_baixa.csv', sep = ';', encoding= 'UTF-8', index = False, header = 0)
            except Exception as e:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print(exc_type, fname, exc_tb.tb_lineno)
                # Best-effort dump of whatever was processed before the error.
                # NOTE(review): uses sep=' ; ' here but ';' on the success
                # path, and raises NameError if 'lista' was never assigned
                # (e.g. read_excel failed) -- confirm intended.
                df = pd.DataFrame(lista)
                df.to_csv(file_path + 'Base_baixa.csv', sep = ' ; ', encoding= 'UTF-8', index = False, header = 0)
            finally:
                os.system("taskkill /f /im saplogon.exe")
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
    def J1BNFE(user, password, sap_path, file_path):
        """Drive SAP transaction J1BNFE to check NF-e statuses from the CSV.

        Loads the document numbers written by J1B1N from ``Base_baixa.csv``,
        queries their status in the J1BNFE monitor via the clipboard, and
        writes a 'status' column ('100' = authorized) back to the CSV.

        user      -- SAP username
        password  -- SAP password
        sap_path  -- path to the saplogon executable
        file_path -- directory holding Base_baixa.csv

        NOTE(review): defined without ``self`` -- call it on the class.
        """
        try:
            # Launch SAP GUI and attach via the COM scripting interface.
            subprocess.Popen(sap_path)
            sleep(2)
            SapGuiAuto = win32com.client.GetObject('SAPGUI')
            if not type(SapGuiAuto) == win32com.client.CDispatch:
                return
            application = SapGuiAuto.GetScriptingEngine
            if not type(application) == win32com.client.CDispatch:
                SapGuiAuto = None
                return
            connection = application.OpenConnection("001 - SAP PRODUÇÃO CLARO BRASIL – CLICAR AQUI")
            if not type(connection) == win32com.client.CDispatch:
                application = None
                SapGuiAuto = None
                return
            session = connection.Children(0)
            if not type(session) == win32com.client.CDispatch:
                connection = None
                application = None
                SapGuiAuto = None
                return
            try:
                # Headerless CSV (sep=' ; ', as written by J1B1N's error path);
                # restore the column names we need.
                df = pd.read_csv(file_path + 'Base_baixa.csv', encoding = 'UTF-8', sep = ' ; ', header = None, engine = 'python', skiprows = range(1))
                df.rename(columns = {23:"doc_num"}, inplace = True)
                df.rename(columns = {0:"ctg_nf"}, inplace = True)
                df["doc_num"] = df["doc_num"].str.replace(r'"','')
                df["ctg_nf"] = df["ctg_nf"].str.replace(r'"','')
                df["doc_num"] = df["doc_num"].astype(int)
            except:
                # Fall back to the headered layout.
                df = pd.read_csv(file_path + 'Base_baixa.csv', encoding = 'UTF-8', sep = ' ; ', engine = 'python', header = 0)
            # Put the unique document numbers on the clipboard for the
            # multi-selection dialog's "paste from clipboard" button.
            lista = df["doc_num"].unique()
            lista = lista.tolist()
            df_list = pd.DataFrame(lista)
            df_list.to_clipboard(index = False, header = None)
            session.findById("wnd[0]").maximize
            session.findById("wnd[0]/usr/txtRSYST-BNAME").text = (user)
            session.findById("wnd[0]/usr/pwdRSYST-BCODE").text = (password)
            session.findById("wnd[0]").sendVKey(0)
            session.findById("wnd[0]/tbar[0]/okcd").text = "j1bnfe"
            session.findById("wnd[0]").sendVKey (0)
            session.findById("wnd[0]/usr/ctxtBUKRS-LOW").text = "001"
            session.findById("wnd[0]/usr/btn%_DOCNUM_%_APP_%-VALU_PUSH").press()
            session.findById("wnd[1]/tbar[0]/btn[16]").press()
            session.findById("wnd[1]/tbar[0]/btn[24]").press()
            session.findById("wnd[1]/tbar[0]/btn[8]").press()
            session.findById("wnd[0]").sendVKey (8)
            #gui.waiting_frame()
            session.findById("wnd[0]").sendVKey (5)
            # Walk the grid rows until moving the cursor past the end fails,
            # so all rows are loaded before copying.
            cont = 0
            while True:
                try:
                    session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell").currentCellRow = cont
                    cont +=1
                except:
                    break
            # Copy the DOCNUM/CODE columns to the clipboard via the grid's
            # context menu ("Copiar texto" = "Copy text").
            session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell").selectColumn ("DOCNUM")
            session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell").selectColumn ("CODE")
            Tabla = session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell")
            Tabla.contextMenu()
            sleep(3)
            Tabla.selectContextMenuItemBytext ("Copiar texto")
            sleep(1)
            Tabla.selectContextMenuItemBytext ("Copiar texto")
            # Parse the clipboard dump and keep only status-100 documents.
            df2 = pd.read_clipboard(header = None)
            df2.rename(columns = {0:"NF", 1:"status"}, inplace = True)
            df2 = df2[df2.status == 100]
            lista = df2["NF"].unique()
            lista = lista.tolist()
            # Flag matching rows in the original CSV with status '100'.
            df['status'] = np.where((df['doc_num'].isin(lista)), '100', '')
            cab = ["ctg_nf" , "empresa" , 'loc_neg' , 'func_parceiro' , 'id_parceiro' , 't_item_nf' , 'material__centro' , 'qtd' , 'preco' , 'cfop' , 'dir_fisc_icm' , 'dir_fisc_ipi' , 'confins' , 'pis' , 'tipo_imposto_01' , 'tipo_imposto_02' , 'tipo_imposto_03', 'mont_basico_ICM3' , 'mont_basico_ICS3' , 'taxa_imposto_0102' , 'taxa_imposto_03' , 'outra_base' , 'msg', 'doc_num', 'status']
            try:
                # Write with the explicit header; fall back to the frame's
                # own columns if the count does not match.
                df.to_csv (file_path + 'Base_baixa.csv', sep = ';', encoding= 'UTF-8', index = False, header = cab)
            except:
                df.to_csv (file_path + 'Base_baixa.csv', sep = ';', encoding= 'UTF-8', index = False)
            session.findById("wnd[0]").sendVKey (3)
            session.findById("wnd[0]").sendVKey (3)
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
            os.system("taskkill /f /im saplogon.exe")
        finally:
            # Always close SAP GUI and Excel when done.
            os.system("taskkill /f /im saplogon.exe")
            os.system("taskkill /f /im excel.exe")
    def J1B3N(user, password, sap_path, file_path):
        """Drive SAP transaction J1B3N to print the NFs flagged in the CSV.

        Reads ``Base_baixa.csv`` and, for every row whose 'status' column
        (index 24) is '100', opens the document in J1B3N and triggers the
        print menu entry.

        user      -- SAP username
        password  -- SAP password
        sap_path  -- path to the saplogon executable
        file_path -- directory holding Base_baixa.csv

        NOTE(review): defined without ``self`` -- call it on the class.
        """
        try:
            # Launch SAP GUI and attach via the COM scripting interface.
            subprocess.Popen(sap_path)
            sleep(2)
            SapGuiAuto = win32com.client.GetObject('SAPGUI')
            if not type(SapGuiAuto) == win32com.client.CDispatch:
                return
            application = SapGuiAuto.GetScriptingEngine
            if not type(application) == win32com.client.CDispatch:
                SapGuiAuto = None
                return
            connection = application.OpenConnection("001 - SAP PRODUÇÃO CLARO BRASIL – CLICAR AQUI")
            if not type(connection) == win32com.client.CDispatch:
                application = None
                SapGuiAuto = None
                return
            session = connection.Children(0)
            if not type(session) == win32com.client.CDispatch:
                connection = None
                application = None
                SapGuiAuto = None
                return
            df = pd.read_csv(file_path + 'Base_baixa.csv', encoding = 'UTF-8', sep = ';', header = 0)
            ultima = len(df)
            session.findById("wnd[0]").maximize
            session.findById("wnd[0]/usr/txtRSYST-BNAME").text = (user)
            session.findById("wnd[0]/usr/pwdRSYST-BCODE").text = (password)
            session.findById("wnd[0]").sendVKey(0)
            session.findById("wnd[0]").maximize
            session.findById("wnd[0]/tbar[0]/okcd").text = "J1B3N"
            session.findById("wnd[0]").sendVKey (0)
            for k in range(0, ultima):
                status = str(df.iloc[k,24])
                doc_num = df.iloc[k,23]
                if status == '100':
                    # Load the document number and fire the print menu item.
                    session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-DOCNUM").text = ''
                    session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-DOCNUM").text = doc_num
                    session.findById("wnd[0]/mbar/menu[0]/menu[8]").select()
                    session.findById("wnd[0]").sendVKey (0)
                    session.findById("wnd[1]/usr/btnSPOP-OPTION1").press()
                    session.findById("wnd[1]/tbar[0]/btn[0]").press()
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
            print(exc_type, fname, exc_tb.tb_lineno)
        finally:
            # Always close Excel and SAP GUI when done.
            os.system('taskkill /f /im excel.exe')
            os.system('taskkill /f /im saplogon.exe')
import pandas as pd
import numpy as np
from time import sleep
from .gui_resources import gui
class sap_tools:
def __init__(self, user, password, data, sap_path, file_path):
self.user = user
self.password = password
self.data = data
self.sap_path = sap_path
self.file_path = file_path
def J1B1N(user, password, sap_path, file_path):
"""
Acessar a J1B1N do SAP para criar NFs de acordo com planilha.\n
(User = Usuario SAP,\n
password = <PASSWORD> sap,\n
sap_path = Diretório SAP,\n
file_path = Diretório dos arquivos)
"""
try:
subprocess.Popen(sap_path)
sleep(2)
SapGuiAuto = win32com.client.GetObject('SAPGUI')
if not type(SapGuiAuto) == win32com.client.CDispatch:
return
application = SapGuiAuto.GetScriptingEngine
if not type(application) == win32com.client.CDispatch:
SapGuiAuto = None
return
connection = application.OpenConnection("001 - SAP PRODUÇÃO CLARO BRASIL – CLICAR AQUI")
if not type(connection) == win32com.client.CDispatch:
application = None
SapGuiAuto = None
return
session = connection.Children(0)
if not type(session) == win32com.client.CDispatch:
connection = None
application = None
SapGuiAuto = None
return
try:
df = pd.read_excel(file_path + 'Base_baixa.xlsx')
ultima = len(df)
session.findById("wnd[0]").maximize
session.findById("wnd[0]/usr/txtRSYST-BNAME").text = (user)
session.findById("wnd[0]/usr/pwdRSYST-BCODE").text = (password)
session.findById("wnd[0]").sendVKey(0)
lista = []
texto = ('ctg_nf ; empresa ; loc_neg ; func_parceiro ; id_parceiro ; t_item_nf ; material ; centro ; qtd ; preco ; cfop ; dir_fisc_icm ; dir_fisc_ipi ; confins ; pis ; tipo_imposto_01 ; tipo_imposto_02 ; tipo Imposto_03 ; mont_basico_ICM3 ; mont_basico_ICS3 ; taxa_imposto_0102 ; taxa_imposto_03 ; outra_base ; msg ; doc_num')
lista.append(texto)
for k in range(0,ultima):
ctg_nf = df.iloc[k,0]
empresa = df.iloc[k,1]
if len(str(empresa)) <= 1:
empresa = str(f'00{empresa}')
loc_neg = df.iloc[k,2]
func_parceiro = df.iloc[k,3]
id_parceiro = df.iloc[k,4]
t_item_nf = df.iloc[k,5]
material = df.iloc[k,6]
centro = df.iloc[k,7]
qtd = df.iloc[k,8]
preco = df.iloc[k,9]
cfop = df.iloc[k,10]
dir_fisc_icm = df.iloc[k,11]
dir_fisc_ipi = df.iloc[k,12]
confins = df.iloc[k,13]
pis = df.iloc[k,14]
tipo_imposto_01 = df.iloc[k,15]
tipo_imposto_02 = df.iloc[k,16]
try:
tipo_imposto_03 = df.iloc[k,17]
except:
tipo_imposto_03 = ''
mont_basico_ICM3 = df.iloc[k,18]
mont_basico_ICS3 = df.iloc[k,19]
taxa_imposto_0102 = df.iloc[k,20]
taxa_imposto_03 = df.iloc[k,21]
outra_base = df.iloc[k,22]
msg = df.iloc[k,23]
Calculo_ICM3 = df.iloc[k,26]
Calculo_ICS3 = df.iloc[k,27]
session.findById("wnd[0]/tbar[0]/okcd").text = "J1B1N"
session.findById("wnd[0]").sendVKey(0)
session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-NFTYPE").text = ctg_nf
session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-BUKRS").text = empresa
session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-BRANCH").text = loc_neg
session.findById("wnd[0]/usr/cmbJ_1BDYDOC-PARVW").key = "WE"
session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-PARID").text = id_parceiro
session.findById("wnd[0]").sendVKey(0)
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB4").select()
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB4/ssubHEADER_TAB:SAPLJ1BB2:2400/tblSAPLJ1BB2MESSAGE_CONTROL/txtJ_1BDYFTX-MESSAGE[0,0]").text = msg
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5").select()
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/ctxtJ_1BDYDOC-INCO1").text = "CIF"
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/txtJ_1BDYDOC-ANZPK").text = "1"
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/txtJ_1BDYDOC-NTGEW").text = "100"
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB5/ssubHEADER_TAB:SAPLJ1BB2:2500/txtJ_1BDYDOC-BRGEW").text = "100"
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1").select()
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-ITMTYP[1,0]").text = t_item_nf
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-MATNR[2,0]").text = material
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-WERKS[3,0]").text = centro
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/txtJ_1BDYLIN-MENGE[6,0]").text = qtd
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/txtJ_1BDYLIN-NETPR[9,0]").text = preco
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-CFOP[13,0]").text = cfop
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW1[14,0]").text = dir_fisc_icm
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW2[15,0]").text = dir_fisc_ipi
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW4[16,0]").text = confins
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL/ctxtJ_1BDYLIN-TAXLW5[17,0]").text = pis
session.findById("wnd[0]").sendVKey(0)
session.findById("wnd[0]").sendVKey(0)
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/tblSAPLJ1BB2ITEM_CONTROL").getAbsoluteRow(0).selected = True
session.findById("wnd[0]/usr/tabsTABSTRIP1/tabpTAB1/ssubHEADER_TAB:SAPLJ1BB2:2100/btn%#AUTOTEXT002").press()
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/ctxtJ_1BDYSTX-TAXTYP[0,0]").text = tipo_imposto_01
try:
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/ctxtJ_1BDYSTX-TAXTYP[0,2]").text = tipo_imposto_02
except:
pass
try:
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/ctxtJ_1BDYSTX-TAXTYP[0,1]").text = tipo_imposto_03
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,1]").text = taxa_imposto_03
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-TAXVAL[5,1]").text = Calculo_ICS3
except:
pass
try:
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-BASE[3,1]").text = mont_basico_ICS3
except:
pass
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-BASE[3,0]").text = mont_basico_ICM3
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-TAXVAL[5,0]").text = Calculo_ICM3
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,0]").text = taxa_imposto_0102
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-OTHBAS[7,0]").text = ""
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-OTHBAS[7,2]").text = outra_base
#session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,2]").text = taxa_imposto_03
session.findById("wnd[0]").sendVKey (0)
session.findById("wnd[0]").sendVKey (0)
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL").getAbsoluteRow(0).selected = True
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL").getAbsoluteRow(1).selected = True
session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL").getAbsoluteRow(2).selected = True
#session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/btnPB_CALCULATOR").press()
session.findById("wnd[0]/tbar[0]/btn[11]").press()
doc_num = session.findById("wnd[0]/sbar").text
doc_num = doc_num.split()
doc_num = str(doc_num[2])
#taxa_imposto = df.iloc[k,18]
texto = (f'{ctg_nf} ; {empresa} ; {loc_neg} ; {func_parceiro} ; {id_parceiro} ; {t_item_nf} ; {material} ;{centro} ; {qtd} ; {preco} ; {cfop} ; {dir_fisc_icm} ; {dir_fisc_ipi} ; {confins} ; {pis} ; {tipo_imposto_01} ; {tipo_imposto_02} ; {tipo_imposto_03} ; {mont_basico_ICM3} ; {mont_basico_ICS3} ; {taxa_imposto_0102} ; {taxa_imposto_03} ; {outra_base} ; {msg} ; {doc_num}')
lista.append(texto)
session.findById("wnd[0]").sendVKey (3)
df = pd.DataFrame(lista)
df.to_csv(file_path + 'Base_baixa.csv', sep = ';', encoding= 'UTF-8', index = False, header = 0)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
df = pd.DataFrame(lista)
df.to_csv(file_path + 'Base_baixa.csv', sep = ' ; ', encoding= 'UTF-8', index = False, header = 0)
finally:
os.system("taskkill /f /im saplogon.exe")
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
def J1BNFE(user, password, sap_path, file_path):
    """
    Open SAP transaction J1BNFE and record which NF documents are authorized.

    Reads ``Base_baixa.csv`` (produced by the NF-creation routine), looks up
    every ``doc_num`` it contains in J1BNFE, and rewrites the CSV with an
    extra ``status`` column: '100' for documents whose status code is 100,
    '' otherwise.

    (user = SAP user,
    password = SAP password,
    sap_path = path to the SAP logon executable,
    file_path = directory containing the work files)
    """
    try:
        # Start SAP GUI and attach to its COM scripting engine.
        subprocess.Popen(sap_path)
        sleep(2)
        SapGuiAuto = win32com.client.GetObject('SAPGUI')
        if not type(SapGuiAuto) == win32com.client.CDispatch:
            return
        application = SapGuiAuto.GetScriptingEngine
        if not type(application) == win32com.client.CDispatch:
            SapGuiAuto = None
            return
        # Connection string must match the entry configured in SAP Logon.
        connection = application.OpenConnection("001 - SAP PRODUÇÃO CLARO BRASIL – CLICAR AQUI")
        if not type(connection) == win32com.client.CDispatch:
            application = None
            SapGuiAuto = None
            return
        session = connection.Children(0)
        if not type(session) == win32com.client.CDispatch:
            connection = None
            application = None
            SapGuiAuto = None
            return
        try:
            # First attempt: headerless file written by the creation routine
            # (column 23 = document number, column 0 = NF category).
            df = pd.read_csv(file_path + 'Base_baixa.csv', encoding = 'UTF-8', sep = ' ; ', header = None, engine = 'python', skiprows = range(1))
            df.rename(columns = {23:"doc_num"}, inplace = True)
            df.rename(columns = {0:"ctg_nf"}, inplace = True)
            df["doc_num"] = df["doc_num"].str.replace(r'"','')
            df["ctg_nf"] = df["ctg_nf"].str.replace(r'"','')
            df["doc_num"] = df["doc_num"].astype(int)
        except:
            # Fallback: the file already carries a header row.
            df = pd.read_csv(file_path + 'Base_baixa.csv', encoding = 'UTF-8', sep = ' ; ', engine = 'python', header = 0)
        # Copy the unique document numbers to the clipboard so they can be
        # pasted into the transaction's multi-value selection dialog below.
        lista = df["doc_num"].unique()
        lista = lista.tolist()
        df_list = pd.DataFrame(lista)
        df_list.to_clipboard(index = False, header = None)
        # Log in and open transaction J1BNFE for company code 001.
        session.findById("wnd[0]").maximize
        session.findById("wnd[0]/usr/txtRSYST-BNAME").text = (user)
        session.findById("wnd[0]/usr/pwdRSYST-BCODE").text = (password)
        session.findById("wnd[0]").sendVKey(0)
        session.findById("wnd[0]/tbar[0]/okcd").text = "j1bnfe"
        session.findById("wnd[0]").sendVKey (0)
        session.findById("wnd[0]/usr/ctxtBUKRS-LOW").text = "001"
        # Multi-value selection dialog: btn[24]/btn[8] presumably paste the
        # clipboard and accept — TODO confirm against the recorded script.
        session.findById("wnd[0]/usr/btn%_DOCNUM_%_APP_%-VALU_PUSH").press()
        session.findById("wnd[1]/tbar[0]/btn[16]").press()
        session.findById("wnd[1]/tbar[0]/btn[24]").press()
        session.findById("wnd[1]/tbar[0]/btn[8]").press()
        session.findById("wnd[0]").sendVKey (8)
        #gui.waiting_frame()
        session.findById("wnd[0]").sendVKey (5)
        # Advance the grid cursor row by row; the first failure marks the
        # end of the result set.
        cont = 0
        while True:
            try:
                session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell").currentCellRow = cont
                cont +=1
            except:
                break
        # Copy the DOCNUM and CODE columns via the grid's context menu
        # ("Copiar texto" is the localized "Copy text" entry).
        session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell").selectColumn ("DOCNUM")
        session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell").selectColumn ("CODE")
        Tabla = session.findById("wnd[0]/usr/cntlNFE_CONTAINER/shellcont/shell")
        Tabla.contextMenu()
        sleep(3)
        Tabla.selectContextMenuItemBytext ("Copiar texto")
        sleep(1)
        Tabla.selectContextMenuItemBytext ("Copiar texto")
        # Keep only documents whose status code equals 100.
        df2 = pd.read_clipboard(header = None)
        df2.rename(columns = {0:"NF", 1:"status"}, inplace = True)
        df2 = df2[df2.status == 100]
        lista = df2["NF"].unique()
        lista = lista.tolist()
        df['status'] = np.where((df['doc_num'].isin(lista)), '100', '')
        cab = ["ctg_nf" , "empresa" , 'loc_neg' , 'func_parceiro' , 'id_parceiro' , 't_item_nf' , 'material__centro' , 'qtd' , 'preco' , 'cfop' , 'dir_fisc_icm' , 'dir_fisc_ipi' , 'confins' , 'pis' , 'tipo_imposto_01' , 'tipo_imposto_02' , 'tipo_imposto_03', 'mont_basico_ICM3' , 'mont_basico_ICS3' , 'taxa_imposto_0102' , 'taxa_imposto_03' , 'outra_base' , 'msg', 'doc_num', 'status']
        try:
            # The explicit header presumably fails on a column-count
            # mismatch (fallback read path); retry without it.
            df.to_csv (file_path + 'Base_baixa.csv', sep = ';', encoding= 'UTF-8', index = False, header = cab)
        except:
            df.to_csv (file_path + 'Base_baixa.csv', sep = ';', encoding= 'UTF-8', index = False)
        session.findById("wnd[0]").sendVKey (3)
        session.findById("wnd[0]").sendVKey (3)
    except Exception as e:
        # Log failure location, then make sure SAP is closed.
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print(exc_type, fname, exc_tb.tb_lineno)
        os.system("taskkill /f /im saplogon.exe")
    finally:
        # Always terminate SAP GUI and Excel so the next run starts clean.
        os.system("taskkill /f /im saplogon.exe")
        os.system("taskkill /f /im excel.exe")
def J1B3N(user, password, sap_path, file_path):
"""
Acessar a J1B3N do SAP para imprimir NFs de acordo com planilha.\n
(User = Usuario SAP,\n
password = <PASSWORD> sap,\n
sap_path = Diretório SAP,\n
file_path = Diretório dos arquivos)
"""
try:
subprocess.Popen(sap_path)
sleep(2)
SapGuiAuto = win32com.client.GetObject('SAPGUI')
if not type(SapGuiAuto) == win32com.client.CDispatch:
return
application = SapGuiAuto.GetScriptingEngine
if not type(application) == win32com.client.CDispatch:
SapGuiAuto = None
return
connection = application.OpenConnection("001 - SAP PRODUÇÃO CLARO BRASIL – CLICAR AQUI")
if not type(connection) == win32com.client.CDispatch:
application = None
SapGuiAuto = None
return
session = connection.Children(0)
if not type(session) == win32com.client.CDispatch:
connection = None
application = None
SapGuiAuto = None
return
df = pd.read_csv(file_path + 'Base_baixa.csv', encoding = 'UTF-8', sep = ';', header = 0)
ultima = len(df)
session.findById("wnd[0]").maximize
session.findById("wnd[0]/usr/txtRSYST-BNAME").text = (user)
session.findById("wnd[0]/usr/pwdRSYST-BCODE").text = (password)
session.findById("wnd[0]").sendVKey(0)
session.findById("wnd[0]").maximize
session.findById("wnd[0]/tbar[0]/okcd").text = "J1B3N"
session.findById("wnd[0]").sendVKey (0)
for k in range(0, ultima):
status = str(df.iloc[k,24])
doc_num = df.iloc[k,23]
if status == '100':
session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-DOCNUM").text = ''
session.findById("wnd[0]/usr/ctxtJ_1BDYDOC-DOCNUM").text = doc_num
session.findById("wnd[0]/mbar/menu[0]/menu[8]").select()
session.findById("wnd[0]").sendVKey (0)
session.findById("wnd[1]/usr/btnSPOP-OPTION1").press()
session.findById("wnd[1]/tbar[0]/btn[0]").press()
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, fname, exc_tb.tb_lineno)
finally:
os.system('taskkill /f /im excel.exe')
os.system('taskkill /f /im saplogon.exe') | pt | 0.565581 | Acessar a J1B1N do SAP para criar NFs de acordo com planilha.\n (User = Usuario SAP,\n password = <PASSWORD> sap,\n sap_path = Diretório SAP,\n file_path = Diretório dos arquivos) #AUTOTEXT002").press() #session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/tblSAPLJ1BB2TAX_CONTROL/txtJ_1BDYSTX-RATE[4,2]").text = taxa_imposto_03 #session.findById("wnd[0]/usr/tabsITEM_TAB/tabpTAX/ssubITEM_TABS:SAPLJ1BB2:3200/btnPB_CALCULATOR").press() #taxa_imposto = df.iloc[k,18] Acessar a J1BNFE do SAP para consultar NFs de acordo com planilha.\n (User = Usuario SAP,\n password = <PASSWORD> sap,\n sap_path = Diretório SAP,\n file_path = Diretório dos arquivos) #gui.waiting_frame() Acessar a J1B3N do SAP para imprimir NFs de acordo com planilha.\n (User = Usuario SAP,\n password = <PASSWORD> sap,\n sap_path = Diretório SAP,\n file_path = Diretório dos arquivos) | 2.30494 | 2 |
toxic_comment_classification/model.py | EuroPy/EuroPy-Examples | 1 | 6618782 | import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten
from transformers import TFBertModel
class BertClassifier(tf.keras.Model):
def __init__(self, bert: TFBertModel, num_classes: int):
super().__init__()
self.bert = bert
self.classifier = Dense(num_classes, activation='sigmoid')
@tf.function
def call(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask
)
cls_output = outputs[1]
cls_output = self.classifier(cls_output)
return cls_output | import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten
from transformers import TFBertModel
class BertClassifier(tf.keras.Model):
    def __init__(self, bert: TFBertModel, num_classes: int):
        """Wrap a pretrained BERT encoder with a sigmoid classification head.

        Args:
            bert: pretrained TFBertModel used as the encoder.
            num_classes: number of output units; sigmoid activation gives
                one independent probability per class (multi-label setup).
        """
        super().__init__()
        self.bert = bert
        self.classifier = Dense(num_classes, activation='sigmoid')
@tf.function
def call(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask
)
cls_output = outputs[1]
cls_output = self.classifier(cls_output)
return cls_output | none | 1 | 2.877717 | 3 | |
pcmdi_metrics/monsoon_wang/scripts/mpindex_compute.py | tomvothecoder/pcmdi_metrics | 47 | 6618783 | <filename>pcmdi_metrics/monsoon_wang/scripts/mpindex_compute.py
#!/usr/bin/env python
# Thin CLI wrapper: build the monsoon (Wang) metric argument parser, read
# the parameters, and hand them to the driver.
from pcmdi_metrics.monsoon_wang import create_monsoon_wang_parser, monsoon_wang_runner

P = create_monsoon_wang_parser()
# argparse_vals_only=False — presumably also accepts non-CLI parameter
# sources (e.g. a parameter file); confirm against the pcmdi_metrics docs.
args = P.get_parameter(argparse_vals_only=False)
monsoon_wang_runner(args)
| <filename>pcmdi_metrics/monsoon_wang/scripts/mpindex_compute.py
#!/usr/bin/env python
from pcmdi_metrics.monsoon_wang import create_monsoon_wang_parser, monsoon_wang_runner
P = create_monsoon_wang_parser()
args = P.get_parameter(argparse_vals_only=False)
monsoon_wang_runner(args)
| ru | 0.26433 | #!/usr/bin/env python | 1.602198 | 2 |
cookielaw/__init__.py | Wenze/django-cookie-law | 0 | 6618784 | VERSION = (1, 0, 13)
| VERSION = (1, 0, 13)
| none | 1 | 1.119375 | 1 | |
learn_to_code/chapter_3/rpc.py | Candace-Beall/python_kata | 0 | 6618785 | print('Justin!')
# Simple guessing game: keep prompting until the player names the secret color,
# then report how many attempts it took (the winning guess counts).
color = 'blue'
guesses = 0
while True:
    guess = input('What color am I thinking of? ')
    guesses += 1
    if guess == color:
        break
print('You got it! It took you', guesses, 'guesses')
| print('Justin!')
color = 'blue'
guess = ''
guesses = 0
while color != guess:
guess = input('What color am I thinking of? ')
guesses = guesses + 1
print('You got it! It took you', guesses, 'guesses')
| none | 1 | 3.931911 | 4 | |
cogs/runescape.py | Sean-Griffith/discord-bot | 0 | 6618786 | import discord
from discord.ext import commands
import datetime
import requests
import json
from .base import runescape_base as RB
class Runescape(commands.Cog):
    """Discord cog exposing RuneScape lookups backed by the weirdgloop API.

    Commands: Grand Exchange prices, Travelling Merchant stock (today,
    tomorrow, or next appearances of an item), per-user stock reminders,
    and admin configuration of the reminder channel.
    """

    def __init__(self, bot):
        self.bot = bot
        # One shared HTTP session (and User-Agent) for all API calls.
        self.rswiki_session = requests.Session()
        self.rswiki_session.headers["User-Agent"] = "Discord bot for querying grand exchange data."

    @commands.command(aliases = ["ge"])
    async def grand_exchange(self, ctx, *item_name):
        """Reply with the latest Grand Exchange data for the named item."""
        # Discord passes each word separately; rebuild and normalize the name.
        item_name = str(" ".join(item_name)).capitalize()
        print("Search for:", item_name)
        # Query rswiki for GE data
        url = "https://api.weirdgloop.org/exchange/history/rs/latest"
        data = self.rswiki_session.get(url+"?name={}".format(item_name))
        # The JSON response is keyed by the canonical item name.
        item_name = list(data.json().keys())[0]
        item_data = data.json()[item_name]
        await ctx.channel.send(embed=RB.generate_ge_embed(item_name, item_data))

    @commands.command(aliases = ["tms"])
    async def traveling_merchant(self, ctx, option=None, *query):
        """Show Travelling Merchant stock.

        option: None -> today's stock, "2" -> tomorrow's stock,
        "3" -> next appearances of the item named by *query*.
        """
        item_name = None
        item_query = None
        if(query and len(query) > 0):
            item_query, item_name = RB.encode_item_name(query)
        tms_date = []
        tms_data = []
        if(option == None):
            # Query rswiki for today's Traveling Merchant stock
            url = "https://api.weirdgloop.org/runescape/tms/current"
            # Get today's stock
            tms_data.append(self.rswiki_session.get(url).json())
            tms_date.append(datetime.date.today())
        elif(option == "2"):
            # Query rswiki for tomorrow's Traveling Merchant stock
            url = "https://api.weirdgloop.org/runescape/tms/next"
            # Get tomorrow's stock
            tms_data.append(self.rswiki_session.get(url).json())
            # Get tomorrow's date
            tms_date.append(datetime.date.today())
            tms_date[0] += datetime.timedelta(days=1)
        elif(option == "3"):
            # Query rswiki to search for next appearance of query in Traveling Merchant stock
            url = "https://api.weirdgloop.org/runescape/tms/search?name={}".format(item_query)
            # Submit query
            tms_query_result = self.rswiki_session.get(url).json()
            # Collect every listed appearance (date + items) if the query succeeded.
            if(isinstance(tms_query_result, list) and len(tms_query_result) > 0):
                for i in range(0,len(tms_query_result)):
                    daily_stock = tms_query_result[i]["items"]
                    tms_data.append(daily_stock)
                    tms_date.append(datetime.datetime.strptime(tms_query_result[i]['date'], "%d %B %Y"))
        if(len(tms_data) > 0):
            await ctx.send(embed=RB.generate_tms_embed(tms_data, tms_date, item_name))
        else:
            await ctx.send("Could not find specified stock.")

    @commands.command(aliases = ["tmsr"])
    async def tms_reminder(self, ctx, *query):
        """Register the invoking user for a reminder on the queried item."""
        item_query = None
        item_name = None
        if(query and len(query) > 0):
            item_query, item_name = RB.encode_item_name(query)
        # Requested item reminder is valid, record user id / requested reminder(s)
        if(item_query):
            RB.tms_set_reminder(item_name, ctx.author.id)
            await ctx.send("Now tracking {} for {}".format(item_name, ctx.author.name))
        else:
            # encode_item_name rejected the input, so nothing was registered.
            await ctx.send("Could not track {}".format(item_name))

    @commands.command(aliases = ["setreminder", "asr"])
    @commands.has_permissions(administrator=True)
    async def admin_set_reminder(self, ctx):
        """Admin-only: persist the current channel id as the reminder channel."""
        with open("cogs/base/data/tms_reminder_cid.txt", "w") as cid_file:
            cid_file.write(str(ctx.channel.id))
        await ctx.send("Now tracking reminders in {}".format(ctx.channel.name))

    @commands.command(aliases = ["tmsl"])
    async def tms_list(self, ctx):
        """Send the TMS list embed (built by runescape_base; presumably the
        currently tracked reminders — confirm in runescape_base)."""
        await ctx.send(embed=RB.generate_tmsl_embed())
def setup(bot):
bot.add_cog(Runescape(bot)) | import discord
from discord.ext import commands
import datetime
import requests
import json
from .base import runescape_base as RB
class Runescape(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.rswiki_session = requests.Session()
self.rswiki_session.headers["User-Agent"] = "Discord bot for querying grand exchange data."
@commands.command(aliases = ["ge"])
async def grand_exchange(self, ctx, *item_name):
item_name = str(" ".join(item_name)).capitalize()
print("Search for:", item_name)
# Query rswiki for GE data
url = "https://api.weirdgloop.org/exchange/history/rs/latest"
data = self.rswiki_session.get(url+"?name={}".format(item_name))
item_name = list(data.json().keys())[0]
item_data = data.json()[item_name]
await ctx.channel.send(embed=RB.generate_ge_embed(item_name, item_data))
@commands.command(aliases = ["tms"])
async def traveling_merchant(self, ctx, option=None, *query):
item_name = None
item_query = None
if(query and len(query) > 0):
item_query, item_name = RB.encode_item_name(query)
tms_date = []
tms_data = []
if(option == None):
# Query rswiki for todays Traveling Merchant Stock
url = "https://api.weirdgloop.org/runescape/tms/current"
# Get todays stock
tms_data.append(self.rswiki_session.get(url).json())
tms_date.append(datetime.date.today())
elif(option == "2"):
# Query rswiki for tomorrows Traveling Merchant Stock
url = "https://api.weirdgloop.org/runescape/tms/next"
# Get tommorrows stock
tms_data.append(self.rswiki_session.get(url).json())
# Get tomorrows date
tms_date.append(datetime.date.today())
tms_date[0] += datetime.timedelta(days=1)
elif(option == "3"):
# Query rswiki to search for next appearance of query in Traveling Merchant Stock
url = "https://api.weirdgloop.org/runescape/tms/search?name={}".format(item_query)
# Submit query
tms_query_result = self.rswiki_session.get(url).json()
# Get first occurance of queried item in stock if the query was successful
if(isinstance(tms_query_result, list) and len(tms_query_result) > 0):
for i in range(0,len(tms_query_result)):
daily_stock = tms_query_result[i]["items"]
tms_data.append(daily_stock)
tms_date.append(datetime.datetime.strptime(tms_query_result[i]['date'], "%d %B %Y"))
if(len(tms_data) > 0):
await ctx.send(embed=RB.generate_tms_embed(tms_data, tms_date, item_name))
else:
await ctx.send("Could not find specified stock.")
@commands.command(aliases = ["tmsr"])
async def tms_reminder(self, ctx, *query):
item_query = None
item_name = None
if(query and len(query) > 0):
item_query, item_name = RB.encode_item_name(query)
# Requested item reminder is valid, record user id / requested reminder(s)
if(item_query):
RB.tms_set_reminder(item_name, ctx.author.id)
await ctx.send("Now tracking {} for {}".format(item_name, ctx.author.name))
else:
# Check if item requested for reminders is valid
await ctx.send("Could not track {}".format(item_name))
@commands.command(aliases = ["setreminder", "asr"])
@commands.has_permissions(administrator=True)
async def admin_set_reminder(self, ctx):
with open("cogs/base/data/tms_reminder_cid.txt", "w") as cid_file:
cid_file.write(str(ctx.channel.id))
await ctx.send("Now tracking reminders in {}".format(ctx.channel.name))
@commands.command(aliases = ["tmsl"])
async def tms_list(self, ctx):
await ctx.send(embed=RB.generate_tmsl_embed())
def setup(bot):
bot.add_cog(Runescape(bot)) | en | 0.766761 | # Query rswiki for GE data # Query rswiki for todays Traveling Merchant Stock # Get todays stock # Query rswiki for tomorrows Traveling Merchant Stock # Get tommorrows stock # Get tomorrows date # Query rswiki to search for next appearance of query in Traveling Merchant Stock # Submit query # Get first occurance of queried item in stock if the query was successful # Requested item reminder is valid, record user id / requested reminder(s) # Check if item requested for reminders is valid | 2.86049 | 3 |
Intro/depositProfit.py | sumeyaali/Code_Challenges | 0 | 6618787 | def depositProfit(deposit, rate, threshold):
rate = rate/100
years = 0
while deposit < threshold:
deposit += deposit * rate
years += 1
return years
| def depositProfit(deposit, rate, threshold):
rate = rate/100
years = 0
while deposit < threshold:
deposit += deposit * rate
years += 1
return years
| none | 1 | 3.103621 | 3 | |
print-random-sorted-linked-list.py | terencestone/algo-practice | 0 | 6618788 | <reponame>terencestone/algo-practice
class Node:
    """A single element of a singly linked list."""

    def __init__(self, data=None):
        """Store *data*; the node starts detached (no successor)."""
        self.data = data
        self.next = None

    def __str__(self):
        """Render this node and, via str(None-or-Node), everything after it."""
        return '(data: {}, next: {})'.format(self.data, self.next)
class LinkedList:
    """Singly linked list built from ``Node`` objects.

    ``head`` is the first node, or None for an empty list.
    """

    def __init__(self, head=None):
        self.head = head

    def __str__(self):
        """Return a stringified list of node reprs (each entry is the
        node's own nested repr, mirroring Node.__str__)."""
        if not self.head:
            return str([])
        reprs = []
        current = self.head
        while current:
            reprs.append(str(current))
            current = current.next
        return str(reprs)

    def append(self, data):
        """Add *data* at the tail (O(n): walks to the end)."""
        if not self.head:
            self.head = Node(data)
            return
        current = self.head
        while current.next:
            current = current.next
        current.next = Node(data)

    def prepend(self, data):
        """Add *data* at the head (O(1))."""
        new_head = Node(data)
        new_head.next = self.head
        self.head = new_head

    def delete_with_value(self, data):
        """Remove the first node whose ``data`` equals *data* (no-op if absent).

        Bug fix: the original kept advancing after splicing a node out and
        then evaluated ``current.next`` with ``current`` possibly None —
        deleting the last element (e.g. [1, 2], delete 2) raised
        AttributeError. Returning right after the single splice fixes the
        crash and matches the head-deletion branch (first occurrence only).
        """
        if not self.head:
            return
        if self.head.data == data:
            self.head = self.head.next
            return
        current = self.head
        while current.next is not None:
            if current.next.data == data:
                current.next = current.next.next
                return
            current = current.next
def merge_sort(h):
    """Merge-sort the node chain starting at *h*; return the new head."""
    # Chains of length 0 or 1 are already sorted.
    if h is None or h.next is None:
        return h
    # Cut the chain in two just after the middle node.
    middle = get_middle(h)
    second = middle.next
    middle.next = None
    # Sort each half, then weave the halves back together.
    return sorted_merge(merge_sort(h), merge_sort(second))
def sorted_merge(left, right):
    """Merge two sorted node chains into one sorted chain.

    Stable: on ties the node from *left* comes first. Existing nodes are
    relinked in place; no new nodes are created.
    """
    if not left:
        return right
    if not right:
        return left
    # Pick the smaller head, then stitch the remaining nodes on iteratively.
    if left.data <= right.data:
        head = left
        left = left.next
    else:
        head = right
        right = right.next
    tail = head
    while left and right:
        if left.data <= right.data:
            tail.next = left
            left = left.next
        else:
            tail.next = right
            right = right.next
        tail = tail.next
    # Exactly one side may still have nodes; attach it wholesale.
    tail.next = left or right
    return head
def get_middle(h):
    """Return the middle node of the chain starting at *h*.

    For an even-length chain this is the last node of the first half
    (index (n - 1) // 2); an empty chain yields None.
    """
    if h is None:
        return None
    # First pass: measure the chain.
    length = 0
    node = h
    while node:
        length += 1
        node = node.next
    # Second pass: walk to the middle index.
    node = h
    for _ in range((length - 1) // 2):
        node = node.next
    return node
# run
# Demo driver: build a linked list of random length (0-100) filled with
# random values 0-100, then print the sorted chain. With length 0 the list
# is empty and merge_sort(None) simply prints None.
import random
length = random.randint(0,100)
l = LinkedList()
for _ in range(0,length):
    l.append(random.randint(0, 100))
# merge_sort returns the new head node; printing it uses Node.__str__,
# which renders the whole chain recursively.
print(merge_sort(l.head))
| class Node:
def __init__(self, data=None):
self.next = None
self.data = data
def __str__(self):
return f'(data: {self.data}, next: {str(self.next)})'
class LinkedList:
    """Singly linked list built from ``Node`` objects.

    ``head`` is the first node, or None for an empty list.
    """

    def __init__(self, head=None):
        self.head = head

    def __str__(self):
        """Return a stringified list of node reprs (each entry is the
        node's own nested repr, mirroring Node.__str__)."""
        if not self.head:
            return str([])
        reprs = []
        current = self.head
        while current:
            reprs.append(str(current))
            current = current.next
        return str(reprs)

    def append(self, data):
        """Add *data* at the tail (O(n): walks to the end)."""
        if not self.head:
            self.head = Node(data)
            return
        current = self.head
        while current.next:
            current = current.next
        current.next = Node(data)

    def prepend(self, data):
        """Add *data* at the head (O(1))."""
        new_head = Node(data)
        new_head.next = self.head
        self.head = new_head

    def delete_with_value(self, data):
        """Remove the first node whose ``data`` equals *data* (no-op if absent).

        Bug fix: the original kept advancing after splicing a node out and
        then evaluated ``current.next`` with ``current`` possibly None —
        deleting the last element (e.g. [1, 2], delete 2) raised
        AttributeError. Returning right after the single splice fixes the
        crash and matches the head-deletion branch (first occurrence only).
        """
        if not self.head:
            return
        if self.head.data == data:
            self.head = self.head.next
            return
        current = self.head
        while current.next is not None:
            if current.next.data == data:
                current.next = current.next.next
                return
            current = current.next
def merge_sort(h):
if not h or not h.next: return h
mid = get_middle(h)
right_half = mid.next
mid.next = None
left = merge_sort(h)
right = merge_sort(right_half)
result = sorted_merge(left, right)
return result
def sorted_merge(left, right):
if not left: return right
if not right: return left
result = None
if left.data <= right.data:
result = left
result.next = sorted_merge(left.next, right)
else:
result = right
result.next = sorted_merge(left, right.next)
return result
def get_middle(h):
if not h: return h
fast = h
slow = h
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
return slow
# run
import random
length = random.randint(0,100)
l = LinkedList()
for _ in range(0,length):
l.append(random.randint(0, 100))
print(merge_sort(l.head)) | none | 1 | 3.780881 | 4 | |
tests/pipeline_manager/test_pipeline_manager_timestamp_checkpoint_filepath.py | fhalbritter/pypiper | 24 | 6618789 | <filename>tests/pipeline_manager/test_pipeline_manager_timestamp_checkpoint_filepath.py
""" Tests for construction of checkpoint filepath """
import glob
import os
import time
from pypiper import PipelineManager
from pypiper.const import CHECKPOINT_EXTENSION
from pypiper.stage import Stage
from tests.helpers import named_param
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class DummyPM(PipelineManager):
    """ Simple override of true PipelineManager, for __init__ simplicity """

    def __init__(self, name, outfolder):
        # Deliberately does NOT call super().__init__(): the real manager's
        # constructor performs argument parsing and filesystem setup that
        # these unit tests don't want. Only the attributes that the
        # timestamp()/checkpoint machinery reads are populated here.
        self.name = name                # pipeline name; prefixes checkpoint filenames
        self.outfolder = outfolder      # directory where checkpoints are written
        self.start_point = None        # no stage skipping in these tests
        self.stop_before = None
        self.stop_after = None
        self.halt_on_next = False
        self.last_timestamp = time.time()
        self.prev_checkpoint = None
        self.curr_checkpoint = None
class PipelineMangerTimestampCheckpointFilePathTests:
    """ Tests for determination of checkpoint filepath. """

    # The two named_param decorators stack: the test runs for every
    # (name1, name2) pipeline-name pair crossed with every spec_type.
    @named_param(argnames=["name1", "name2"],
                 argvalues=[("chipseq", "ATACseq"), ("rnaKallisto", "wgbs")])
    @named_param(argnames="spec_type",
                 argvalues=["stage_name", "stage", "function"])
    def test_distinguishes_pipelines_within_outfolder(
            self, name1, name2, spec_type, tmpdir):
        """
        Checkpoint files within sample folder include pipeline name.

        More specifically, we often have the case that a single sample's
        name is the name of a subfolder, within the broader results
        directory for an entire project, in which to store output files
        associated with that particular sample. The sample in that case may
        be associated with a protocol that maps to multiple pipelines, and
        thus the sample may be processed by multiple pipelines. If each
        pipeline had a unique set of stage names, we'd be fine with no
        additional measures, but to avoid a checkpoint filename collision,
        in which we would be unable to know which pipeline had generated
        a given checkpoint file, we add the pipeline name to the checkpoint
        file and assume that we're not processing the sample with multiple
        identically named pipelines.
        """
        # Define a dummy function to use as the callable for a Stage.
        def trim_reads():
            pass

        def stage_spec():
            # Build the checkpoint spec in whichever of the three supported
            # forms this parametrization exercises.
            if spec_type == "function":
                return trim_reads
            elif spec_type not in ["stage", "stage_name"]:
                raise ValueError("Unrecognized stage specification type: {}".
                                 format(spec_type))
            else:
                s = Stage(trim_reads)
                return s.name if spec_type == "stage_name" else s

        outfolder = tmpdir.strpath
        # At start, we should have no checkpoints.
        all_checkpoints_pattern = os.path.join(outfolder, "*" + CHECKPOINT_EXTENSION)
        assert [] == glob.glob(all_checkpoints_pattern)
        # Two managers with different pipeline names share one outfolder.
        plm1 = DummyPM(name1, outfolder)
        plm2 = DummyPM(name2, outfolder)
        checkpoint_name = "trim_reads"
        plm1.timestamp(checkpoint=stage_spec(), finished=True)
        # Find the checkpoints; there should only be one.
        checkpoint_pattern = os.path.join(
            outfolder, "{}_*{}".format(name1, CHECKPOINT_EXTENSION))
        checkpoints = glob.glob(checkpoint_pattern)
        assert 1 == len(checkpoints)
        assert 1 == len(glob.glob(all_checkpoints_pattern))
        # Check that we have the expected checkpoint.
        exp_chkpt_fpath = os.path.join(outfolder, "{}_{}".format(
            name1, checkpoint_name + CHECKPOINT_EXTENSION))
        assert exp_chkpt_fpath == checkpoints[0]
        # Create a second checkpoint with the same stage, but with a manager
        # of a different name.
        plm2.timestamp(checkpoint=stage_spec(), finished=True)
        checkpoint_pattern = os.path.join(
            outfolder, "{}_*{}".format(name2, CHECKPOINT_EXTENSION))
        checkpoints = glob.glob(checkpoint_pattern)
        assert 1 == len(checkpoints)
        all_checkpoints = glob.glob(all_checkpoints_pattern)
        assert 2 == len(all_checkpoints)
        exp_chkpt_fpath_2 = os.path.join(outfolder, "{}_{}".format(
            name2, checkpoint_name + CHECKPOINT_EXTENSION))
        assert {exp_chkpt_fpath, exp_chkpt_fpath_2} == set(all_checkpoints)
| <filename>tests/pipeline_manager/test_pipeline_manager_timestamp_checkpoint_filepath.py
""" Tests for construction of checkpoint filepath """
import glob
import os
import time
from pypiper import PipelineManager
from pypiper.const import CHECKPOINT_EXTENSION
from pypiper.stage import Stage
from tests.helpers import named_param
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class DummyPM(PipelineManager):
""" Simple override of true PipelineManager, for __init__ simplicity """
def __init__(self, name, outfolder):
self.name = name
self.outfolder = outfolder
self.start_point = None
self.stop_before = None
self.stop_after = None
self.halt_on_next = False
self.last_timestamp = time.time()
self.prev_checkpoint = None
self.curr_checkpoint = None
class PipelineMangerTimestampCheckpointFilePathTests:
""" Tests for determination of checkpoint filepath. """
@named_param(argnames=["name1", "name2"],
argvalues=[("chipseq", "ATACseq"), ("rnaKallisto", "wgbs")])
@named_param(argnames="spec_type",
argvalues=["stage_name", "stage", "function"])
def test_distinguishes_pipelines_within_outfolder(
self, name1, name2, spec_type, tmpdir):
"""
Checkpoint files within sample folder include pipeline name.
More specifically, we often have the case that a single sample's
name is the name of a subfolder, within the broader results
directory for an entire project, in which to store output files
associated with that particular sample. The sample in that case may
be associated with a protocol that maps to multiple pipelines, and
thus the sample may be processed by multiple pipelines. If each
pipeline had a unique set of stage names, we'd be fine with no
additional measures, but to avoid a checkpoint filename collision,
in which we would be unable to know which pipeline had generated
a given checkpoint file, we add the pipeline name to the checkpoint
file and assume that we're not processing the sample with multiple
identically named pipelines.
"""
# Define a dummy function to use as the callable for a Stage.
def trim_reads():
pass
def stage_spec():
if spec_type == "function":
return trim_reads
elif spec_type not in ["stage", "stage_name"]:
raise ValueError("Unrecognized stage specification type: {}".
format(spec_type))
else:
s = Stage(trim_reads)
return s.name if spec_type == "stage_name" else s
outfolder = tmpdir.strpath
# At start, we should have no checkpoints.
all_checkpoints_pattern = os.path.join(outfolder, "*" + CHECKPOINT_EXTENSION)
assert [] == glob.glob(all_checkpoints_pattern)
plm1 = DummyPM(name1, outfolder)
plm2 = DummyPM(name2, outfolder)
checkpoint_name = "trim_reads"
plm1.timestamp(checkpoint=stage_spec(), finished=True)
# Find the checkpoints; there should only be one.
checkpoint_pattern = os.path.join(
outfolder, "{}_*{}".format(name1, CHECKPOINT_EXTENSION))
checkpoints = glob.glob(checkpoint_pattern)
assert 1 == len(checkpoints)
assert 1 == len(glob.glob(all_checkpoints_pattern))
# Check that we have the expected checkpoint.
exp_chkpt_fpath = os.path.join(outfolder, "{}_{}".format(
name1, checkpoint_name + CHECKPOINT_EXTENSION))
assert exp_chkpt_fpath == checkpoints[0]
# Create a second checkpoint with the same stage, but with a manager
# of a different name.
plm2.timestamp(checkpoint=stage_spec(), finished=True)
checkpoint_pattern = os.path.join(
outfolder, "{}_*{}".format(name2, CHECKPOINT_EXTENSION))
checkpoints = glob.glob(checkpoint_pattern)
assert 1 == len(checkpoints)
all_checkpoints = glob.glob(all_checkpoints_pattern)
assert 2 == len(all_checkpoints)
exp_chkpt_fpath_2 = os.path.join(outfolder, "{}_{}".format(
name2, checkpoint_name + CHECKPOINT_EXTENSION))
assert {exp_chkpt_fpath, exp_chkpt_fpath_2} == set(all_checkpoints)
| en | 0.948589 | Tests for construction of checkpoint filepath Simple override of true PipelineManager, for __init__ simplicity Tests for determination of checkpoint filepath. Checkpoint files within sample folder include pipeline name. More specifically, we often have the case that a single sample's name is the name of a subfolder, within the broader results directory for an entire project, in which to store output files associated with that particular sample. The sample in that case may be associated with a protocol that maps to multiple pipelines, and thus the sample may be processed by multiple pipelines. If each pipeline had a unique set of stage names, we'd be fine with no additional measures, but to avoid a checkpoint filename collision, in which we would be unable to know which pipeline had generated a given checkpoint file, we add the pipeline name to the checkpoint file and assume that we're not processing the sample with multiple identically named pipelines. # Define a dummy function to use as the callable for a Stage. # At start, we should have no checkpoints. # Find the checkpoints; there should only be one. # Check that we have the expected checkpoint. # Create a second checkpoint with the same stage, but with a manager # of a different name. | 2.383616 | 2 |
backend/conferences/migrations/0021_allow_adding_a_topic_for_a_keynote.py | pauloxnet/pycon | 2 | 6618790 | <reponame>pauloxnet/pycon<gh_stars>1-10
# Generated by Django 3.2.9 on 2021-12-31 20:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('conferences', '0020_rename_keynote_fields'),
]
operations = [
migrations.AddField(
model_name='keynote',
name='topic',
field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='conferences.topic'),
),
]
| # Generated by Django 3.2.9 on 2021-12-31 20:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional ``topic`` foreign key to ``Keynote``.

    The field is nullable with a ``None`` default so existing keynote rows
    stay valid; deleting a topic sets the reference to NULL (SET_NULL)
    instead of cascading the delete to the keynote.
    """

    dependencies = [
        ('conferences', '0020_rename_keynote_fields'),
    ]

    operations = [
        migrations.AddField(
            model_name='keynote',
            name='topic',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, to='conferences.topic'),
        ),
    ]
graph_db/extractor.py | msk-mind/graph-db | 0 | 6618791 | <reponame>msk-mind/graph-db
'''
Created on April 22, 2021
@author: <EMAIL>
'''
import glob
import os
import shutil
from graph_db.common.config import APP_CFG, ConfigSet, DATA_CFG
from graph_db.common.custom_logger import init_logger
from graph_db.common.spark_session import SparkConfig
logger = init_logger()
def get_df(delta_path):
    """Load the delta table stored at ``delta_path`` into a Spark dataframe.

    :param delta_path: path to delta_table
    :return: spark dataframe
    """
    logger.info('extracting delta table ' + delta_path)
    session = SparkConfig().spark_session(config_name=APP_CFG, app_name="grapb_db")
    dataframe = session.read.format('delta').load(delta_path)
    logger.info('dataframe has ' + str(dataframe.count()) + ' records')
    return dataframe
def df_to_json(df, json_path):
    """Write ``df`` as a single JSON file named ``part.json`` under ``json_path``.

    :param df: spark dataframe to convert
    :param json_path: path where json should be written
    """
    df.coalesce(1).write.format('json').save(json_path)
    # Spark emits one part file with a generated name; rename it to a
    # predictable path.
    generated = glob.glob(json_path+'/*.json')[0]
    target = json_path+'/part.json'
    os.rename(generated, target)
    logger.info('wrote json for ' + str(df.count()) + ' records to '+target)
def delta_to_json(delta_path, json_path):
    """
    Read delta table and write to json file.

    If ``json_path`` already exists it is only rewritten when the
    ``overwrite_json`` flag is set in the data configuration; otherwise the
    existing output is kept and the export is skipped.

    :param delta_path: path to delta table
    :param json_path: path to json table
    """
    cfg = ConfigSet()
    overwrite_json = cfg.get_value(DATA_CFG + '::$.overwrite_json')
    if os.path.exists(json_path):
        if not overwrite_json:
            logger.info('skipping writing of json for ' + delta_path)
            return
        logger.info('overwriting ' + json_path)
        shutil.rmtree(json_path)
    # Single shared export path instead of duplicating it in both branches.
    df = get_df(delta_path)
    df_to_json(df, json_path)
| '''
Created on April 22, 2021
@author: <EMAIL>
'''
import glob
import os
import shutil
from graph_db.common.config import APP_CFG, ConfigSet, DATA_CFG
from graph_db.common.custom_logger import init_logger
from graph_db.common.spark_session import SparkConfig
logger = init_logger()
def get_df(delta_path):
    """
    Read delta table from the specified delta_path and return the dataframe.

    :param delta_path: path to delta_table
    :return: spark dataframe
    """
    logger.info('extracting delta table ' + delta_path)
    spark = SparkConfig().spark_session(config_name=APP_CFG, app_name="grapb_db")
    df = spark.read.format('delta').load(delta_path)
    logger.info('dataframe has ' + str(df.count()) + ' records')
    return df


def df_to_json(df, json_path):
    """
    Convert specified spark dataframe to json and write to the specified json_path.

    :param df: spark dataframe to convert
    :param json_path: path where json should be written
    """
    df.coalesce(1).write.format('json').save(json_path)
    # Spark emits one part file with a generated name; rename it to a
    # predictable path.
    json_file = glob.glob(json_path+'/*.json')[0]
    new_json_file = json_path+'/part.json'
    os.rename(json_file, new_json_file)
    logger.info('wrote json for ' + str(df.count()) + ' records to '+new_json_file)


def delta_to_json(delta_path, json_path):
    """
    Read delta table and write to json file.

    An existing ``json_path`` is only replaced when the ``overwrite_json``
    flag is set in the data configuration; otherwise the export is skipped.

    :param delta_path: path to delta table
    :param json_path: path to json table
    """
    cfg = ConfigSet()
    overwrite_json = cfg.get_value(DATA_CFG + '::$.overwrite_json')
    if os.path.exists(json_path):
        if not overwrite_json:
            logger.info('skipping writing of json for ' + delta_path)
            return
        logger.info('overwriting ' + json_path)
        shutil.rmtree(json_path)
    # Single shared export path instead of duplicating it in both branches.
    df = get_df(delta_path)
    df_to_json(df, json_path)
soft_intro_vae_3d/metrics/jsd.py | BossunWang/soft-intro-vae-pytorch | 144 | 6618792 | import numpy as np
import torch
from numpy.linalg import norm
from scipy.stats import entropy
from sklearn.neighbors import NearestNeighbors
__all__ = ['js_divercence_between_pc', 'jsd_between_point_cloud_sets']
#
# Compute JS divergence
#
def js_divercence_between_pc(pc1: torch.Tensor, pc2: torch.Tensor,
                             voxels: int = 64) -> float:
    """Jensen-Shannon divergence between two batches of point clouds.

    Each batch is binned into a ``voxels``^3 occupancy histogram first; the
    divergence is then taken between the two histograms.
    """
    histograms = (_pc_to_voxel_distribution(pc, voxels) for pc in (pc1, pc2))
    return _js_divergence(*histograms)
def _js_divergence(P, Q):
    """Jensen-Shannon divergence (base 2) between two histograms."""
    # Normalise both inputs so they are proper probability distributions.
    p = P / np.sum(P)
    q = Q / np.sum(Q)
    # JSD = H((P + Q) / 2) - (H(P) + H(Q)) / 2, entropies measured in bits.
    e1 = entropy(p, base=2)
    e2 = entropy(q, base=2)
    e_sum = entropy((p + q) / 2.0, base=2)
    return e_sum - ((e1 + e2) / 2.0)
def _jsdiv(P, Q):
    """Another way of computing JSD to check numerical stability."""

    def _kldiv(A, B):
        # Restrict to entries where both distributions carry mass so the
        # log ratio stays finite.
        mask = np.logical_and(A > 0, B > 0)
        a = A.copy()[mask]
        b = B.copy()[mask]
        return np.sum(a * np.log2(a / b))

    p = P / np.sum(P)
    q = Q / np.sum(Q)
    mixture = 0.5 * (p + q)
    return 0.5 * (_kldiv(p, mixture) + _kldiv(q, mixture))
def _pc_to_voxel_distribution(pc: torch.Tensor, n_voxels: int = 64) -> np.ndarray:
    """Histogram a batch of point clouds into a flattened voxel grid.

    Coordinates are clamped into [-0.5, 0.4999], shifted to [0, 1) and then
    binned per axis at resolution ``n_voxels``.
    """
    shifted = pc.clamp(-0.5, 0.4999) + 0.5
    # Points now lie in [0, 1), so scaling by the resolution bins them.
    bins = (shifted * n_voxels).int()
    flat = (bins[:, :, 0] * n_voxels + bins[:, :, 1]) * n_voxels + bins[:, :, 2]
    histogram = np.zeros(n_voxels**3, dtype=np.int32)
    voxel_ids, counts = np.unique(flat, return_counts=True)
    histogram[voxel_ids] = counts
    return histogram
#
# Stanford way to calculate JSD
#
def jsd_between_point_cloud_sets(sample_pcs, ref_pcs, voxels=28,
                                 in_unit_sphere=True):
    """Computes the JSD between two sets of point-clouds, as introduced in the
    paper ```Learning Representations And Generative Models For 3D Point
    Clouds```.

    Args:
        sample_pcs: (np.ndarray S1xR2x3) S1 point-clouds, each of R1 points.
        ref_pcs: (np.ndarray S2xR2x3) S2 point-clouds, each of R2 points.
        voxels: (int) grid-resolution. Affects granularity of measurements.
    """
    occupancy = [
        _entropy_of_occupancy_grid(pcs, voxels, in_unit_sphere)[1]
        for pcs in (sample_pcs, ref_pcs)
    ]
    return _js_divergence(occupancy[0], occupancy[1])
def _entropy_of_occupancy_grid(pclouds, grid_resolution, in_sphere=False):
    """Given a collection of point-clouds, estimate the entropy of the random
    variables corresponding to occupancy-grid activation patterns.

    Inputs:
        pclouds: (numpy array) #point-clouds x points per point-cloud x 3
        grid_resolution (int) size of occupancy grid that will be used.

    Returns:
        A tuple ``(mean_cell_entropy, grid_counters)`` where
        ``grid_counters[i]`` is the total number of points (over all clouds)
        whose nearest grid cell is ``i``.
    """
    pclouds = pclouds.cpu().numpy()
    # epsilon/bound are only used by the commented-out sanity checks below.
    epsilon = 10e-4
    bound = 0.5 + epsilon
    # if abs(np.max(pclouds)) > bound or abs(np.min(pclouds)) > bound:
    #     warnings.warn('Point-clouds are not in unit cube.')
    #
    # if in_sphere and np.max(np.sqrt(np.sum(pclouds ** 2, axis=2))) > bound:
    #     warnings.warn('Point-clouds are not in unit sphere.')
    grid_coordinates, _ = _unit_cube_grid_point_cloud(grid_resolution, in_sphere)
    grid_coordinates = grid_coordinates.reshape(-1, 3)
    grid_counters = np.zeros(len(grid_coordinates))
    grid_bernoulli_rvars = np.zeros(len(grid_coordinates))
    # Map every point of every cloud to its nearest grid cell.
    nn = NearestNeighbors(n_neighbors=1).fit(grid_coordinates)
    for pc in pclouds:
        _, indices = nn.kneighbors(pc)
        indices = np.squeeze(indices)
        for i in indices:
            grid_counters[i] += 1  # per-point occupancy counts
        indices = np.unique(indices)
        for i in indices:
            grid_bernoulli_rvars[i] += 1  # per-cloud activation counts
    # Treat each cell as a Bernoulli variable ("activated by a random cloud
    # or not") and average the per-cell entropies.
    acc_entropy = 0.0
    n = float(len(pclouds))
    for g in grid_bernoulli_rvars:
        p = 0.0
        if g > 0:
            p = float(g) / n
        acc_entropy += entropy([p, 1.0 - p])
    return acc_entropy / len(grid_counters), grid_counters
def _unit_cube_grid_point_cloud(resolution, clip_sphere=False):
    """Return the cell-center coordinates of a ``resolution``^3 grid spanning
    the unit cube [-0.5, 0.5]^3, together with the cell spacing.

    When ``clip_sphere`` is True the grid is flattened and the "corner"
    cells lying outside the unit sphere are discarded.
    """
    grid = np.ndarray((resolution, resolution, resolution, 3), np.float32)
    spacing = 1.0 / float(resolution - 1)
    for cell in np.ndindex(resolution, resolution, resolution):
        grid[cell] = [axis_index * spacing - 0.5 for axis_index in cell]
    if clip_sphere:
        flat = grid.reshape(-1, 3)
        grid = flat[norm(flat, axis=1) <= 0.5]
    return grid, spacing
| import numpy as np
import torch
from numpy.linalg import norm
from scipy.stats import entropy
from sklearn.neighbors import NearestNeighbors
__all__ = ['js_divercence_between_pc', 'jsd_between_point_cloud_sets']
#
# Compute JS divergence
#
def js_divercence_between_pc(pc1: torch.Tensor, pc2: torch.Tensor,
                             voxels: int = 64) -> float:
    """Jensen-Shannon divergence between two batches of point clouds.

    Each batch is binned into a ``voxels``^3 occupancy histogram first; the
    divergence is then taken between the two histograms.
    """
    histograms = (_pc_to_voxel_distribution(pc, voxels) for pc in (pc1, pc2))
    return _js_divergence(*histograms)


def _js_divergence(P, Q):
    """Jensen-Shannon divergence (base 2) between two histograms."""
    # Normalise both inputs so they are proper probability distributions.
    p = P / np.sum(P)
    q = Q / np.sum(Q)
    # JSD = H((P + Q) / 2) - (H(P) + H(Q)) / 2, entropies measured in bits.
    e1 = entropy(p, base=2)
    e2 = entropy(q, base=2)
    e_sum = entropy((p + q) / 2.0, base=2)
    return e_sum - ((e1 + e2) / 2.0)


def _jsdiv(P, Q):
    """Another way of computing JSD to check numerical stability."""

    def _kldiv(A, B):
        # Restrict to entries where both distributions carry mass so the
        # log ratio stays finite.
        mask = np.logical_and(A > 0, B > 0)
        a = A.copy()[mask]
        b = B.copy()[mask]
        return np.sum(a * np.log2(a / b))

    p = P / np.sum(P)
    q = Q / np.sum(Q)
    mixture = 0.5 * (p + q)
    return 0.5 * (_kldiv(p, mixture) + _kldiv(q, mixture))


def _pc_to_voxel_distribution(pc: torch.Tensor, n_voxels: int = 64) -> np.ndarray:
    """Histogram a batch of point clouds into a flattened voxel grid.

    Coordinates are clamped into [-0.5, 0.4999], shifted to [0, 1) and then
    binned per axis at resolution ``n_voxels``.
    """
    shifted = pc.clamp(-0.5, 0.4999) + 0.5
    # Points now lie in [0, 1), so scaling by the resolution bins them.
    bins = (shifted * n_voxels).int()
    flat = (bins[:, :, 0] * n_voxels + bins[:, :, 1]) * n_voxels + bins[:, :, 2]
    histogram = np.zeros(n_voxels**3, dtype=np.int32)
    voxel_ids, counts = np.unique(flat, return_counts=True)
    histogram[voxel_ids] = counts
    return histogram


#
# Stanford way to calculate JSD
#
def jsd_between_point_cloud_sets(sample_pcs, ref_pcs, voxels=28,
                                 in_unit_sphere=True):
    """Computes the JSD between two sets of point-clouds, as introduced in the
    paper ```Learning Representations And Generative Models For 3D Point
    Clouds```.

    Args:
        sample_pcs: (np.ndarray S1xR2x3) S1 point-clouds, each of R1 points.
        ref_pcs: (np.ndarray S2xR2x3) S2 point-clouds, each of R2 points.
        voxels: (int) grid-resolution. Affects granularity of measurements.
    """
    occupancy = [
        _entropy_of_occupancy_grid(pcs, voxels, in_unit_sphere)[1]
        for pcs in (sample_pcs, ref_pcs)
    ]
    return _js_divergence(occupancy[0], occupancy[1])


def _entropy_of_occupancy_grid(pclouds, grid_resolution, in_sphere=False):
    """Given a collection of point-clouds, estimate the entropy of the random
    variables corresponding to occupancy-grid activation patterns.

    Inputs:
        pclouds: (numpy array) #point-clouds x points per point-cloud x 3
        grid_resolution (int) size of occupancy grid that will be used.

    Returns:
        A tuple ``(mean_cell_entropy, grid_counters)``.
    """
    pclouds = pclouds.cpu().numpy()
    # epsilon/bound are only used by the commented-out sanity checks below.
    epsilon = 10e-4
    bound = 0.5 + epsilon
    # if abs(np.max(pclouds)) > bound or abs(np.min(pclouds)) > bound:
    #     warnings.warn('Point-clouds are not in unit cube.')
    #
    # if in_sphere and np.max(np.sqrt(np.sum(pclouds ** 2, axis=2))) > bound:
    #     warnings.warn('Point-clouds are not in unit sphere.')
    grid_coordinates, _ = _unit_cube_grid_point_cloud(grid_resolution, in_sphere)
    grid_coordinates = grid_coordinates.reshape(-1, 3)
    grid_counters = np.zeros(len(grid_coordinates))
    grid_bernoulli_rvars = np.zeros(len(grid_coordinates))
    # Map every point of every cloud to its nearest grid cell.
    nn = NearestNeighbors(n_neighbors=1).fit(grid_coordinates)
    for pc in pclouds:
        _, indices = nn.kneighbors(pc)
        indices = np.squeeze(indices)
        for i in indices:
            grid_counters[i] += 1  # per-point occupancy counts
        indices = np.unique(indices)
        for i in indices:
            grid_bernoulli_rvars[i] += 1  # per-cloud activation counts
    # Treat each cell as a Bernoulli variable ("activated by a random cloud
    # or not") and average the per-cell entropies.
    acc_entropy = 0.0
    n = float(len(pclouds))
    for g in grid_bernoulli_rvars:
        p = 0.0
        if g > 0:
            p = float(g) / n
        acc_entropy += entropy([p, 1.0 - p])
    return acc_entropy / len(grid_counters), grid_counters


def _unit_cube_grid_point_cloud(resolution, clip_sphere=False):
    """Return the cell-center coordinates of a ``resolution``^3 grid spanning
    the unit cube [-0.5, 0.5]^3, together with the cell spacing.

    When ``clip_sphere`` is True the grid is flattened and the "corner"
    cells lying outside the unit sphere are discarded.
    """
    grid = np.ndarray((resolution, resolution, resolution, 3), np.float32)
    spacing = 1.0 / float(resolution - 1)
    for cell in np.ndindex(resolution, resolution, resolution):
        grid[cell] = [axis_index * spacing - 0.5 for axis_index in cell]
    if clip_sphere:
        flat = grid.reshape(-1, 3)
        grid = flat[norm(flat, axis=1) <= 0.5]
    return grid, spacing
return grid, spacing
| en | 0.662408 | # # Compute JS divergence # Method for computing JSD from 2 sets of point clouds. # Ensure probabilities. # Calculate JSD using scipy.stats.entropy() # Calcujate JS-Div using manually defined KL divergence. # res2 = _jsdiv(P_, Q_) # # if not np.allclose(res1, res2, atol=10e-5, rtol=0): # warnings.warn('Numerical values of two JSD methods don\'t agree.') Another way of computing JSD to check numerical stability. # Because points are in range [0, 1], simple multiplication will bin them. # # Stanford way to calculate JSD # Computes the JSD between two sets of point-clouds, as introduced in the paper ```Learning Representations And Generative Models For 3D Point Clouds```. Args: sample_pcs: (np.ndarray S1xR2x3) S1 point-clouds, each of R1 points. ref_pcs: (np.ndarray S2xR2x3) S2 point-clouds, each of R2 points. voxels: (int) grid-resolution. Affects granularity of measurements. Given a collection of point-clouds, estimate the entropy of the random variables corresponding to occupancy-grid activation patterns. Inputs: pclouds: (numpy array) #point-clouds x points per point-cloud x 3 grid_resolution (int) size of occupancy grid that will be used. # if abs(np.max(pclouds)) > bound or abs(np.min(pclouds)) > bound: # warnings.warn('Point-clouds are not in unit cube.') # # if in_sphere and np.max(np.sqrt(np.sum(pclouds ** 2, axis=2))) > bound: # warnings.warn('Point-clouds are not in unit sphere.') Returns the center coordinates of each cell of a 3D grid with resolution^3 cells, that is placed in the unit-cube. If clip_sphere it True it drops the "corner" cells that lie outside the unit-sphere. | 2.495349 | 2 |
org/bobink/my_eval/evaluator.py | bobink/my_python_eval | 0 | 6618793 | from org.bobink.my_eval.eval_expression import EvalExpressionVisitor, EvalBinOp, EvalValue, EvalExpression, EvalBinOpType
class _EvalExpressionEvaluator(EvalExpressionVisitor):
    """Visitor that folds an expression tree into a single integer result."""

    def __init__(self):
        self.result = 0

    def visit_bin_op(self, e: EvalBinOp):
        operator = e.get_type()
        lhs = eval_expression(e.get_left())
        rhs = eval_expression(e.get_right())
        self.result = self.eval_bin_op(operator, lhs, rhs)

    @staticmethod
    def eval_bin_op(op: EvalBinOpType, left: int, right: int) -> int:
        if op == EvalBinOpType.PLUS:
            return left + right
        if op == EvalBinOpType.MINUS:
            return left - right
        if op == EvalBinOpType.TIMES:
            return left * right
        if op == EvalBinOpType.DIV:
            # int() truncates towards zero; note this goes through float
            # division and may lose precision for very large operands.
            return int(left / right)
        raise ValueError

    def visit_value(self, e: EvalValue):
        self.result = e.get_value()

    def get_result(self):
        return self.result
def eval_expression(e: EvalExpression) -> int:
    """Evaluate an expression tree by dispatching through the visitor."""
    evaluator = _EvalExpressionEvaluator()
    e.accept(evaluator)
    return evaluator.get_result()
| from org.bobink.my_eval.eval_expression import EvalExpressionVisitor, EvalBinOp, EvalValue, EvalExpression, EvalBinOpType
class _EvalExpressionEvaluator(EvalExpressionVisitor):
    """Visitor that folds an expression tree into a single integer result."""

    def __init__(self):
        self.result = 0

    def visit_bin_op(self, e: EvalBinOp):
        operator = e.get_type()
        lhs = eval_expression(e.get_left())
        rhs = eval_expression(e.get_right())
        self.result = self.eval_bin_op(operator, lhs, rhs)

    @staticmethod
    def eval_bin_op(op: EvalBinOpType, left: int, right: int) -> int:
        if op == EvalBinOpType.PLUS:
            return left + right
        if op == EvalBinOpType.MINUS:
            return left - right
        if op == EvalBinOpType.TIMES:
            return left * right
        if op == EvalBinOpType.DIV:
            # int() truncates towards zero; note this goes through float
            # division and may lose precision for very large operands.
            return int(left / right)
        raise ValueError

    def visit_value(self, e: EvalValue):
        self.result = e.get_value()

    def get_result(self):
        return self.result


def eval_expression(e: EvalExpression) -> int:
    """Evaluate an expression tree by dispatching through the visitor."""
    evaluator = _EvalExpressionEvaluator()
    e.accept(evaluator)
    return evaluator.get_result()
| none | 1 | 2.979779 | 3 | |
src/sqlite_connector.py | harrydaihaolin/Stock-Trading-Server | 0 | 6618794 | import logging
logging = logging.getLogger()
import sqlite3
import csv
import constants
from functools import reduce
from os import walk, path
def get_latest_signal(ticker):
    """Return the most recent trading signal stored for ``ticker``.

    NOTE(review): ``ticker`` is interpolated into the SQL as a table name
    and must therefore come from a trusted source.
    """
    try:
        connection = sqlite3.connect(constants.DATABASENAME)
        cursor = connection.cursor()
        cursor.execute('SELECT signal from {} limit 1'.format(ticker))
        result = cursor.fetchall()
        connection.close()
        return result
    except Exception as e:
        logging.error(e)
def get_signal(date, ticker):
    """Return the trading signal recorded for ``ticker`` at timestamp ``date``.

    :param date: unix timestamp of the row to look up
    :param ticker: table name holding the ticker's data (trusted input only;
        table names cannot be bound as SQL parameters)
    :return: list of matching ``signal`` rows, or None on error
    """
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        # Bind the timestamp as a parameter instead of string-formatting it
        # into the SQL, which offered no protection against malformed input.
        cur.execute('SELECT signal from {} where timestamp=?'.format(ticker), (date,))
        rows = cur.fetchall()
        con.close()
        return rows
    except Exception as e:
        logging.error(e)
def get_rolling_data(ticker):
    """Fetch all (timestamp, close) pairs stored for ``ticker``."""
    try:
        connection = sqlite3.connect(constants.DATABASENAME)
        cursor = connection.cursor()
        cursor.execute('SELECT timestamp, close FROM {}'.format(ticker))
        series = cursor.fetchall()
        connection.close()
        return series
    except Exception as e:
        logging.error(e)
def get_sigma(ticker):
    """Fetch every close price stored for ``ticker``.

    Used as input for volatility (sigma) calculations by the caller.
    """
    try:
        connection = sqlite3.connect(constants.DATABASENAME)
        cursor = connection.cursor()
        # there are 160 timestamps within 24 hours
        cursor.execute('SELECT close FROM {}'.format(ticker))
        closes = cursor.fetchall()
        connection.close()
        return closes
    except Exception as e:
        logging.error(e)
def delete_ticker(ticker):
    """Drop the table that stores data for ``ticker``."""
    try:
        connection = sqlite3.connect(constants.DATABASENAME)
        cursor = connection.cursor()
        cursor.execute('DROP TABLE {}'.format(ticker))
        connection.commit()
        connection.close()
    except Exception as e:
        logging.error(e)
def get_latest_prices(ticker):
    """Return the most recent close price stored for ``ticker``."""
    try:
        connection = sqlite3.connect(constants.DATABASENAME)
        cursor = connection.cursor()
        cursor.execute('SELECT close from {} limit 1'.format(ticker))
        result = cursor.fetchall()
        connection.close()
        return result
    except Exception as e:
        logging.error(e)
def get_current_prices(date, ticker):
    """Return the close price recorded for ``ticker`` at timestamp ``date``.

    :param date: unix timestamp of the row to look up
    :param ticker: table name holding the ticker's data (trusted input only)
    :return: list of matching ``close`` rows, or None on error
    """
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        # Bind the timestamp as a parameter instead of string-formatting it
        # into the SQL.
        cur.execute('SELECT close from {} where timestamp=?'.format(ticker), (date,))
        rows = cur.fetchall()
        con.close()
        return rows
    except Exception as e:
        # Was print(e); use the module logger like every other helper here.
        logging.error(e)
def reload_data(filename):
    """Drop and re-create the table backing ``filename`` from out/<filename>.

    The table name is the file name without extension; the schema depends on
    whether it is a price file, a result file or a raw OHLCV file.
    """
    try:
        logging.info("reloading the data from {}".format(filename))
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        f = open("out/" + filename)
        rows = csv.reader(f)
        next(rows, None)  # skip the header row
        no_ext_name = path.splitext(filename)[0]
        cur.execute("DROP TABLE {}".format(no_ext_name))
        if "price" in no_ext_name:
            cur.execute("CREATE TABLE {} (timestamp, price)".format(no_ext_name))
            # BUG FIX: the INSERTs in this and the next branch were missing
            # .format(no_ext_name), leaving a literal "{}" as the table name.
            cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?)".format(no_ext_name), rows)
        elif "result" in no_ext_name:
            cur.execute("CREATE TABLE {} (timestamp, price, signal, pnl)".format(no_ext_name))
            cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?)".format(no_ext_name), rows)
        else:
            cur.execute("CREATE TABLE {} (timestamp, open, high, low, close, volume)".format(no_ext_name))
            cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?, ?, ?)".format(no_ext_name), rows)
        f.close()  # release the CSV handle (was leaked before)
        con.commit()
        con.close()
        logging.info("data successfully loaded")
    except Exception as e:
        logging.error(e)
def load_data():
    """Load every CSV in out/ into its own table when the server starts.

    Each file becomes a table named after the file (without extension); the
    schema is chosen from the file name, mirroring reload_data().
    """
    try:
        logging.info("loading the data to local database")
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        _, _, filenames = next(walk('out/'))
        for filename in filenames:
            f = open("out/" + filename)
            rows = csv.reader(f)
            next(rows, None)  # skip the headers
            no_ext_name = path.splitext(filename)[0]
            if "price" in no_ext_name:
                cur.execute("CREATE TABLE {} (timestamp, price)".format(no_ext_name))
                # BUG FIX: the INSERTs in this and the next branch were missing
                # .format(no_ext_name), leaving a literal "{}" as the table name.
                cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?)".format(no_ext_name), rows)
            elif "result" in no_ext_name:
                cur.execute("CREATE TABLE {} (timestamp, price, signal, pnl)".format(no_ext_name))
                cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?)".format(no_ext_name), rows)
            else:
                cur.execute("CREATE TABLE {} (timestamp, open, high, low, close, volume)".format(no_ext_name))
                cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?, ?, ?)".format(no_ext_name), rows)
            f.close()  # close each CSV once its rows are inserted
        con.commit()
        con.close()
        logging.info("data successfully loaded")
    except Exception as e:
        logging.error(e)
| import logging
logging = logging.getLogger()
import sqlite3
import csv
import constants
from functools import reduce
from os import walk, path
def get_latest_signal(ticker):
    """Return the most recent trading signal stored for ``ticker``.

    NOTE(review): ``ticker`` is interpolated into the SQL as a table name
    and must therefore come from a trusted source.
    """
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        cur.execute('SELECT signal from {} limit 1'.format(ticker))
        rows = cur.fetchall()
        con.close()
        return rows
    except Exception as e:
        logging.error(e)


def get_signal(date, ticker):
    """Return the trading signal recorded for ``ticker`` at timestamp ``date``."""
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        # Bind the timestamp as a parameter instead of string-formatting it
        # into the SQL.
        cur.execute('SELECT signal from {} where timestamp=?'.format(ticker), (date,))
        rows = cur.fetchall()
        con.close()
        return rows
    except Exception as e:
        logging.error(e)


def get_rolling_data(ticker):
    """Fetch all (timestamp, close) pairs stored for ``ticker``."""
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        cur.execute('SELECT timestamp, close FROM {}'.format(ticker))
        rolling_data = cur.fetchall()
        con.close()
        return rolling_data
    except Exception as e:
        logging.error(e)


def get_sigma(ticker):
    """Fetch every close price for ``ticker`` (input for volatility calcs)."""
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        # there are 160 timestamps within 24 hours
        cur.execute('SELECT close FROM {}'.format(ticker))
        all_prices = cur.fetchall()
        con.close()
        return all_prices
    except Exception as e:
        logging.error(e)


def delete_ticker(ticker):
    """Drop the table that stores data for ``ticker``."""
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        cur.execute('DROP TABLE {}'.format(ticker))
        con.commit()
        con.close()
    except Exception as e:
        logging.error(e)


def get_latest_prices(ticker):
    """Return the most recent close price stored for ``ticker``."""
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        cur.execute('SELECT close from {} limit 1'.format(ticker))
        rows = cur.fetchall()
        con.close()
        return rows
    except Exception as e:
        logging.error(e)


def get_current_prices(date, ticker):
    """Return the close price recorded for ``ticker`` at timestamp ``date``."""
    try:
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        # Bind the timestamp as a parameter instead of string-formatting it
        # into the SQL.
        cur.execute('SELECT close from {} where timestamp=?'.format(ticker), (date,))
        rows = cur.fetchall()
        con.close()
        return rows
    except Exception as e:
        # BUG FIX: was print(e); use the module logger like the other helpers.
        logging.error(e)


def reload_data(filename):
    """Drop and re-create the table backing ``filename`` from out/<filename>."""
    try:
        logging.info("reloading the data from {}".format(filename))
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        f = open("out/" + filename)
        rows = csv.reader(f)
        next(rows, None)  # skip the header row
        no_ext_name = path.splitext(filename)[0]
        cur.execute("DROP TABLE {}".format(no_ext_name))
        if "price" in no_ext_name:
            cur.execute("CREATE TABLE {} (timestamp, price)".format(no_ext_name))
            # BUG FIX: the INSERTs in this and the next branch were missing
            # .format(no_ext_name), leaving a literal "{}" as the table name.
            cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?)".format(no_ext_name), rows)
        elif "result" in no_ext_name:
            cur.execute("CREATE TABLE {} (timestamp, price, signal, pnl)".format(no_ext_name))
            cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?)".format(no_ext_name), rows)
        else:
            cur.execute("CREATE TABLE {} (timestamp, open, high, low, close, volume)".format(no_ext_name))
            cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?, ?, ?)".format(no_ext_name), rows)
        f.close()  # release the CSV handle (was leaked before)
        con.commit()
        con.close()
        logging.info("data successfully loaded")
    except Exception as e:
        logging.error(e)


def load_data():
    """Load every CSV in out/ into its own table when the server starts."""
    try:
        logging.info("loading the data to local database")
        con = sqlite3.connect(constants.DATABASENAME)
        cur = con.cursor()
        _, _, filenames = next(walk('out/'))
        for filename in filenames:
            f = open("out/" + filename)
            rows = csv.reader(f)
            next(rows, None)  # skip the headers
            no_ext_name = path.splitext(filename)[0]
            if "price" in no_ext_name:
                cur.execute("CREATE TABLE {} (timestamp, price)".format(no_ext_name))
                # BUG FIX: the INSERTs in this and the next branch were missing
                # .format(no_ext_name), leaving a literal "{}" as the table name.
                cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?)".format(no_ext_name), rows)
            elif "result" in no_ext_name:
                cur.execute("CREATE TABLE {} (timestamp, price, signal, pnl)".format(no_ext_name))
                cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?)".format(no_ext_name), rows)
            else:
                cur.execute("CREATE TABLE {} (timestamp, open, high, low, close, volume)".format(no_ext_name))
                cur.executemany("INSERT INTO {} VALUES (CAST(strftime('%s', ?) as integer), ?, ?, ?, ?, ?)".format(no_ext_name), rows)
            f.close()  # close each CSV once its rows are inserted
        con.commit()
        con.close()
        logging.info("data successfully loaded")
    except Exception as e:
        logging.error(e)
| en | 0.776875 | # there are 160 timestamps within 24 hours # load the data when starting the server # skip the headers | 2.618879 | 3 |
varats-core/varats/provider/cve/cve_provider.py | Kaufi-Jonas/VaRA-Tool-Suite | 8 | 6618795 | <filename>varats-core/varats/provider/cve/cve_provider.py
"""Module for the :class:`CVEProvider`."""
import sys
import typing as tp
from benchbuild.project import Project
from varats.project.project_util import get_local_project_git_path
from varats.provider.cve.cve import CVE
from varats.provider.cve.cve_map import generate_cve_map, CVEDict
from varats.provider.provider import Provider
from varats.utils.git_util import FullCommitHash
if sys.version_info <= (3, 8):
from typing_extensions import Protocol, runtime_checkable
else:
from typing import Protocol, runtime_checkable
@runtime_checkable
class CVEProviderHook(Protocol):
    """
    Gives the :class:`CVEProvider` the necessary information how to find CVEs
    and CWEs for a project.

    This class should be inherited by projects.
    """

    @classmethod
    def get_cve_product_info(cls) -> tp.List[tp.Tuple[str, str]]:
        """
        Get information on how to find CVEs for a project.

        Returns:
            a list of ``(vendor, product)`` tuples
        """
class CVEProvider(Provider):
    """Provides CVE and CWE information for a project."""

    def __init__(self, project: tp.Type[Project]) -> None:
        super().__init__(project)
        if not issubclass(project, CVEProviderHook):
            raise ValueError(
                f"Project {project} does not implement "
                f"CVEProviderHook."
            )
        # Map of fixing revision -> CVE information, built once per instance
        # from the project's (vendor, product) pairs.
        self.__cve_map: CVEDict = generate_cve_map(
            get_local_project_git_path(project.NAME),
            project.get_cve_product_info()
        )

    @classmethod
    def create_provider_for_project(
        cls, project: tp.Type[Project]
    ) -> tp.Optional['CVEProvider']:
        # Only projects that declare CVE product information get a provider.
        if not issubclass(project, CVEProviderHook):
            return None
        return CVEProvider(project)

    @classmethod
    def create_default_provider(
        cls, project: tp.Type[Project]
    ) -> 'CVEProvider':
        return CVEDefaultProvider(project)

    def get_revision_cve_tuples(
        self
    ) -> tp.Set[tp.Tuple[FullCommitHash, tp.FrozenSet[CVE]]]:
        """
        Get all CVEs associated with this provider's project along with the
        fixing commits/versions.

        Return:
            a set of tuples of commit hash and cves
        """
        return {
            (revision, frozenset(entry["cve"]))
            for revision, entry in self.__cve_map.items()
        }
class CVEDefaultProvider(CVEProvider):
    """Default implementation of the :class:`CVE provider` for projects that do
    not (yet) support CVEs."""

    def __init__(self, project: tp.Type[Project]) -> None:
        # pylint: disable=E1003
        # Deliberately skip CVEProvider.__init__ (which requires the project
        # to implement CVEProviderHook) and initialise the plain Provider
        # base instead.
        super(CVEProvider, self).__init__(project)

    def get_revision_cve_tuples(
        self
    ) -> tp.Set[tp.Tuple[FullCommitHash, tp.FrozenSet[CVE]]]:
        # No CVE data is available for projects without the hook.
        return set()
| <filename>varats-core/varats/provider/cve/cve_provider.py
"""Module for the :class:`CVEProvider`."""
import sys
import typing as tp
from benchbuild.project import Project
from varats.project.project_util import get_local_project_git_path
from varats.provider.cve.cve import CVE
from varats.provider.cve.cve_map import generate_cve_map, CVEDict
from varats.provider.provider import Provider
from varats.utils.git_util import FullCommitHash
if sys.version_info <= (3, 8):
from typing_extensions import Protocol, runtime_checkable
else:
from typing import Protocol, runtime_checkable
@runtime_checkable
class CVEProviderHook(Protocol):
    """
    Gives the :class:`CVEProvider` the necessary information how to find CVEs
    and CWEs for a project.

    This class should be inherited by projects.
    """

    @classmethod
    def get_cve_product_info(cls) -> tp.List[tp.Tuple[str, str]]:
        """
        Get information on how to find CVEs for a project.

        Returns:
            a list of ``(vendor, product)`` tuples
        """


class CVEProvider(Provider):
    """Provides CVE and CWE information for a project."""

    def __init__(self, project: tp.Type[Project]) -> None:
        super().__init__(project)
        project_name = project.NAME
        if issubclass(project, CVEProviderHook):
            # Map of fixing revision -> CVE information, built once per
            # instance from the project's (vendor, product) pairs.
            self.__cve_map: CVEDict = generate_cve_map(
                get_local_project_git_path(project_name),
                project.get_cve_product_info()
            )
        else:
            raise ValueError(
                f"Project {project} does not implement "
                f"CVEProviderHook."
            )

    @classmethod
    def create_provider_for_project(
        cls, project: tp.Type[Project]
    ) -> tp.Optional['CVEProvider']:
        # Only projects that declare CVE product information get a provider.
        if issubclass(project, CVEProviderHook):
            return CVEProvider(project)
        return None

    @classmethod
    def create_default_provider(
        cls, project: tp.Type[Project]
    ) -> 'CVEProvider':
        return CVEDefaultProvider(project)

    def get_revision_cve_tuples(
        self
    ) -> tp.Set[tp.Tuple[FullCommitHash, tp.FrozenSet[CVE]]]:
        """
        Get all CVEs associated with this provider's project along with the
        fixing commits/versions.

        Return:
            a set of tuples of commit hash and cves
        """
        return {(k, frozenset(v["cve"])) for k, v in self.__cve_map.items()}


class CVEDefaultProvider(CVEProvider):
    """Default implementation of the :class:`CVE provider` for projects that do
    not (yet) support CVEs."""

    def __init__(self, project: tp.Type[Project]) -> None:
        # pylint: disable=E1003
        # Deliberately skip CVEProvider.__init__ (which requires the project
        # to implement CVEProviderHook) and initialise the plain Provider
        # base instead.
        super(CVEProvider, self).__init__(project)

    def get_revision_cve_tuples(
        self
    ) -> tp.Set[tp.Tuple[FullCommitHash, tp.FrozenSet[CVE]]]:
        # No CVE data is available for projects without the hook.
        return set()
| en | 0.821853 | Module for the :class:`CVEProvider`. Gives the :class:`CVEProvider` the necessary information how to find CVEs and CWEs for a project. This class should be inherited by projects. Get information on how to find CVEs for a project. Returns: a tuple ``(vendor, product)`` Provides CVE and CWE information for a project. Get all CVEs associated with this provider's project along with the fixing commits/versions. Return: a set of tuples of commit hash and cves Default implementation of the :class:`CVE provider` for projects that do not (yet) support CVEs. # pylint: disable=E1003 | 2.287673 | 2 |
cursoemvideo/aulatuplas.py | rafaelsantosmg/cev_python3 | 1 | 6618796 | lanche = ('Hambúrguer', 'Suco', 'Pizza', 'Pudim', 'Batata frita')
print(lanche)
print()
for comida in lanche: # mostra os itens da variavel seguindo a ordem dos blocos [0:...]
print(f'Eu comi {comida}')
print()
for cont in range(0, len(lanche)): # mostra os itens da variavel e a posição dentro de cada bloco
print(f'Eu vou comer {lanche[cont]} na posição {cont}')
print()
for pos, comida in enumerate(lanche): # define a variavel para o comando enumerate e mostra os itens da variavel e a
# posição dentro de cada bloco
print(f'Eu vou comer {comida} na posição {pos}')
print()
print('Cara comi demais!')
print()
print(sorted(lanche)) # Ordena os itens da variável
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b # soma a tupla a + b, nota que os valores são agregados e não somados
d = b + a # soma a tupla b + a, nota que os valores são agregados e não somados
print(a)
print(b)
print(c)
print(d)
print(c.count(5)) # metodo count mostra quantas x '5' aparece na tupla
print(c.index(4)) # metodo index mostra a posição que foi encontrado o primeiro '4'
| lanche = ('Hambúrguer', 'Suco', 'Pizza', 'Pudim', 'Batata frita')
print(lanche)
print()
for comida in lanche: # mostra os itens da variavel seguindo a ordem dos blocos [0:...]
print(f'Eu comi {comida}')
print()
for cont in range(0, len(lanche)): # mostra os itens da variavel e a posição dentro de cada bloco
print(f'Eu vou comer {lanche[cont]} na posição {cont}')
print()
for pos, comida in enumerate(lanche): # define a variavel para o comando enumerate e mostra os itens da variavel e a
# posição dentro de cada bloco
print(f'Eu vou comer {comida} na posição {pos}')
print()
print('Cara comi demais!')
print()
print(sorted(lanche)) # Ordena os itens da variável
a = (2, 5, 4)
b = (5, 8, 1, 2)
c = a + b # soma a tupla a + b, nota que os valores são agregados e não somados
d = b + a # soma a tupla b + a, nota que os valores são agregados e não somados
print(a)
print(b)
print(c)
print(d)
print(c.count(5)) # metodo count mostra quantas x '5' aparece na tupla
print(c.index(4)) # metodo index mostra a posição que foi encontrado o primeiro '4'
| pt | 0.976634 | # mostra os itens da variavel seguindo a ordem dos blocos [0:...] # mostra os itens da variavel e a posição dentro de cada bloco # define a variavel para o comando enumerate e mostra os itens da variavel e a # posição dentro de cada bloco # Ordena os itens da variável # soma a tupla a + b, nota que os valores são agregados e não somados # soma a tupla b + a, nota que os valores são agregados e não somados # metodo count mostra quantas x '5' aparece na tupla # metodo index mostra a posição que foi encontrado o primeiro '4' | 4.062884 | 4 |
slammer_driver/scripts/notification.py | AlessioMorale/slammer_rover | 3 | 6618797 | <reponame>AlessioMorale/slammer_rover<filename>slammer_driver/scripts/notification.py
#!/usr/bin/env python3
import rospy
import subprocess
import math
from sensor_msgs.msg import BatteryState
from ros_signalling.msg import blink
battery_voltage = 0
battery_current = 0
battery_charge = 0
battery_percentage = 0
battery_warn_threshold = 13.2
battery_critical_threshold = 13.0
front = 3
lamp = 1
front_lamps = [lamp, 1]
rear = 0
front_count = 8
rear_count = 20
# R, G, B
color_yellow = [ 0x80, 40, 00]
color_dim_red = [16, 0, 0]
color_dim_white = [16, 16, 16]
color_green = [0, 16, 0]
color_black = [0, 0, 0]
front_colour = front_count * [color_yellow]
rear_colour = 5 * [color_green] + 6 * [color_dim_white] + 5 * [color_dim_red] + [color_green]
front_lamps_color = [16, 16, 16]
lamp_max = 200
lamp_batt_ok = [lamp_max, lamp_max, lamp_max]
lamp_batt_warn = [lamp_max // 2, lamp_max, 0]
lamp_batt_critical = [0, lamp_max, 0]
def on_batterystate(msg):
global battery_voltage
global battery_current
global battery_charge
global battery_percentage
battery_voltage = msg.voltage
battery_current = msg.current
battery_charge = msg.charge
battery_percentage = msg.percentage
# initialization
if __name__ == '__main__':
# setup ros node
rospy.init_node('slammer_notifications')
rospy.Subscriber('/unav2/status/battery', BatteryState, on_batterystate)
pub = rospy.Publisher('/signalling/leds', blink, queue_size=40)
rospy.loginfo("Waiting for led service")
while not rospy.is_shutdown() and pub.get_num_connections() == 0:
rospy.sleep(0.05)
rospy.loginfo("Connected")
led = blink()
led.led = 0
led.msecOn = 0
led.msecOff = 0
led.single = False
init = True
# start running
while not rospy.core.is_shutdown():
if init:
#front white
led.group = front
for i in range(len(front_colour)):
led.R = front_colour[i][0]
led.G = front_colour[i][1]
led.B = front_colour[i][2]
led.led = i
pub.publish(led)
rospy.rostime.wallsleep(0.1)
#front lights
led.group = front_lamps[0]
led.R = front_lamps_color[0]
led.G = front_lamps_color[1]
led.B = front_lamps_color[2]
led.led = front_lamps[1]
pub.publish(led)
rospy.rostime.wallsleep(0.1)
#Read red
led.group = rear
for i in range(len(rear_colour)):
led.R = rear_colour[i][0]
led.G = rear_colour[i][1]
led.B = rear_colour[i][2]
led.led = i
pub.publish(led)
rospy.rostime.wallsleep(0.01)
init = False
led.group = lamp
led.led = 0
led.msecOn = 50
led.msecOff = 0
led.single = True
if(battery_voltage > battery_warn_threshold):
led.R = lamp_batt_ok[0]
led.G = lamp_batt_ok[1]
led.B = lamp_batt_ok[2]
elif battery_voltage > battery_critical_threshold:
led.R = lamp_batt_warn[0]
led.G = lamp_batt_warn[1]
led.B = lamp_batt_warn[2]
else:
led.R = lamp_batt_critical[0]
led.G = lamp_batt_critical[1]
led.B = lamp_batt_critical[2]
pub.publish(led)
rospy.rostime.wallsleep(2.0)
| #!/usr/bin/env python3
import rospy
import subprocess
import math
from sensor_msgs.msg import BatteryState
from ros_signalling.msg import blink
battery_voltage = 0
battery_current = 0
battery_charge = 0
battery_percentage = 0
battery_warn_threshold = 13.2
battery_critical_threshold = 13.0
front = 3
lamp = 1
front_lamps = [lamp, 1]
rear = 0
front_count = 8
rear_count = 20
# R, G, B
color_yellow = [ 0x80, 40, 00]
color_dim_red = [16, 0, 0]
color_dim_white = [16, 16, 16]
color_green = [0, 16, 0]
color_black = [0, 0, 0]
front_colour = front_count * [color_yellow]
rear_colour = 5 * [color_green] + 6 * [color_dim_white] + 5 * [color_dim_red] + [color_green]
front_lamps_color = [16, 16, 16]
lamp_max = 200
lamp_batt_ok = [lamp_max, lamp_max, lamp_max]
lamp_batt_warn = [lamp_max // 2, lamp_max, 0]
lamp_batt_critical = [0, lamp_max, 0]
def on_batterystate(msg):
global battery_voltage
global battery_current
global battery_charge
global battery_percentage
battery_voltage = msg.voltage
battery_current = msg.current
battery_charge = msg.charge
battery_percentage = msg.percentage
# initialization
if __name__ == '__main__':
# setup ros node
rospy.init_node('slammer_notifications')
rospy.Subscriber('/unav2/status/battery', BatteryState, on_batterystate)
pub = rospy.Publisher('/signalling/leds', blink, queue_size=40)
rospy.loginfo("Waiting for led service")
while not rospy.is_shutdown() and pub.get_num_connections() == 0:
rospy.sleep(0.05)
rospy.loginfo("Connected")
led = blink()
led.led = 0
led.msecOn = 0
led.msecOff = 0
led.single = False
init = True
# start running
while not rospy.core.is_shutdown():
if init:
#front white
led.group = front
for i in range(len(front_colour)):
led.R = front_colour[i][0]
led.G = front_colour[i][1]
led.B = front_colour[i][2]
led.led = i
pub.publish(led)
rospy.rostime.wallsleep(0.1)
#front lights
led.group = front_lamps[0]
led.R = front_lamps_color[0]
led.G = front_lamps_color[1]
led.B = front_lamps_color[2]
led.led = front_lamps[1]
pub.publish(led)
rospy.rostime.wallsleep(0.1)
#Read red
led.group = rear
for i in range(len(rear_colour)):
led.R = rear_colour[i][0]
led.G = rear_colour[i][1]
led.B = rear_colour[i][2]
led.led = i
pub.publish(led)
rospy.rostime.wallsleep(0.01)
init = False
led.group = lamp
led.led = 0
led.msecOn = 50
led.msecOff = 0
led.single = True
if(battery_voltage > battery_warn_threshold):
led.R = lamp_batt_ok[0]
led.G = lamp_batt_ok[1]
led.B = lamp_batt_ok[2]
elif battery_voltage > battery_critical_threshold:
led.R = lamp_batt_warn[0]
led.G = lamp_batt_warn[1]
led.B = lamp_batt_warn[2]
else:
led.R = lamp_batt_critical[0]
led.G = lamp_batt_critical[1]
led.B = lamp_batt_critical[2]
pub.publish(led)
rospy.rostime.wallsleep(2.0) | en | 0.326145 | #!/usr/bin/env python3 # R, G, B # initialization # setup ros node # start running #front white #front lights #Read red | 2.258103 | 2 |
src/test/test_api_v1.py | jayrbolton/relation_engine_api | 1 | 6618798 | <gh_stars>1-10
"""
Simple integration tests on the API itself.
We make actual ajax requests to the running docker container.
"""
import unittest
import requests
import json
import os
from src.relation_engine_server.utils.config import get_config
_CONF = get_config()
# Use the mock auth tokens
NON_ADMIN_TOKEN = '<PASSWORD>'
ADMIN_TOKEN = '<PASSWORD>'
INVALID_TOKEN = '<PASSWORD>'
# Use the docker-compose url of the running flask server
URL = os.environ.get('TEST_URL', 'http://localhost:5000')
VERSION = 'v1'
API_URL = '/'.join([URL, 'api', VERSION])
HEADERS_NON_ADMIN = {'Authorization': 'Bearer ' + NON_ADMIN_TOKEN, 'Content-Type': 'application/json'}
HEADERS_ADMIN = {'Authorization': 'Bearer ' + ADMIN_TOKEN, 'Content-Type': 'application/json'}
def create_test_docs(count):
"""Produce some test documents."""
def doc(i):
return '{"name": "name", "_key": "%s", "is_public": true}' % i
return '\n'.join(doc(i) for i in range(0, count))
def create_test_edges(count):
"""Produce some test edges."""
def doc(i):
return '{"_from": "test_vertex/%s", "_to": "test_vertex/%s"}' % (i, i)
return '\n'.join(doc(i) for i in range(0, count))
def save_test_docs(count, edges=False):
if edges:
docs = create_test_edges(count)
collection = 'test_edge'
else:
docs = create_test_docs(count)
collection = 'test_vertex'
return requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': collection},
data=docs,
headers=HEADERS_ADMIN
).json()
class TestApi(unittest.TestCase):
def test_root(self):
"""Test root path for api."""
resp = requests.get(URL + '/').json()
self.assertEqual(resp['arangodb_status'], 'connected_authorized')
self.assertTrue(resp['commit_hash'])
self.assertTrue(resp['repo_url'])
def test_config(self):
"""Test config fetch."""
resp = requests.get(API_URL + '/config').json()
self.assertTrue(len(resp['auth_url']))
self.assertTrue(len(resp['workspace_url']))
self.assertTrue(len(resp['kbase_endpoint']))
self.assertTrue(len(resp['db_url']))
self.assertTrue(len(resp['db_name']))
self.assertTrue(len(resp['spec_url']))
def test_update_specs(self):
"""Test the endpoint that triggers an update on the specs."""
resp = requests.put(
API_URL + '/specs',
headers=HEADERS_ADMIN,
params={'reset': '1', 'init_collections': '1'}
)
resp_json = resp.json()
self.assertEqual(resp.status_code, 200)
self.assertTrue(len(resp_json['status']))
# Test that the indexes get created and not duplicated
url = _CONF['db_url'] + '/_api/index'
auth = (_CONF['db_user'], _CONF['db_pass'])
resp = requests.get(url, params={'collection': 'ncbi_taxon'}, auth=auth)
resp_json = resp.json()
indexes = resp_json['indexes']
self.assertEqual(len(indexes), 4)
fields = [i['fields'] for i in indexes]
self.assertEqual(set(tuple(f) for f in fields), {
('_key',),
('scientific_name',),
('id', 'expired', 'created'),
('expired', 'created', 'last_version')
})
def test_list_stored_queries(self):
"""Test the listing out of saved AQL stored queries."""
resp = requests.get(API_URL + '/specs/stored_queries').json()
self.assertTrue('list_test_vertices' in resp)
def test_list_schemas(self):
"""Test the listing out of registered JSON schemas for vertices and edges."""
resp = requests.get(API_URL + '/specs/schemas').json()
self.assertTrue('test_vertex' in resp)
self.assertTrue('test_edge' in resp)
self.assertFalse('error' in resp)
self.assertTrue(len(resp))
def test_fetch_schema_for_doc(self):
"""Given a document ID, fetch its schema."""
resp = requests.get(API_URL + '/specs/schemas', params={'doc_id': 'test_vertex/123'}).json()
self.assertEqual(resp['name'], 'test_vertex')
self.assertEqual(resp['type'], 'vertex')
self.assertTrue(resp['schema'])
def test_save_documents_missing_auth(self):
"""Test an invalid attempt to save a doc with a missing auth token."""
resp = requests.put(
API_URL + '/documents?on_duplicate=error&overwrite=true&collection'
).json()
self.assertEqual(resp['error'], {'message': 'Missing header: Authorization', 'status': 400})
def test_save_documents_invalid_auth(self):
"""Test an invalid attempt to save a doc with a bad auth token."""
resp = requests.put(
API_URL + '/documents?on_duplicate=error&overwrite=true&collection',
headers={'Authorization': 'Bearer ' + INVALID_TOKEN}
).json()
self.assertEqual(resp['error']['message'], 'Unauthorized')
self.assertEqual(resp['error']['status'], 403)
def test_save_documents_non_admin(self):
"""Test an invalid attempt to save a doc as a non-admin."""
resp = requests.put(
API_URL + '/documents?on_duplicate=error&overwrite=true&collection',
headers=HEADERS_NON_ADMIN
).json()
self.assertEqual(resp['error']['message'], 'Unauthorized')
self.assertEqual(resp['error']['status'], 403)
def test_save_documents_invalid_schema(self):
"""Test the case where some documents fail against their schema."""
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'ignore', 'collection': 'test_vertex'},
data='{"name": "x"}\n{"name": "y"}',
headers=HEADERS_ADMIN
).json()
self.assertEqual(resp['error'], "'_key' is a required property")
self.assertEqual(resp['value'], {'name': 'x'})
self.assertEqual(resp['path'], [])
self.assertEqual(resp['failed_validator'], 'required')
def test_save_documents_missing_schema(self):
"""Test the case where the collection/schema does not exist."""
resp = requests.put(
API_URL + '/documents',
params={'collection': 'xyzabc'},
data='',
headers=HEADERS_ADMIN
).json()
self.assertTrue('Schema does not exist' in resp['error'])
def test_save_documents_invalid_json(self):
"""Test an attempt to save documents with an invalid JSON body."""
resp = requests.put(
API_URL + '/documents',
params={'collection': 'test_vertex'},
data='\n',
headers=HEADERS_ADMIN
).json()
self.assertTrue('Unable to parse' in resp['error'])
self.assertEqual(resp['pos'], 1)
self.assertEqual(resp['source_json'], '\n')
def test_create_documents(self):
"""Test all valid cases for saving documents."""
resp = save_test_docs(3)
expected = {'created': 3, 'errors': 0, 'empty': 0, 'updated': 0, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_create_edges(self):
"""Test all valid cases for saving edges."""
resp = save_test_docs(3, edges=True)
expected = {'created': 3, 'errors': 0, 'empty': 0, 'updated': 0, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_update_documents(self):
"""Test updating existing documents."""
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'update', 'collection': 'test_vertex'},
data=create_test_docs(3),
headers=HEADERS_ADMIN
).json()
expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 3, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_update_edge(self):
"""Test updating existing edge."""
edges = create_test_edges(3)
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'update', 'collection': 'test_edge'},
data=create_test_edges(3),
headers=HEADERS_ADMIN
)
self.assertTrue(resp.ok)
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'update', 'collection': 'test_edge'},
data=edges,
headers=HEADERS_ADMIN
).json()
expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 3, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_replace_documents(self):
"""Test replacing of existing documents."""
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'replace', 'collection': 'test_vertex'},
data=create_test_docs(3),
headers=HEADERS_ADMIN
).json()
expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 3, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_save_documents_dupe_errors(self):
"""Test where we want to raise errors on duplicate documents."""
save_test_docs(3)
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'error', 'collection': 'test_vertex', 'display_errors': '1'},
data=create_test_docs(3),
headers=HEADERS_ADMIN
).json()
self.assertEqual(resp['created'], 0)
self.assertEqual(resp['errors'], 3)
self.assertTrue(resp['details'])
def test_save_documents_ignore_dupes(self):
"""Test ignoring duplicate, existing documents when saving."""
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'ignore', 'collection': 'test_vertex'},
data=create_test_docs(3),
headers=HEADERS_ADMIN
).json()
expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 0, 'ignored': 3, 'error': False}
self.assertEqual(resp, expected)
def test_admin_query(self):
"""Test an ad-hoc query made by an admin."""
save_test_docs(1)
query = 'for v in test_vertex sort rand() limit @count return v._id'
resp = requests.post(
API_URL + '/query_results',
params={},
headers=HEADERS_ADMIN,
data=json.dumps({'query': query, 'count': 1})
).json()
self.assertEqual(resp['count'], 1)
self.assertEqual(len(resp['results']), 1)
def test_admin_query_non_admin(self):
"""Test an ad-hoc query error as a non-admin."""
query = 'for v in test_vertex sort rand() limit @count return v._id'
resp = requests.post(
API_URL + '/query_results',
params={},
headers=HEADERS_NON_ADMIN,
data=json.dumps({'query': query, 'count': 1})
).json()
self.assertEqual(resp['error']['message'], 'Unauthorized')
self.assertEqual(resp['error']['status'], 403)
def test_admin_query_invalid_auth(self):
"""Test the error response for an ad-hoc admin query without auth."""
query = 'for v in test_vertex sort rand() limit @count return v._id'
resp = requests.post(
API_URL + '/query_results',
params={},
headers={'Authorization': INVALID_TOKEN},
data=json.dumps({'query': query, 'count': 1})
).json()
self.assertEqual(resp['error']['message'], 'Unauthorized')
self.assertEqual(resp['error']['status'], 403)
def test_query_with_cursor(self):
"""Test getting more data via a query cursor and setting batch size."""
save_test_docs(count=20)
resp = requests.post(
API_URL + '/query_results',
params={'stored_query': 'list_test_vertices', 'batch_size': 10, 'full_count': True}
).json()
self.assertTrue(resp['cursor_id'])
self.assertEqual(resp['has_more'], True)
self.assertEqual(resp['count'], 20)
self.assertEqual(resp['stats']['fullCount'], 20)
self.assertTrue(len(resp['results']), 10)
cursor_id = resp['cursor_id']
resp = requests.post(
API_URL + '/query_results',
params={'cursor_id': cursor_id}
).json()
self.assertEqual(resp['count'], 20)
self.assertEqual(resp['stats']['fullCount'], 20)
self.assertEqual(resp['has_more'], False)
self.assertEqual(resp['cursor_id'], None)
self.assertTrue(len(resp['results']), 10)
# Try to get the same cursor again
resp = requests.post(
API_URL + '/query_results',
params={'cursor_id': cursor_id}
).json()
self.assertTrue(resp['error'])
self.assertEqual(resp['arango_message'], 'cursor not found')
def test_query_no_name(self):
"""Test a query error with a stored query name that does not exist."""
resp = requests.post(
API_URL + '/query_results',
params={'stored_query': 'nonexistent'}
).json()
self.assertEqual(resp['error'], 'Stored query does not exist.')
self.assertEqual(resp['name'], 'nonexistent')
def test_query_missing_bind_var(self):
"""Test a query error with a missing bind variable."""
resp = requests.post(
API_URL + '/query_results',
params={'stored_query': 'list_test_vertices'},
data=json.dumps({'xyz': 'test_vertex'})
).json()
self.assertEqual(resp['error'], 'ArangoDB server error.')
self.assertTrue(resp['arango_message'])
def test_auth_query_with_access(self):
"""Test the case where we query a collection with specific workspace access."""
ws_id = 3
# Remove all test vertices and create one with a ws_id
requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': 'test_vertex'},
data=json.dumps({
'name': 'requires_auth',
'_key': '123',
'ws_id': ws_id
}),
headers=HEADERS_ADMIN
)
resp = requests.post(
API_URL + '/query_results',
params={'stored_query': 'list_test_vertices'},
headers={'Authorization': 'valid_token'} # see ./mock_workspace/endpoints.json
).json()
self.assertEqual(resp['count'], 1)
self.assertEqual(resp['results'][0]['ws_id'], ws_id)
def test_auth_query_no_access(self):
"""Test the case where we try to query a collection without the right workspace access."""
# Remove all test vertices and create one with a ws_id
requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': 'test_vertex'},
data='{"name": "requires_auth", "_key": "1", "ws_id": 9999}',
headers=HEADERS_ADMIN
)
resp = requests.post(
API_URL + '/query_results',
params={'stored_query': 'list_test_vertices'},
headers={'Authorization': 'valid_token'} # see ./mock_workspace/endpoints.json
).json()
self.assertEqual(resp['count'], 0)
def test_query_cannot_pass_ws_ids(self):
"""Test that users cannot set the ws_ids param."""
ws_id = 99
requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': 'test_vertex'},
data='{"name": "requires_auth", "_key": "1", "ws_id": 99}',
headers=HEADERS_ADMIN
)
resp = requests.post(
API_URL + '/query_results',
params={'view': 'list_test_vertices'},
data=json.dumps({'ws_ids': [ws_id]}),
headers={'Authorization': 'valid_token'}
).json()
self.assertEqual(resp['count'], 0)
def test_auth_query_invalid_token(self):
"""Test the case where we try to authorize a query using an invalid auth token."""
requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': 'test_vertex'},
data='{"name": "requires_auth", "_key": "1", "ws_id": 99}',
headers=HEADERS_ADMIN
)
resp = requests.post(
API_URL + '/query_results',
params={'view': 'list_test_vertices'},
data=json.dumps({'ws_ids': [1]}),
headers={'Authorization': INVALID_TOKEN}
)
self.assertEqual(resp.status_code, 403)
def test_auth_adhoc_query(self):
"""Test that the 'ws_ids' bind-var is set for RE_ADMINs."""
ws_id = 99
requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': 'test_vertex'},
data=json.dumps({'name': 'requires_auth', 'key': '1', 'ws_id': ws_id}),
headers={'Authorization': 'valid_token'}
)
# This is the same query as list_test_vertices.aql in the spec
query = 'for o in test_vertex filter o.is_public || o.ws_id IN ws_ids return o'
resp = requests.post(
API_URL + '/query_results',
data=json.dumps({'query': query}),
headers={'Authorization': ADMIN_TOKEN} # see ./mock_workspace/endpoints.json
).json()
self.assertEqual(resp['count'], 1)
def test_save_docs_invalid(self):
"""Test that an invalid bulk save returns a 400 response"""
doc = {'_from': '|||', '_to': '|||'}
resp = requests.put(
API_URL + '/documents',
params={'overwrite': True, 'collection': 'test_edge', 'display_errors': 1},
data=json.dumps(doc),
headers=HEADERS_ADMIN
)
self.assertEqual(resp.status_code, 400)
resp_json = resp.json()
self.assertEqual(resp_json['errors'], 1)
def test_list_data_sources(self):
resp = requests.get(API_URL + '/data_sources')
self.assertTrue(resp.ok)
resp_json = resp.json()
self.assertTrue(len(resp_json['data_sources']) > 0)
self.assertEqual(set(type(x) for x in resp_json['data_sources']), {str})
def test_show_data_source(self):
resp = requests.get(API_URL + '/data_sources/ncbi_taxonomy')
self.assertTrue(resp.ok)
resp_json = resp.json()
self.assertEqual(type(resp_json['data_source']), dict)
self.assertEqual(set(resp_json['data_source'].keys()), {
'name', 'category', 'title', 'home_url', 'data_url', 'logo_url'
})
self.assertTrue(
'/ui-assets/images/third-party-data-sources/ncbi' in resp_json['data_source']['logo_url']
)
def test_show_data_source_unknown(self):
"""Unknown data source name should yield 404 status."""
name = 'xyzyxz'
resp = requests.get(f"{API_URL}/data_sources/{name}")
self.assertEqual(resp.status_code, 404)
resp_json = resp.json()
# Just assert that it returns any json in the body
self.assertEqual(resp_json, {
'error': {
'message': 'Not found',
'status': 404,
'details': f"The data source with name '{name}' does not exist.",
}
})
| """
Simple integration tests on the API itself.
We make actual ajax requests to the running docker container.
"""
import unittest
import requests
import json
import os
from src.relation_engine_server.utils.config import get_config
# Project configuration (db_url, db_user, db_pass, ...) loaded once at import time.
_CONF = get_config()
# Use the mock auth tokens
# (placeholder values; the mock auth service decides which is admin/non-admin/invalid)
NON_ADMIN_TOKEN = '<PASSWORD>'
ADMIN_TOKEN = '<PASSWORD>'
INVALID_TOKEN = '<PASSWORD>'
# Use the docker-compose url of the running flask server
URL = os.environ.get('TEST_URL', 'http://localhost:5000')
VERSION = 'v1'  # API version path segment
API_URL = '/'.join([URL, 'api', VERSION])  # e.g. http://localhost:5000/api/v1
# Pre-built headers for authenticated JSON requests, one per privilege level.
HEADERS_NON_ADMIN = {'Authorization': 'Bearer ' + NON_ADMIN_TOKEN, 'Content-Type': 'application/json'}
HEADERS_ADMIN = {'Authorization': 'Bearer ' + ADMIN_TOKEN, 'Content-Type': 'application/json'}
def create_test_docs(count):
    """Build newline-delimited JSON for ``count`` test vertex documents.

    Keys are the integers 0..count-1 rendered as strings.
    """
    template = '{"name": "name", "_key": "%s", "is_public": true}'
    lines = [template % idx for idx in range(count)]
    return '\n'.join(lines)
def create_test_edges(count):
    """Build newline-delimited JSON for ``count`` test edge documents.

    Each edge links test_vertex/i to itself for i in 0..count-1.
    """
    edges = []
    for idx in range(count):
        edges.append('{"_from": "test_vertex/%s", "_to": "test_vertex/%s"}' % (idx, idx))
    return '\n'.join(edges)
def save_test_docs(count, edges=False):
    """Bulk-save ``count`` generated test documents via the API.

    Saves edges into 'test_edge' when ``edges`` is truthy, otherwise
    vertices into 'test_vertex'. Overwrites the collection contents.
    Returns the parsed JSON response body.
    """
    if edges:
        collection = 'test_edge'
        payload = create_test_edges(count)
    else:
        collection = 'test_vertex'
        payload = create_test_docs(count)
    resp = requests.put(
        API_URL + '/documents',
        params={'overwrite': True, 'collection': collection},
        data=payload,
        headers=HEADERS_ADMIN
    )
    return resp.json()
class TestApi(unittest.TestCase):
def test_root(self):
"""Test root path for api."""
resp = requests.get(URL + '/').json()
self.assertEqual(resp['arangodb_status'], 'connected_authorized')
self.assertTrue(resp['commit_hash'])
self.assertTrue(resp['repo_url'])
def test_config(self):
"""Test config fetch."""
resp = requests.get(API_URL + '/config').json()
self.assertTrue(len(resp['auth_url']))
self.assertTrue(len(resp['workspace_url']))
self.assertTrue(len(resp['kbase_endpoint']))
self.assertTrue(len(resp['db_url']))
self.assertTrue(len(resp['db_name']))
self.assertTrue(len(resp['spec_url']))
def test_update_specs(self):
"""Test the endpoint that triggers an update on the specs."""
resp = requests.put(
API_URL + '/specs',
headers=HEADERS_ADMIN,
params={'reset': '1', 'init_collections': '1'}
)
resp_json = resp.json()
self.assertEqual(resp.status_code, 200)
self.assertTrue(len(resp_json['status']))
# Test that the indexes get created and not duplicated
url = _CONF['db_url'] + '/_api/index'
auth = (_CONF['db_user'], _CONF['db_pass'])
resp = requests.get(url, params={'collection': 'ncbi_taxon'}, auth=auth)
resp_json = resp.json()
indexes = resp_json['indexes']
self.assertEqual(len(indexes), 4)
fields = [i['fields'] for i in indexes]
self.assertEqual(set(tuple(f) for f in fields), {
('_key',),
('scientific_name',),
('id', 'expired', 'created'),
('expired', 'created', 'last_version')
})
def test_list_stored_queries(self):
"""Test the listing out of saved AQL stored queries."""
resp = requests.get(API_URL + '/specs/stored_queries').json()
self.assertTrue('list_test_vertices' in resp)
def test_list_schemas(self):
"""Test the listing out of registered JSON schemas for vertices and edges."""
resp = requests.get(API_URL + '/specs/schemas').json()
self.assertTrue('test_vertex' in resp)
self.assertTrue('test_edge' in resp)
self.assertFalse('error' in resp)
self.assertTrue(len(resp))
def test_fetch_schema_for_doc(self):
"""Given a document ID, fetch its schema."""
resp = requests.get(API_URL + '/specs/schemas', params={'doc_id': 'test_vertex/123'}).json()
self.assertEqual(resp['name'], 'test_vertex')
self.assertEqual(resp['type'], 'vertex')
self.assertTrue(resp['schema'])
def test_save_documents_missing_auth(self):
"""Test an invalid attempt to save a doc with a missing auth token."""
resp = requests.put(
API_URL + '/documents?on_duplicate=error&overwrite=true&collection'
).json()
self.assertEqual(resp['error'], {'message': 'Missing header: Authorization', 'status': 400})
def test_save_documents_invalid_auth(self):
"""Test an invalid attempt to save a doc with a bad auth token."""
resp = requests.put(
API_URL + '/documents?on_duplicate=error&overwrite=true&collection',
headers={'Authorization': 'Bearer ' + INVALID_TOKEN}
).json()
self.assertEqual(resp['error']['message'], 'Unauthorized')
self.assertEqual(resp['error']['status'], 403)
def test_save_documents_non_admin(self):
"""Test an invalid attempt to save a doc as a non-admin."""
resp = requests.put(
API_URL + '/documents?on_duplicate=error&overwrite=true&collection',
headers=HEADERS_NON_ADMIN
).json()
self.assertEqual(resp['error']['message'], 'Unauthorized')
self.assertEqual(resp['error']['status'], 403)
def test_save_documents_invalid_schema(self):
"""Test the case where some documents fail against their schema."""
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'ignore', 'collection': 'test_vertex'},
data='{"name": "x"}\n{"name": "y"}',
headers=HEADERS_ADMIN
).json()
self.assertEqual(resp['error'], "'_key' is a required property")
self.assertEqual(resp['value'], {'name': 'x'})
self.assertEqual(resp['path'], [])
self.assertEqual(resp['failed_validator'], 'required')
def test_save_documents_missing_schema(self):
"""Test the case where the collection/schema does not exist."""
resp = requests.put(
API_URL + '/documents',
params={'collection': 'xyzabc'},
data='',
headers=HEADERS_ADMIN
).json()
self.assertTrue('Schema does not exist' in resp['error'])
def test_save_documents_invalid_json(self):
"""Test an attempt to save documents with an invalid JSON body."""
resp = requests.put(
API_URL + '/documents',
params={'collection': 'test_vertex'},
data='\n',
headers=HEADERS_ADMIN
).json()
self.assertTrue('Unable to parse' in resp['error'])
self.assertEqual(resp['pos'], 1)
self.assertEqual(resp['source_json'], '\n')
def test_create_documents(self):
"""Test all valid cases for saving documents."""
resp = save_test_docs(3)
expected = {'created': 3, 'errors': 0, 'empty': 0, 'updated': 0, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_create_edges(self):
"""Test all valid cases for saving edges."""
resp = save_test_docs(3, edges=True)
expected = {'created': 3, 'errors': 0, 'empty': 0, 'updated': 0, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_update_documents(self):
"""Test updating existing documents."""
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'update', 'collection': 'test_vertex'},
data=create_test_docs(3),
headers=HEADERS_ADMIN
).json()
expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 3, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
def test_update_edge(self):
"""Test updating existing edge."""
edges = create_test_edges(3)
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'update', 'collection': 'test_edge'},
data=create_test_edges(3),
headers=HEADERS_ADMIN
)
self.assertTrue(resp.ok)
resp = requests.put(
API_URL + '/documents',
params={'on_duplicate': 'update', 'collection': 'test_edge'},
data=edges,
headers=HEADERS_ADMIN
).json()
expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 3, 'ignored': 0, 'error': False}
self.assertEqual(resp, expected)
    def test_replace_documents(self):
        """Test replacing of existing documents."""
        # 'replace' on pre-existing keys is still reported in 'updated'.
        resp = requests.put(
            API_URL + '/documents',
            params={'on_duplicate': 'replace', 'collection': 'test_vertex'},
            data=create_test_docs(3),
            headers=HEADERS_ADMIN
        ).json()
        expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 3, 'ignored': 0, 'error': False}
        self.assertEqual(resp, expected)
    def test_save_documents_dupe_errors(self):
        """Test where we want to raise errors on duplicate documents."""
        # Seed the docs, then re-save with on_duplicate=error: all three
        # must be rejected, and display_errors=1 requests the details.
        save_test_docs(3)
        resp = requests.put(
            API_URL + '/documents',
            params={'on_duplicate': 'error', 'collection': 'test_vertex', 'display_errors': '1'},
            data=create_test_docs(3),
            headers=HEADERS_ADMIN
        ).json()
        self.assertEqual(resp['created'], 0)
        self.assertEqual(resp['errors'], 3)
        self.assertTrue(resp['details'])
    def test_save_documents_ignore_dupes(self):
        """Test ignoring duplicate, existing documents when saving."""
        # With on_duplicate=ignore, existing keys are skipped silently.
        resp = requests.put(
            API_URL + '/documents',
            params={'on_duplicate': 'ignore', 'collection': 'test_vertex'},
            data=create_test_docs(3),
            headers=HEADERS_ADMIN
        ).json()
        expected = {'created': 0, 'errors': 0, 'empty': 0, 'updated': 0, 'ignored': 3, 'error': False}
        self.assertEqual(resp, expected)
    def test_admin_query(self):
        """Test an ad-hoc query made by an admin."""
        save_test_docs(1)
        # @count is bound from the JSON body sent below.
        query = 'for v in test_vertex sort rand() limit @count return v._id'
        resp = requests.post(
            API_URL + '/query_results',
            params={},
            headers=HEADERS_ADMIN,
            data=json.dumps({'query': query, 'count': 1})
        ).json()
        self.assertEqual(resp['count'], 1)
        self.assertEqual(len(resp['results']), 1)
    def test_admin_query_non_admin(self):
        """Test an ad-hoc query error as a non-admin."""
        # Ad-hoc AQL is admin-only; a plain user must receive a 403 body.
        query = 'for v in test_vertex sort rand() limit @count return v._id'
        resp = requests.post(
            API_URL + '/query_results',
            params={},
            headers=HEADERS_NON_ADMIN,
            data=json.dumps({'query': query, 'count': 1})
        ).json()
        self.assertEqual(resp['error']['message'], 'Unauthorized')
        self.assertEqual(resp['error']['status'], 403)
    def test_admin_query_invalid_auth(self):
        """Test the error response for an ad-hoc admin query without auth."""
        # A malformed token is treated the same as a non-admin: 403.
        query = 'for v in test_vertex sort rand() limit @count return v._id'
        resp = requests.post(
            API_URL + '/query_results',
            params={},
            headers={'Authorization': INVALID_TOKEN},
            data=json.dumps({'query': query, 'count': 1})
        ).json()
        self.assertEqual(resp['error']['message'], 'Unauthorized')
        self.assertEqual(resp['error']['status'], 403)
def test_query_with_cursor(self):
"""Test getting more data via a query cursor and setting batch size."""
save_test_docs(count=20)
resp = requests.post(
API_URL + '/query_results',
params={'stored_query': 'list_test_vertices', 'batch_size': 10, 'full_count': True}
).json()
self.assertTrue(resp['cursor_id'])
self.assertEqual(resp['has_more'], True)
self.assertEqual(resp['count'], 20)
self.assertEqual(resp['stats']['fullCount'], 20)
self.assertTrue(len(resp['results']), 10)
cursor_id = resp['cursor_id']
resp = requests.post(
API_URL + '/query_results',
params={'cursor_id': cursor_id}
).json()
self.assertEqual(resp['count'], 20)
self.assertEqual(resp['stats']['fullCount'], 20)
self.assertEqual(resp['has_more'], False)
self.assertEqual(resp['cursor_id'], None)
self.assertTrue(len(resp['results']), 10)
# Try to get the same cursor again
resp = requests.post(
API_URL + '/query_results',
params={'cursor_id': cursor_id}
).json()
self.assertTrue(resp['error'])
self.assertEqual(resp['arango_message'], 'cursor not found')
    def test_query_no_name(self):
        """Test a query error with a stored query name that does not exist."""
        # The API should echo back the unknown name alongside the error.
        resp = requests.post(
            API_URL + '/query_results',
            params={'stored_query': 'nonexistent'}
        ).json()
        self.assertEqual(resp['error'], 'Stored query does not exist.')
        self.assertEqual(resp['name'], 'nonexistent')
    def test_query_missing_bind_var(self):
        """Test a query error with a missing bind variable."""
        # The body supplies an unexpected var ('xyz'), so ArangoDB rejects
        # the query; we only assert that its message is surfaced.
        resp = requests.post(
            API_URL + '/query_results',
            params={'stored_query': 'list_test_vertices'},
            data=json.dumps({'xyz': 'test_vertex'})
        ).json()
        self.assertEqual(resp['error'], 'ArangoDB server error.')
        self.assertTrue(resp['arango_message'])
    def test_auth_query_with_access(self):
        """Test the case where we query a collection with specific workspace access."""
        ws_id = 3
        # Remove all test vertices and create one with a ws_id
        requests.put(
            API_URL + '/documents',
            params={'overwrite': True, 'collection': 'test_vertex'},
            data=json.dumps({
                'name': 'requires_auth',
                '_key': '123',
                'ws_id': ws_id
            }),
            headers=HEADERS_ADMIN
        )
        # 'valid_token' is presumably configured in the mock workspace
        # service to grant access to this workspace, hence count == 1.
        resp = requests.post(
            API_URL + '/query_results',
            params={'stored_query': 'list_test_vertices'},
            headers={'Authorization': 'valid_token'}  # see ./mock_workspace/endpoints.json
        ).json()
        self.assertEqual(resp['count'], 1)
        self.assertEqual(resp['results'][0]['ws_id'], ws_id)
    def test_auth_query_no_access(self):
        """Test the case where we try to query a collection without the right workspace access."""
        # Remove all test vertices and create one with a ws_id
        # ws_id 9999 is outside the token's workspaces, so the stored
        # query must return nothing.
        requests.put(
            API_URL + '/documents',
            params={'overwrite': True, 'collection': 'test_vertex'},
            data='{"name": "requires_auth", "_key": "1", "ws_id": 9999}',
            headers=HEADERS_ADMIN
        )
        resp = requests.post(
            API_URL + '/query_results',
            params={'stored_query': 'list_test_vertices'},
            headers={'Authorization': 'valid_token'}  # see ./mock_workspace/endpoints.json
        ).json()
        self.assertEqual(resp['count'], 0)
    def test_query_cannot_pass_ws_ids(self):
        """Test that users cannot set the ws_ids param."""
        # Even if the caller smuggles ws_ids into the body, the server
        # must derive workspace access from the token instead.
        ws_id = 99
        requests.put(
            API_URL + '/documents',
            params={'overwrite': True, 'collection': 'test_vertex'},
            data='{"name": "requires_auth", "_key": "1", "ws_id": 99}',
            headers=HEADERS_ADMIN
        )
        resp = requests.post(
            API_URL + '/query_results',
            params={'view': 'list_test_vertices'},
            data=json.dumps({'ws_ids': [ws_id]}),
            headers={'Authorization': 'valid_token'}
        ).json()
        self.assertEqual(resp['count'], 0)
    def test_auth_query_invalid_token(self):
        """Test the case where we try to authorize a query using an invalid auth token."""
        # An unrecognised token must be rejected outright with HTTP 403.
        requests.put(
            API_URL + '/documents',
            params={'overwrite': True, 'collection': 'test_vertex'},
            data='{"name": "requires_auth", "_key": "1", "ws_id": 99}',
            headers=HEADERS_ADMIN
        )
        resp = requests.post(
            API_URL + '/query_results',
            params={'view': 'list_test_vertices'},
            data=json.dumps({'ws_ids': [1]}),
            headers={'Authorization': INVALID_TOKEN}
        )
        self.assertEqual(resp.status_code, 403)
    def test_auth_adhoc_query(self):
        """Test that the 'ws_ids' bind-var is set for RE_ADMINs."""
        ws_id = 99
        # NOTE(review): the payload uses 'key', not '_key' -- the server
        # will assign its own key; confirm this is intentional.
        requests.put(
            API_URL + '/documents',
            params={'overwrite': True, 'collection': 'test_vertex'},
            data=json.dumps({'name': 'requires_auth', 'key': '1', 'ws_id': ws_id}),
            headers={'Authorization': 'valid_token'}
        )
        # This is the same query as list_test_vertices.aql in the spec
        query = 'for o in test_vertex filter o.is_public || o.ws_id IN ws_ids return o'
        resp = requests.post(
            API_URL + '/query_results',
            data=json.dumps({'query': query}),
            headers={'Authorization': ADMIN_TOKEN}  # see ./mock_workspace/endpoints.json
        ).json()
        self.assertEqual(resp['count'], 1)
    def test_save_docs_invalid(self):
        """Test that an invalid bulk save returns a 400 response"""
        # '|||' is not a valid document id for _from/_to, so the save
        # must fail with HTTP 400 and one reported error.
        doc = {'_from': '|||', '_to': '|||'}
        resp = requests.put(
            API_URL + '/documents',
            params={'overwrite': True, 'collection': 'test_edge', 'display_errors': 1},
            data=json.dumps(doc),
            headers=HEADERS_ADMIN
        )
        self.assertEqual(resp.status_code, 400)
        resp_json = resp.json()
        self.assertEqual(resp_json['errors'], 1)
    def test_list_data_sources(self):
        """List endpoint returns a non-empty list of data-source names (strings)."""
        resp = requests.get(API_URL + '/data_sources')
        self.assertTrue(resp.ok)
        resp_json = resp.json()
        self.assertTrue(len(resp_json['data_sources']) > 0)
        self.assertEqual(set(type(x) for x in resp_json['data_sources']), {str})
    def test_show_data_source(self):
        """Detail endpoint returns the full metadata dict for a known source."""
        resp = requests.get(API_URL + '/data_sources/ncbi_taxonomy')
        self.assertTrue(resp.ok)
        resp_json = resp.json()
        self.assertEqual(type(resp_json['data_source']), dict)
        # The metadata schema is fixed; any extra/missing key is an error.
        self.assertEqual(set(resp_json['data_source'].keys()), {
            'name', 'category', 'title', 'home_url', 'data_url', 'logo_url'
        })
        self.assertTrue(
            '/ui-assets/images/third-party-data-sources/ncbi' in resp_json['data_source']['logo_url']
        )
    def test_show_data_source_unknown(self):
        """Unknown data source name should yield 404 status."""
        name = 'xyzyxz'
        resp = requests.get(f"{API_URL}/data_sources/{name}")
        self.assertEqual(resp.status_code, 404)
        resp_json = resp.json()
        # Just assert that it returns any json in the body
        self.assertEqual(resp_json, {
            'error': {
                'message': 'Not found',
                'status': 404,
                'details': f"The data source with name '{name}' does not exist.",
            }
        })
Test the case where we try to authorize a query using an invalid auth token. Test that the 'ws_ids' bind-var is set for RE_ADMINs. # This is the same query as list_test_vertices.aql in the spec # see ./mock_workspace/endpoints.json Test that an invalid bulk save returns a 400 response Unknown data source name should yield 404 status. # Just assert that it returns any json in the body | 2.344204 | 2 |
simuvex/procedures/libc___so___6/fileno.py | praetorian-inc/simuvex | 8 | 6618799 | import simuvex
from simuvex.s_type import SimTypeFd
import logging
l = logging.getLogger("simuvex.procedures.fileno")
######################################
# memset
######################################
class fileno(simuvex.SimProcedure):
    """SimProcedure model of libc's fileno(3).

    The simulated FILE pointer is treated as interchangeable with its
    file descriptor, so the argument is simply returned.
    """
    #pylint:disable=arguments-differ
    def run(self, f):
        # Declare the prototype for the analysis: fd in, fd out.
        self.argument_types = {0: SimTypeFd()}
        self.return_type = SimTypeFd()
        return f
| import simuvex
from simuvex.s_type import SimTypeFd
import logging
l = logging.getLogger("simuvex.procedures.fileno")
######################################
# memset
######################################
class fileno(simuvex.SimProcedure):
#pylint:disable=arguments-differ
def run(self, f):
self.argument_types = {0: SimTypeFd()}
self.return_type = SimTypeFd()
return f
| de | 0.806681 | ###################################### # memset ###################################### #pylint:disable=arguments-differ | 2.261552 | 2 |
src/customer.py | abhilasharevur/ospin-promotion | 0 | 6618800 | <gh_stars>0
from datetime import datetime
from promotions import rebate_scheme
from orders import log
def create_id():
    """Placeholder: generate a unique id for a new customer (not implemented;
    returns None, which Customer.get_id() will store as the id)."""
    pass
class Customer():
    """Basic customer record with identity data and a transaction history.

    ``history`` is a chronological list of transaction descriptions,
    e.g. "2020-10-02: Liver purchased - 3".
    """

    def __init__(self, id=0, first_name=None, last_name=None, age=None, contact=None, address=None,
                 history=None):
        self.id = id
        self.first_name = first_name
        self.last_name = last_name
        self.age = age
        self.contact = contact
        self.address = address
        self.history = history  # None until the first transaction is recorded

    def get_id(self):
        """Return the customer id, generating one lazily when it is unset (0)."""
        if self.id == 0:
            self.id = create_id()  # futuristic function to create id for new customer
        return self.id

    def get_firstname(self):
        return self.first_name

    def get_lastname(self):
        return self.last_name

    def get_history(self):
        """Return the transaction history list, or None if nothing was recorded."""
        if self.history is not None:
            return self.history

    def add_transaction(self, data):
        """Append a transaction entry, creating the history list on first use.

        Bug fix: the original appended to the default ``history=None`` and
        raised AttributeError for customers created without a history.
        """
        if self.history is None:
            self.history = []
        self.history.append(data)
class CustomerPromo(Customer):
    """Customer extended with promotion bookkeeping.

    ``promo_type`` names a promotion function available in this module's
    namespace (e.g. ``rebate_scheme`` imported above); it is resolved
    dynamically in get_promo_results().
    """
    def __init__(self, promo_type=None, organ=None, cash=0, price=0, bonus_ratio=0):
        super().__init__()
        self.organ = organ
        self.cash = cash
        self.price = price
        self.bonus_ratio = bonus_ratio
        self.promo_type = promo_type
        # Populated by get_promo_results(); stays None until then.
        self.bonus_to_receive = None
    def get_promo_results(self):
        """Resolve self.promo_type to a function, call it, and store the
        result in self.bonus_to_receive.

        Errors are printed and logged rather than propagated.
        """
        try:
            if self.promo_type:
                # Look the promotion function up by name in the module's
                # global (and this frame's local) namespace.
                possibles = globals().copy()
                possibles.update(locals())
                method = possibles.get(self.promo_type)
                if not method:
                    raise NotImplementedError("Method %s not implemented" % self.promo_type)
                self.bonus_to_receive = method(organ_to_purchase=self.organ, available_cash=self.cash,
                                               product_price=self.price, bonus_ratio=self.bonus_ratio)
                return
            else:
                print("please input the type of promotion")
        except NotImplementedError as e:
            print("Method %s not implemented" % self.promo_type)
            log.error(e)
        except Exception as err:
            print(err)
            log.error(err)
| from datetime import datetime
from promotions import rebate_scheme
from orders import log
def create_id():
pass
class Customer():
def __init__(self, id=0, first_name=None, last_name=None, age=None, contact=None, address=None,
history=None):
self.id = id
self.first_name = first_name
self.last_name = last_name
self.age = age
self.contact = contact
self.address = address
self.history = history # example dict { 2020-10-02: "Liver purchased - 3",
# 2020-10-12: "Bonus redeemed - heart 0, liver 2, lung 1"}
def get_id(self):
if self.id == 0:
self.id = create_id() # futuristic function to create id for new customer
return self.id
def get_firstname(self):
return self.first_name
def get_lastname(self):
return self.last_name
def get_history(self):
if self.history is not None:
return self.history
def add_transaction(self, data):
self.history.append(data)
class CustomerPromo(Customer):
def __init__(self, promo_type=None, organ=None, cash=0, price=0, bonus_ratio=0):
super().__init__()
self.organ = organ
self.cash = cash
self.price = price
self.bonus_ratio = bonus_ratio
self.promo_type = promo_type
self.bonus_to_receive = None
def get_promo_results(self):
try:
if self.promo_type:
possibles = globals().copy()
possibles.update(locals())
method = possibles.get(self.promo_type)
if not method:
raise NotImplementedError("Method %s not implemented" % self.promo_type)
self.bonus_to_receive = method(organ_to_purchase=self.organ, available_cash=self.cash,
product_price=self.price, bonus_ratio=self.bonus_ratio)
return
else:
print("please input the type of promotion")
except NotImplementedError as e:
print("Method %s not implemented" % self.promo_type)
log.error(e)
except Exception as err:
print(err)
log.error(err) | en | 0.563338 | # example dict { 2020-10-02: "Liver purchased - 3", # 2020-10-12: "Bonus redeemed - heart 0, liver 2, lung 1"} # futuristic function to create id for new customer | 2.952887 | 3 |
exec/src/klio_exec/commands/audit_steps/tempfile_usage.py | gaybro8777/klio | 705 | 6618801 | <filename>exec/src/klio_exec/commands/audit_steps/tempfile_usage.py
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import traceback
from klio_exec.commands.audit_steps import base
class TempFileUsage(base.BaseKlioAuditStep):
    """Avoid leaky file descriptors from `tempfile.TemporaryFile`."""
    AUDIT_STEP_NAME = "tempfile"
    # Call sites whose path contains one of these package directories are
    # exempt from the audit.
    PACKAGES_TO_IGNORE = ("_pytest",)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # True once user code has created a TemporaryFile.
        self._tempfile_used = False
        # One extracted stack per offending call, in call order.
        self._tempfile_tracebacks = []
    def _mock_tempfile(self):
        """Monkey-patch tempfile.TemporaryFile with a recording wrapper.

        The wrapper notes the call (flag + extracted stack) unless the
        caller lives in one of PACKAGES_TO_IGNORE, then delegates to the
        real tempfile.TemporaryFile so behaviour is unchanged.
        """
        RealTempFile = tempfile.TemporaryFile
        def MockTemporaryFile(*args, **kwargs):
            # Drop the last frame (this wrapper) so the recorded stack
            # ends at the user's call site.
            stack = traceback.extract_stack()[:-1]
            caller_frame = stack[-1]
            should_ignore = any(
                [
                    ("/%s/" % ignored) in caller_frame.filename
                    for ignored in TempFileUsage.PACKAGES_TO_IGNORE
                ]
            )
            if not should_ignore:
                self._tempfile_used = True
                self._tempfile_tracebacks.append(stack)
            return RealTempFile(*args, **kwargs)
        # NOTE(review): the patch is never undone, so TemporaryFile stays
        # wrapped for the life of the process -- presumably fine for a
        # one-shot audit run; confirm if this step is ever reused.
        tempfile.TemporaryFile = MockTemporaryFile
    def before_tests(self):
        self._mock_tempfile()
    def after_tests(self):
        # Only the first offending traceback is reported, even when
        # several call sites used TemporaryFile.
        if self._tempfile_used:
            self.emit_error(
                "`tempfile.TemporaryFile` was used! Please use `tempfile."
                "NamedTemporaryFile` instead to avoid leaking files. "
                "Traceback:",
                self._tempfile_tracebacks[0],
            )
# shortcut for registering plugins in setup.py
_init = TempFileUsage
| <filename>exec/src/klio_exec/commands/audit_steps/tempfile_usage.py
# Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import tempfile
import traceback
from klio_exec.commands.audit_steps import base
class TempFileUsage(base.BaseKlioAuditStep):
"""Avoid leaky file descriptors from `tempfile.TemporaryFile`."""
AUDIT_STEP_NAME = "tempfile"
PACKAGES_TO_IGNORE = ("_pytest",)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._tempfile_used = False
self._tempfile_tracebacks = []
def _mock_tempfile(self):
"""Override tempfile.TemporaryFile in the user's code
with a MockTemporaryFile that marks the class attribute
`TempfileUsage.AuditStep.mock_temporary_file_was used`
as True before returning an actual tempfile.TemporaryFile.
Ignores any use of tempfile.TemporaryFile by
pytest.
"""
RealTempFile = tempfile.TemporaryFile
def MockTemporaryFile(*args, **kwargs):
stack = traceback.extract_stack()[:-1]
caller_frame = stack[-1]
should_ignore = any(
[
("/%s/" % ignored) in caller_frame.filename
for ignored in TempFileUsage.PACKAGES_TO_IGNORE
]
)
if not should_ignore:
self._tempfile_used = True
self._tempfile_tracebacks.append(stack)
return RealTempFile(*args, **kwargs)
tempfile.TemporaryFile = MockTemporaryFile
def before_tests(self):
self._mock_tempfile()
def after_tests(self):
if self._tempfile_used:
self.emit_error(
"`tempfile.TemporaryFile` was used! Please use `tempfile."
"NamedTemporaryFile` instead to avoid leaking files. "
"Traceback:",
self._tempfile_tracebacks[0],
)
# shortcut for registering plugins in setup.py
_init = TempFileUsage
| en | 0.792221 | # Copyright 2020 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Avoid leaky file descriptors from `tempfile.TemporaryFile`. Override tempfile.TemporaryFile in the user's code with a MockTemporaryFile that marks the class attribute `TempfileUsage.AuditStep.mock_temporary_file_was used` as True before returning an actual tempfile.TemporaryFile. Ignores any use of tempfile.TemporaryFile by pytest. # shortcut for registering plugins in setup.py | 2.129487 | 2 |
data.py | fionn-mac/seq2seq-PyTorch | 1 | 6618802 |
class Data(object):
    """Vocabulary for a corpus: maps words to integer indices and back.

    Indices 0-2 are reserved for the PAD, SOS and EOS special tokens.
    """

    def __init__(self, name):
        self.name = name
        self.word2index = {"PAD": 0, "SOS": 1, "EOS": 2}
        self.word2count = {}
        self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
        self.n_words = 3  # Count special tokens

    def add_sentence(self, sentence):
        """Register every token of an already-tokenised sentence."""
        for token in sentence:
            self.add_word(token)

    def add_word(self, word):
        """Add *word* to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
        else:
            index = self.n_words
            self.word2index[word] = index
            self.index2word[index] = word
            self.word2count[word] = 1
            self.n_words = index + 1
|
class Data(object):
def __init__(self, name):
self.name = name
self.word2index = {"PAD" : 0, "SOS": 1, "EOS" : 2}
self.word2count = {}
self.index2word = {0: "PAD", 1: "SOS", 2: "EOS"}
self.n_words = 3 # Count special tokens
def add_sentence(self, sentence):
for word in sentence:
self.add_word(word)
def add_word(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
| en | 0.807711 | # Count special tokens | 3.695992 | 4 |
gitfeed/gitfeed.py | Ritiek/GitHub-Newsfeed | 0 | 6618803 | <filename>gitfeed/gitfeed.py
#!/usr/bin/env python
# Background color for labels
from colorama import Fore, Back, Style, init
import json
from datetime import datetime
import pydoc
import requests
import argparse
import sys
import time
import os.path
try:
import configparser
except:
from six.moves import configparser
def get_args(argv=None):
    """Return parsed command-line options.

    Defaults come from ~/.gitfeed/gitfeed.ini (created interactively on
    first run by set_configuration); command-line flags override them.
    """
    file_path = set_configuration()
    # ConfigParser exists under both Python 2 (via six.moves) and 3;
    # SafeConfigParser was deprecated and removed in Python 3.12.
    conf = configparser.ConfigParser()
    conf.read(file_path)
    user = conf.get('GitHub Newsfeed', 'user')
    max_page = conf.get('GitHub Newsfeed', 'max_page')
    # Boolean settings are stored as strings; getboolean() converts them.
    # Bug fix: the original read these values but then hard-coded
    # default=False in add_argument(), silently ignoring the config file.
    quiet = conf.getboolean('GitHub Newsfeed', 'quiet')
    no_time_stamp = conf.getboolean('GitHub Newsfeed', 'no_time_stamp')
    no_style = conf.getboolean('GitHub Newsfeed', 'no_style')
    parser = argparse.ArgumentParser(description='Check your GitHub Newsfeed via the command-line.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-u', '--user', default=user,
                        help='GitHub username for the user to fetch newsfeed for')
    parser.add_argument('-p', '--pages', default=max_page,
                        help='number of newsfeed pages to fetch')
    parser.add_argument('-q', '--quiet', default=quiet,
                        help='hide comment body in issues & PRs', action='store_true')
    parser.add_argument('-nt', '--no-time-stamp', default=no_time_stamp,
                        help='hide time-stamp of events', action='store_true')
    parser.add_argument('-ns', '--no-style', default=no_style,
                        help='show plain white text with no colors or style', action='store_true')
    # Honour an explicitly supplied argv (useful for tests); fall back to
    # the process arguments otherwise, as before.
    return parser.parse_args(sys.argv[1:] if argv is None else argv)
def set_configuration():
    """Ensure ~/.gitfeed/gitfeed.ini exists and return its path.

    On first run the user is prompted for a GitHub username and a config
    file with default settings is written.
    """
    # SafeConfigParser was removed in Python 3.12; ConfigParser is the
    # compatible spelling on both Python 2 (six.moves) and Python 3.
    conf = configparser.ConfigParser()
    home = os.path.expanduser('~')
    folder_name = '.gitfeed'
    folder_path = os.path.join(home, folder_name)
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
    file_name = 'gitfeed.ini'
    file_path = os.path.join(home, folder_name, file_name)
    if not os.path.isfile(file_path):
        # First run: prompt for the username (raw_input on Python 2).
        if sys.version_info > (3, 0):
            user = input("What's your GitHub username? ")
        else:
            user = raw_input("What's your GitHub username? ")
        print('Writing configuration to ' + file_path)
        conf.add_section('GitHub Newsfeed')
        conf.set('GitHub Newsfeed', 'user', user)
        conf.set('GitHub Newsfeed', 'max_page', '1')
        conf.set('GitHub Newsfeed', 'quiet', 'False')
        conf.set('GitHub Newsfeed', 'no_time_stamp', 'False')
        conf.set('GitHub Newsfeed', 'no_style', 'False')
        with open(file_path, 'w') as configfile:
            conf.write(configfile)
        print('')
    return file_path
def remove_color():
    """Disable all styling by blanking every colorama attribute this
    module uses, so the output degrades to plain text."""
    for color_name in ('GREEN', 'CYAN', 'RED', 'YELLOW', 'MAGENTA', 'BLUE', 'WHITE'):
        setattr(Fore, color_name, '')
    Style.BRIGHT = ''
    Back.BLUE = ''
    return
def fix_encoding(query):
    """Return *query* unchanged on Python 3; UTF-8 encode it on Python 2."""
    running_py3 = sys.version_info > (3, 0)
    return query if running_py3 else query.encode('utf-8')
# review PR
def PRReviewEvent(item, quiet):
    """Format a pull-request review-comment event as display text."""
    actor = item['actor']['login']
    repo_name = item['repo']['name']
    pr_number = item['payload']['pull_request']['number']
    comment_body = item['payload']['comment']['body']
    lines = [fix_encoding(Fore.CYAN + Style.BRIGHT +
                          '{} reviewed pull request {} on {}'.format(actor, pr_number, repo_name))]
    if not quiet:
        lines.append(fix_encoding(comment_body))
    return "\n".join(lines)
# open PR, close PR
def PREvent(item, quiet):
    """Format a pull-request opened/closed event.

    The PR body is shown only for freshly opened PRs, and only when it is
    present and quiet mode is off.
    """
    user = item['actor']['login']
    repo = item['repo']['name']
    state = item['payload']['pull_request']['state']
    number = item['payload']['pull_request']['number']
    title = item['payload']['pull_request']['title']
    event_output = []
    if state == 'open':
        event_output.append(fix_encoding(Fore.CYAN + '{} opened pull request {} on {}'.format(user, number, repo)))
        # Bug fix: the original passed the title through fix_encoding()
        # twice here; on Python 2 the second pass re-encodes bytes and can
        # raise UnicodeDecodeError for non-ASCII titles.
        event_output.append(fix_encoding(Style.BRIGHT + title))
        body = item['payload']['pull_request']['body']
        if not quiet and body is not None:
            event_output.append(fix_encoding(body))
    else:
        event_output.append(fix_encoding(Fore.CYAN + '{} closed pull request {} on {}'.format(user, number, repo)))
        event_output.append(fix_encoding(Style.BRIGHT + title))
    return "\n".join(event_output)
# comment on issue, PR
def issueCommentEvent(item, quiet):
    """Format an issue/PR comment event.

    GitHub marks comments that belong to a pull request by including a
    'pull_request' key inside payload['issue']; plain issues lack it.
    """
    user = item['actor']['login']
    repo = item['repo']['name']
    number = item['payload']['issue']['number']
    # Bug fix: the original try/except left `group` unassigned when the
    # 'pull_request' key existed but was falsy (NameError below), and its
    # bare `except` swallowed unrelated errors; an explicit membership
    # test avoids both problems.
    if 'pull_request' in item['payload']['issue']:
        group = 'pull request'
    else:
        group = 'issue'
    event_output = [fix_encoding(Fore.CYAN + Style.BRIGHT + '{} commented on {} {} on {}'.format(user, group, number, repo))]
    if not quiet:
        body = item['payload']['comment']['body']
        event_output.append(fix_encoding(body))
    return "\n".join(event_output)
# open issue, close issue
def issuesEvent(item):
    """Format an issue opened/closed event (the title is always shown)."""
    payload = item['payload']
    header = '{} {} issue {} on {}'.format(
        item['actor']['login'], payload['action'],
        payload['issue']['number'], item['repo']['name'])
    return "\n".join([
        fix_encoding(Fore.RED + Style.BRIGHT + header),
        fix_encoding(Style.BRIGHT + payload['issue']['title']),
    ])
# comment on a commit
def commitCommentEvent(item, quiet):
    """Format a commit-comment event; the body is hidden in quiet mode."""
    comment_body = item['payload']['comment']['body']
    header = '{} commented on {}'.format(item['actor']['login'], item['repo']['name'])
    lines = [fix_encoding(Fore.CYAN + Style.BRIGHT + header)]
    if not quiet:
        lines.append(fix_encoding(comment_body))
    return "\n".join(lines)
# starred by following
def watchEvent(item):
    """Format a 'user starred repository' event."""
    message = '{} starred {}'.format(item['actor']['login'], item['repo']['name'])
    return fix_encoding(Fore.YELLOW + message)
# forked by following
def forkEvent(item):
    """Format a 'user forked repository' event."""
    message = '{} forked {}'.format(item['actor']['login'], item['repo']['name'])
    return fix_encoding(Fore.GREEN + message)
# delete branch
def deleteEvent(item):
    """Format a branch deletion event."""
    message = '{} deleted branch {} at {}'.format(
        item['actor']['login'], item['payload']['ref'], item['repo']['name'])
    return fix_encoding(Fore.RED + message)
# push commits
def pushEvent(item):
    """Format a push event, reporting commit count and target branch."""
    # 'ref' looks like refs/heads/<branch>; keep only the branch name.
    branch = item['payload']['ref'].split('/')[-1]
    message = '{} pushed {} new commit(s) to {} at {}'.format(
        item['actor']['login'], item['payload']['size'], branch, item['repo']['name'])
    return fix_encoding(Fore.BLUE + message)
# create repo, branch
def createEvent(item):
    """Format a repository/branch/tag creation event."""
    actor = item['actor']['login']
    repo = item['repo']['name']
    ref_type = item['payload']['ref_type']
    if ref_type == "repository":
        message = '{} created {} {}'.format(actor, ref_type, repo)
    else:
        # Branches and tags also name the ref that was created.
        message = '{} created {} {} at {}'.format(actor, ref_type, item['payload']['ref'], repo)
    return fix_encoding(Fore.MAGENTA + Style.BRIGHT + message)
# make public repo
def publicEvent(item):
    """Format a 'repository made public' event."""
    message = '{} made {} public'.format(item['actor']['login'], item['repo']['name'])
    return fix_encoding(Fore.MAGENTA + message)
# add collab
def memberEvent(item):
    """Format a collaborator membership-change event."""
    message = '{} {} {} as a collaborator to {}'.format(
        item['actor']['login'], item['payload']['action'],
        item['payload']['member']['login'], item['repo']['name'])
    return fix_encoding(Fore.MAGENTA + message)
def get_time_difference(created_at):
    """Return a human-readable age ("3 hours ago") for a GitHub ISO-8601
    timestamp such as '2020-01-01T12:00:00Z'.

    Only the single largest non-zero unit (day/hour/minute/second) is
    reported, matching the original behaviour.
    """
    created = time.mktime(time.strptime(created_at, '%Y-%m-%dT%H:%M:%SZ'))
    now_iso = datetime.utcnow().replace(microsecond=0).isoformat() + 'Z'
    now = time.mktime(time.strptime(now_iso, '%Y-%m-%dT%H:%M:%SZ'))
    # Both timestamps go through the same strptime/mktime round trip, so
    # the local-timezone offset cancels out of the difference.
    difference = int(now - created)
    units = (
        ('day', difference // 86400),
        ('hour', difference // 3600 % 24),
        ('minute', difference // 60 % 60),
        ('second', difference % 60),
    )
    for unit, amount in units:  # largest unit first
        if amount:
            plural = '' if amount == 1 else 's'
            return '{} {}{} ago'.format(amount, unit, plural)
    # Bug fix: the original raised UnboundLocalError when the event was
    # less than one second old; report that case explicitly instead.
    return 'just now'
def get_page(user, page, quiet, nt):
    """Fetch one page of *user*'s received-events feed from the GitHub API
    and render it as colourised text, one block per event.

    quiet hides comment/PR bodies; nt suppresses the relative timestamps.
    Event types with no formatter below contribute only their separator
    line.  NOTE(review): requests are unauthenticated, so GitHub's
    anonymous rate limit applies -- confirm that is acceptable.
    """
    url = 'https://api.github.com/users/' + user +'/received_events?page='
    response = json.loads(requests.get(url + str(page)).text)
    output = []
    for item in response:
        if not nt:
            created_at = item['created_at']
            difference = get_time_difference(created_at)
            #print(Fore.WHITE + Style.NORMAL + Back.BLUE + difference)
            output.append(Fore.WHITE + Back.BLUE + difference + Back.RESET)
        # Dispatch on the GitHub event type; formatters that can show a
        # text body additionally honour the quiet flag.
        event = item['type']
        if event == "PullRequestReviewCommentEvent": # review PR
            output.append(PRReviewEvent(item, quiet))
        elif event == "PullRequestEvent": # open PR, close PR
            output.append(PREvent(item, quiet))
        elif event == "IssueCommentEvent": # comment on issue/PR
            output.append(issueCommentEvent(item, quiet))
        elif event == "IssuesEvent": # open issue, close issue
            output.append(issuesEvent(item))
        elif event == "CommitCommentEvent":
            output.append(commitCommentEvent(item, quiet))
        elif event == "WatchEvent": # starred
            output.append(watchEvent(item))
        elif event == "ForkEvent": # fork
            output.append(forkEvent(item))
        elif event == "DeleteEvent": # delete branch
            output.append(deleteEvent(item))
        elif event == "PushEvent": # push commits
            output.append(pushEvent(item))
        elif event == "CreateEvent": # make new repo, branch
            output.append(createEvent(item))
        elif event == "PublicEvent": # make repo public
            output.append(publicEvent(item))
        elif event == "MemberEvent": # add collab
            output.append(memberEvent(item))
        output.append("")
    return "\n".join(output)
def get_pages(user, max_page, quiet, nt):
output = []
for page in range(1, max_page+1):
output.append(get_page(user, page, quiet, nt))
pydoc.pager("\n".join(output))
def cli():
init(autoreset=True)
args = get_args()
user = args.user
max_page = int(args.pages)
quiet = args.quiet
nt = args.no_time_stamp
if args.no_style:
remove_color()
get_pages(user, max_page, quiet, nt)
if __name__ == '__main__':
cli()
| <filename>gitfeed/gitfeed.py
#!/usr/bin/env python
# Background color for labels
from colorama import Fore, Back, Style, init
import json
from datetime import datetime
import pydoc
import requests
import argparse
import sys
import time
import os.path
try:
import configparser
except:
from six.moves import configparser
def get_args(argv=None):
file_path = set_configuration()
conf = configparser.SafeConfigParser()
conf.read(file_path)
user = conf.get('GitHub Newsfeed', 'user')
max_page = conf.get('GitHub Newsfeed', 'max_page')
quiet = conf.get('GitHub Newsfeed', 'quiet')
no_time_stamp = conf.get('GitHub Newsfeed', 'no_time_stamp')
no_style = conf.get('GitHub Newsfeed', 'no_style')
parser = argparse.ArgumentParser(description='Check your GitHub Newsfeed via the command-line.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-u', '--user', default=user,
help='GitHub username for the user to fetch newsfeed for')
parser.add_argument('-p', '--pages', default=max_page,
help='number of newsfeed pages to fetch')
parser.add_argument('-q', '--quiet', default=False,
help='hide comment body in issues & PRs', action='store_true')
parser.add_argument('-nt', '--no-time-stamp', default=False,
help='hide time-stamp of events', action='store_true')
parser.add_argument('-ns', '--no-style', default=False,
help='show plain white text with no colors or style', action='store_true')
return parser.parse_args(sys.argv[1:])
def set_configuration():
conf = configparser.SafeConfigParser()
home = os.path.expanduser('~')
folder_name = '.gitfeed'
folder_path = os.path.join(home, folder_name)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_name = 'gitfeed.ini'
file_path = os.path.join(home, folder_name, file_name)
if not os.path.isfile(file_path):
if sys.version_info > (3,0):
user = input("What's your GitHub username? ")
else:
user = raw_input("What's your GitHub username? ")
print('Writing configuration to ' + file_path)
conf.add_section('GitHub Newsfeed')
conf.set('GitHub Newsfeed', 'user', user)
conf.set('GitHub Newsfeed', 'max_page', '1')
conf.set('GitHub Newsfeed', 'quiet', 'False')
conf.set('GitHub Newsfeed', 'no_time_stamp', 'False')
conf.set('GitHub Newsfeed', 'no_style', 'False')
with open(file_path, 'w') as configfile:
conf.write(configfile)
print('')
return file_path
def remove_color():
Fore.GREEN = ''
Fore.CYAN = ''
Fore.RED = ''
Fore.YELLOW = ''
Fore.MAGENTA = ''
Fore.BLUE = ''
Fore.WHITE = ''
Style.BRIGHT = ''
Back.BLUE = ''
return
def fix_encoding(query):
if sys.version_info > (3, 0):
return query
else:
return query.encode('utf-8')
# review PR
def PRReviewEvent(item, quiet):
user = item['actor']['login']
repo = item['repo']['name']
#commit = item['payload']['comment']['commit_id']
#link = item['payload']['pull_request']['html_url']
#title = item['payload']['pull_request']['title']
number = item['payload']['pull_request']['number']
body = item['payload']['comment']['body']
event_output = [fix_encoding(Fore.CYAN + Style.BRIGHT + '{} reviewed pull request {} on {}'.format(user, number, repo))]
if not quiet:
event_output.append(fix_encoding(body))
return "\n".join(event_output)
# open PR, close PR
def PREvent(item, quiet):
user = item['actor']['login']
repo = item['repo']['name']
#link = item['payload']['pull_request']['html_url']
state = item['payload']['pull_request']['state']
number = item['payload']['pull_request']['number']
title = item['payload']['pull_request']['title']
event_output = []
if state == 'open':
event_output.append(fix_encoding(Fore.CYAN + '{} opened pull request {} on {}'.format(user, number, repo)))
event_output.append(fix_encoding(Style.BRIGHT + fix_encoding(title)))
body = item['payload']['pull_request']['body']
if not quiet and not body is None:
event_output.append(fix_encoding(body))
else:
event_output.append(fix_encoding(Fore.CYAN + '{} closed pull request {} on {}'.format(user, number, repo)))
event_output.append(fix_encoding(Style.BRIGHT + title))
return "\n".join(event_output)
# comment on issue, PR
def issueCommentEvent(item, quiet):
user = item['actor']['login']
repo = item['repo']['name']
#link = item['payload']['issue']['html_url']
#labels = item['payload']['issue']['labels'] # FIX_ME
#for x in labels:
# print(x['name'])
#state = item['payload']['action']
number = item['payload']['issue']['number']
#title = item['payload']['issue']['title']
try:
if item['payload']['issue']['pull_request']:
group = 'pull request'
except:
group = 'issue'
event_output = [fix_encoding(Fore.CYAN + Style.BRIGHT + '{} commented on {} {} on {}'.format(user, group, number, repo))]
if not quiet:
body = item['payload']['comment']['body']
event_output.append(fix_encoding(body))
return "\n".join(event_output)
# open issue, close issue
def issuesEvent(item):
user = item['actor']['login']
repo = item['repo']['name']
#link = item['payload']['issue']['html_url']
state = item['payload']['action']
number = item['payload']['issue']['number']
event_output = [fix_encoding(Fore.RED + Style.BRIGHT + '{} {} issue {} on {}'.format(user, state, number, repo))]
title = item['payload']['issue']['title']
event_output.append(fix_encoding(Style.BRIGHT + title))
return "\n".join(event_output)
# comment on a commit
def commitCommentEvent(item, quiet):
user = item['actor']['login']
repo = item['repo']['name']
#link = item['payload']['issue']['html_url']
body = item['payload']['comment']['body']
event_output = [fix_encoding(Fore.CYAN + Style.BRIGHT + '{} commented on {}'.format(user, repo))]
if not quiet:
event_output.append(fix_encoding(body))
return "\n".join(event_output)
# starred by following
def watchEvent(item):
user = item['actor']['login']
repo = item['repo']['name']
#link = 'https://github.com/' + item['repo']['name']
event_output = fix_encoding(Fore.YELLOW + '{} starred {}'.format(user, repo))
return event_output
# forked by following
def forkEvent(item):
user = item['actor']['login']
repo = item['repo']['name']
#link = 'https://github.com/' + item['repo']['name']
event_output = fix_encoding(Fore.GREEN + '{} forked {}'.format(user, repo))
return event_output
# delete branch
def deleteEvent(item):
user = item['actor']['login']
repo = item['repo']['name']
#link = 'https://github.com/' + item['repo']['name']
branch = item['payload']['ref']
event_output = fix_encoding(Fore.RED + '{} deleted branch {} at {}'.format(user, branch, repo))
return event_output
# push commits
def pushEvent(item):
user = item['actor']['login']
size = item['payload']['size']
repo = item['repo']['name']
branch = item['payload']['ref'].split('/')[-1]
#link = 'https://github.com/' + item['repo']['name']
event_output = fix_encoding(Fore.BLUE + '{} pushed {} new commit(s) to {} at {}'.format(user, size, branch, repo))
return event_output
# create repo, branch
def createEvent(item):
user = item['actor']['login']
repo = item['repo']['name']
group = item['payload']['ref_type']
#link = 'https://github.com/' + item['repo']['name']
event_output = ""
if group == "repository":
event_output = fix_encoding(Fore.MAGENTA + Style.BRIGHT + '{} created {} {}'.format(user, group, repo))
else:
branch = item['payload']['ref']
event_output = fix_encoding(Fore.MAGENTA + Style.BRIGHT + '{} created {} {} at {}'.format(user, group, branch, repo))
return event_output
# make public repo
def publicEvent(item):
user = item['actor']['login']
repo = item['repo']['name']
#link = 'https://github.com/' + item['repo']['name']
event_output = fix_encoding(Fore.MAGENTA + '{} made {} public'.format(user, repo))
return event_output
# add collab
def memberEvent(item):
user = item['actor']['login']
action = item['payload']['action']
collab = item['payload']['member']['login']
repo = item['repo']['name']
#link = 'https://github.com/' + item['repo']['name']
event_output = fix_encoding(Fore.MAGENTA + '{} {} {} as a collaborator to {}'.format(user, action, collab, repo))
return event_output
def get_time_difference(created_at):
created_at = time.strptime(created_at, '%Y-%m-%dT%H:%M:%SZ')
created_at = time.mktime(created_at)
current_time = datetime.utcnow().replace(microsecond=0)
current_time = current_time.isoformat() + 'Z'
current_time = time.strptime(current_time, '%Y-%m-%dT%H:%M:%SZ')
current_time = time.mktime(current_time)
difference = current_time - created_at
days = ('day', int(int(difference) / 86400))
hours = ('hour', int(int(difference) / 3600 % 24))
minutes = ('minute', int(int(difference) / 60 % 60))
seconds = ('second', int(int(difference) % 60))
human_readable = (seconds, minutes, hours, days)
for item in human_readable:
if not item[1] == 0:
if item [1] == 1:
statement = '{} {} ago'.format(item[1], item[0])
else:
statement = '{} {}s ago'.format(item[1], item[0])
return statement
def get_page(user, page, quiet, nt):
url = 'https://api.github.com/users/' + user +'/received_events?page='
response = json.loads(requests.get(url + str(page)).text)
output = []
for item in response:
if not nt:
created_at = item['created_at']
difference = get_time_difference(created_at)
#print(Fore.WHITE + Style.NORMAL + Back.BLUE + difference)
output.append(Fore.WHITE + Back.BLUE + difference + Back.RESET)
event = item['type']
if event == "PullRequestReviewCommentEvent": # review PR
output.append(PRReviewEvent(item, quiet))
elif event == "PullRequestEvent": # open PR, close PR
output.append(PREvent(item, quiet))
elif event == "IssueCommentEvent": # comment on issue/PR
output.append(issueCommentEvent(item, quiet))
elif event == "IssuesEvent": # open issue, close issue
output.append(issuesEvent(item))
elif event == "CommitCommentEvent":
output.append(commitCommentEvent(item, quiet))
elif event == "WatchEvent": # starred
output.append(watchEvent(item))
elif event == "ForkEvent": # fork
output.append(forkEvent(item))
elif event == "DeleteEvent": # delete branch
output.append(deleteEvent(item))
elif event == "PushEvent": # push commits
output.append(pushEvent(item))
elif event == "CreateEvent": # make new repo, branch
output.append(createEvent(item))
elif event == "PublicEvent": # make repo public
output.append(publicEvent(item))
elif event == "MemberEvent": # add collab
output.append(memberEvent(item))
output.append("")
return "\n".join(output)
def get_pages(user, max_page, quiet, nt):
output = []
for page in range(1, max_page+1):
output.append(get_page(user, page, quiet, nt))
pydoc.pager("\n".join(output))
def cli():
init(autoreset=True)
args = get_args()
user = args.user
max_page = int(args.pages)
quiet = args.quiet
nt = args.no_time_stamp
if args.no_style:
remove_color()
get_pages(user, max_page, quiet, nt)
if __name__ == '__main__':
cli()
| en | 0.646773 | #!/usr/bin/env python # Background color for labels # review PR #commit = item['payload']['comment']['commit_id'] #link = item['payload']['pull_request']['html_url'] #title = item['payload']['pull_request']['title'] # open PR, close PR #link = item['payload']['pull_request']['html_url'] # comment on issue, PR #link = item['payload']['issue']['html_url'] #labels = item['payload']['issue']['labels'] # FIX_ME #for x in labels: # print(x['name']) #state = item['payload']['action'] #title = item['payload']['issue']['title'] # open issue, close issue #link = item['payload']['issue']['html_url'] # comment on a commit #link = item['payload']['issue']['html_url'] # starred by following #link = 'https://github.com/' + item['repo']['name'] # forked by following #link = 'https://github.com/' + item['repo']['name'] # delete branch #link = 'https://github.com/' + item['repo']['name'] # push commits #link = 'https://github.com/' + item['repo']['name'] # create repo, branch #link = 'https://github.com/' + item['repo']['name'] # make public repo #link = 'https://github.com/' + item['repo']['name'] # add collab #link = 'https://github.com/' + item['repo']['name'] #print(Fore.WHITE + Style.NORMAL + Back.BLUE + difference) # review PR # open PR, close PR # comment on issue/PR # open issue, close issue # starred # fork # delete branch # push commits # make new repo, branch # make repo public # add collab | 2.467928 | 2 |
dhdt/processing/matching_tools_organization.py | GO-Eratosthenes/dhdt | 0 | 6618804 | # organizational functions
import numpy as np
from .matching_tools_frequency_filters import \
perdecomp, thresh_masking
from .matching_tools_frequency_correlators import \
cosi_corr, phase_only_corr, symmetric_phase_corr, amplitude_comp_corr, \
orientation_corr, phase_corr, cross_corr, masked_cosine_corr, \
binary_orientation_corr, masked_corr, robust_corr, windrose_corr, \
gaussian_transformed_phase_corr, upsampled_cross_corr, \
projected_phase_corr
from .matching_tools_frequency_subpixel import \
phase_tpss, phase_svd, phase_radon, phase_hough, phase_ransac, \
phase_weighted_pca, phase_pca, phase_lsq, phase_difference
from .matching_tools_spatial_correlators import \
normalized_cross_corr, sum_sq_diff, sum_sad_diff, cumulative_cross_corr, \
maximum_likelihood, weighted_normalized_cross_correlation
from .matching_tools_spatial_subpixel import \
get_top_gaussian, get_top_parabolic, get_top_moment, \
get_top_mass, get_top_centroid, get_top_blais, get_top_ren, \
get_top_birchfield, get_top_equiangular, get_top_triangular, \
get_top_esinc, get_top_paraboloid, get_top_2d_gaussian
from .matching_tools_correlation_metrics import \
hessian_spread, gauss_spread
from .matching_tools_differential import \
affine_optical_flow, hough_optical_flow
# admin
def list_frequency_correlators():
""" list the abbreviations of the different implemented correlators, being:
cosi_corr - cosicorr
phas_only - phase only correlation
symm_phas - symmetric phase correlation
ampl_comp - amplitude compensation phase correlation
orie_corr - orientation correlation
mask_corr - masked normalized cross correlation
bina_phas - binary phase correlation
wind_corr - windrose correlation
gaus_phas - gaussian transformed phase correlation
upsp_corr - upsampled cross correlation
cros_corr - cross correlation
robu_corr - robust phase correlation
proj_phas - projected phase correlation
phas_corr - phase correlation
"""
correlator_list = ['cosi_corr', 'phas_only', 'symm_phas', 'ampl_comp',
'orie_corr', 'mask_corr', 'bina_phas', 'wind_corr',
'gaus_phas', 'upsp_corr', 'cros_corr', 'robu_corr',
'proj_phas', 'phas_corr']
return correlator_list
def list_spatial_correlators():
""" list the abbreviations of the different implemented correlators, being:
norm_corr - normalized cross correlation
cumu_corr - cumulative cross correlation
sq_diff - sum of squared differences
sad_diff - sum of absolute differences
max_like - phase correlation
wght_corr - weighted normalized cross correlation
"""
correlator_list = ['norm_corr', 'cumu_corr', 'sq_diff', 'sad_diff',
'max_like', 'wght_corr']
return correlator_list
def list_differential_correlators():
correlator_list = ['lucas_kan', 'lucas_aff', 'hough_opt_flw']
return correlator_list
def list_phase_estimators():
""" list the abbreviations of the different implemented phase plane
estimation procedures, being:
* 'tpss' : two point step size
* 'svd' : single value decomposition
* 'radon' : radon transform
* 'hough' : hough transform
* 'ransac' : random sampling and consensus
* 'wpca' : weighted principle component analysis
* 'pca' : principle component analysis
* 'lsq' : least squares estimation
* 'diff' : phase difference
"""
subpix_list = ['tpss','svd','radon', 'hough', 'ransac', 'wpca',\
'pca', 'lsq', 'diff']
return subpix_list
def list_peak_estimators():
""" list the abbreviations of the different implemented for the peak fitting
of the similarity function, being:
'gauss_1' : 1D Gaussian fitting
'parab_1' : 1D parabolic fitting
'moment' : 2D moment of the peak
'mass' : 2D center of mass fitting
'centroid' : 2D estimate the centroid
'blais' : 1D estimate through forth order filter
'ren' : 1D parabolic fitting
'birch' : 1D weighted sum
'eqang' : 1D equiangular line fitting
'trian' : 1D triangular fitting
'esinc' : 1D exponential esinc function
'gauss_2' : 2D Gaussian fitting
'parab_2' : 2D parabolic fitting
'optical_flow' : optical flow refinement
"""
subpix_list = ['gauss_1', 'parab_1', 'moment', 'mass', 'centroid',
'blais', 'ren', 'birch', 'eqang', 'trian', 'esinc',
'gauss_2', 'parab_2', 'optical_flow']
return subpix_list
# todo: include masks
def estimate_translation_of_two_subsets(I1, I2, M1, M2, correlator='lucas_kan',
**kwargs):
assert type(I1) == np.ndarray, ('please provide an array')
assert type(I2) == np.ndarray, ('please provide an array')
assert type(M1) == np.ndarray, ('please provide an array')
assert type(M2) == np.ndarray, ('please provide an array')
optical_flow_approaches = list_differential_correlators()
assert (correlator in optical_flow_approaches), \
('please provide a valid optical flow approach. ' +
'this can be one of the following:'+
f' { {*optical_flow_approaches} }')
if correlator in ['lucas_aff']:
di,dj,_,score = affine_optical_flow(I1, I2, model='affine',
preprocessing=
kwargs.get('preprocessing'))
if correlator in ['hough_opt_flw']:
num_est, max_amp = 1, 1
if kwargs.get('num_estimates') != None:
num_est = kwargs.get('num_estimates')
if kwargs.get('max_amp') != None:
max_amp = kwargs.get('max_amp')
if M1.size!=0: I1[~M1] = np.nan # set data outside mask to NaN
if M2.size!=0: I2[~M2] = np.nan # set data outside mask to NaN
di,dj,score = hough_optical_flow(I1, I2,
num_estimates=num_est,
preprocessing=kwargs.get('preprocessing'),
max_amp=max_amp)
else: #'lucas_kan'
di,dj,_,score = affine_optical_flow(I1, I2, model='simple',
preprocessing=
kwargs.get('preprocessing')
)
return di, dj, score
def match_translation_of_two_subsets(I1_sub,I2_sub,correlator,subpix,
M1_sub=np.array([]),
M2_sub=np.array([]) ):
assert type(I1_sub)==np.ndarray, ('please provide an array')
assert type(I2_sub)==np.ndarray, ('please provide an array')
assert type(M1_sub)==np.ndarray, ('please provide an array')
assert type(M2_sub)==np.ndarray, ('please provide an array')
frequency_based = list_frequency_correlators()
spatial_based = list_spatial_correlators()
# some sub-pixel methods use the peak of the correlation surface,
# while others need the phase of the cross-spectrum
phase_based = list_phase_estimators()
peak_based = list_peak_estimators()
assert ((correlator in frequency_based) or
(correlator in spatial_based)), ('please provide a valid correlation '+
'method. it can be one of the following:'+
f' { {*frequency_based,*spatial_based} }')
# reduce edge effects in frequency space
if correlator in frequency_based:
I1_sub,I2_sub = perdecomp(I1_sub)[0], perdecomp(I2_sub)[0]
# translational matching/estimation
if correlator in frequency_based:
# frequency correlator
if correlator in ['cosi_corr']:
Q = cosi_corr(I1_sub, I2_sub)[0]
elif correlator in ['phas_corr']:
Q = phase_corr(I1_sub, I2_sub)
elif correlator in ['phas_only']:
Q = phase_only_corr(I1_sub, I2_sub)
elif correlator in ['symm_phas']:
Q = symmetric_phase_corr(I1_sub, I2_sub)
elif correlator in ['ampl_comp']:
Q = amplitude_comp_corr(I1_sub, I2_sub)
elif correlator in ['orie_corr']:
Q = orientation_corr(I1_sub, I2_sub)
elif correlator in ['mask_corr']:
C = masked_corr(I1_sub, I2_sub, M1_sub, M2_sub)
if subpix in phase_based: Q = np.fft.fft2(C)
elif correlator in ['bina_phas']:
Q = binary_orientation_corr(I1_sub, I2_sub)
elif correlator in ['wind_corr']:
Q = windrose_corr(I1_sub, I2_sub)
elif correlator in ['gaus_phas']:
Q = gaussian_transformed_phase_corr(I1_sub, I2_sub)
elif correlator in ['upsp_corr']:
Q = upsampled_cross_corr(I1_sub, I2_sub)
elif correlator in ['cros_corr']:
Q = cross_corr(I1_sub, I2_sub)
elif correlator in ['robu_corr']:
Q = robust_corr(I1_sub, I2_sub)
elif correlator in ['proj_phas']:
C = projected_phase_corr(I1_sub, I2_sub, M1_sub, M2_sub)
if subpix in phase_based: Q = np.fft.fft2(C)
if (subpix in peak_based) and ('Q' in locals()):
C = np.fft.fftshift(np.real(np.fft.ifft2(Q)))
else:
# spatial correlator
if correlator in ['norm_corr']:
C = normalized_cross_corr(I1_sub, I2_sub)
elif correlator in ['cumu_corr']:
C = cumulative_cross_corr(I1_sub, I2_sub)
elif correlator in ['sq_diff']:
C = -1 * sum_sq_diff(I1_sub, I2_sub)
elif correlator in ['sad_diff']:
C = sum_sad_diff(I1_sub, I2_sub)
elif correlator in ['max_like']:
C = maximum_likelihood(I1_sub, I2_sub)
elif correlator in ['wght_corr']:
C = weighted_normalized_cross_correlation(I1_sub, I2_sub)
if subpix in phase_based:
Q = np.fft.fft2(C)
if (subpix in peak_based) or (subpix is None):
return C
else:
return Q
def estimate_subpixel(QC, subpix, m0=np.zeros((1,2))):
assert type(QC)==np.ndarray, ('please provide an array')
assert type(m0)==np.ndarray, ('please provide an array')
phase_based = list_phase_estimators()
peak_based = list_peak_estimators()
assert ((subpix in phase_based) or (subpix in peak_based)), \
('please provide a valid subpixel method.' +
'it can be one of the following:' +
f' { {*peak_based,*phase_based} }')
if subpix in peak_based: # correlation surface
if subpix in ['gauss_1']:
ddi,ddj,_,_ = get_top_gaussian(QC, top=m0)
elif subpix in ['parab_1']:
ddi,ddj,_,_= get_top_parabolic(QC, top=m0)
elif subpix in ['moment']:
ddi,ddj,_,_= get_top_moment(QC, ds=1, top=m0)
elif subpix in ['mass']:
ddi,ddj,_,_= get_top_mass(QC, top=m0)
elif subpix in ['centroid']:
ddi,ddj,_,_= get_top_centroid(QC, top=m0)
elif subpix in ['blais']:
ddi,ddj,_,_= get_top_blais(QC, top=m0)
elif subpix in ['ren']:
ddi,ddj,_,_= get_top_ren(QC, top=m0)
elif subpix in ['birch']:
ddi,ddj,_,_= get_top_birchfield(QC, top=m0)
elif subpix in ['eqang']:
ddi,ddj,_,_= get_top_equiangular(QC, top=m0)
elif subpix in ['trian']:
ddi,ddj,_,_= get_top_triangular(QC, top=m0)
elif subpix in ['esinc']:
ddi,ddj,_,_= get_top_esinc(QC, ds=1, top=m0)
elif subpix in ['gauss_2']:
ddi,ddj,_,_= get_top_2d_gaussian(QC, ds=1, top=m0)
elif subpix in ['parab_2t']:
ddi,ddj,_,_= get_top_paraboloid(QC, ds=1, top=m0)
elif subpix in phase_based: #cross-spectrum
if subpix in ['tpss']:
W = thresh_masking(QC)
(m,snr) = phase_tpss(QC, W, m0)
ddi, ddj = m[0], m[1]
elif subpix in ['svd']:
W = thresh_masking(QC)
ddi,ddj = phase_svd(QC, W)
elif subpix in ['radon']:
ddi,ddj = phase_radon(QC)
elif subpix in ['hough']:
ddi,ddj = phase_hough(QC)
elif subpix in ['ransac']:
ddi,ddj = phase_ransac(QC)
elif subpix in ['wpca']:
W = thresh_masking(QC)
ddi,ddj = phase_weighted_pca(QC, W)
elif subpix in ['pca']:
ddi,ddj = phase_pca(QC)
elif subpix in ['lsq']:
ddi,ddj = phase_lsq(QC)
elif subpix in ['diff']:
ddi,ddj = phase_difference(QC)
return ddi, ddj
def estimate_precision(C, di, dj, method='gaussian'):
""" given a similarity surface, estimate its matching dispersion
Parameters
----------
C : numpy.array, size=(m,n)
cross correlation function
di,dj : float
locational estimate of the cross-correlation peak, this can also be
negative, see Notes below
method : {'gaussian', 'hessian'}
Returns
-------
cov_ii, cov_jj : float
co-variance estimate
cov_ij : float
off-diagonal co-variance estimate
Notes
-----
It is important to know what type of coordinate systems exist, hence:
.. code-block:: text
coordinate | +--------> collumns
system 'ij'| |
| |
| j | image frame
--------+--------> |
| |
| v
| i rows
v
"""
if method in ['hessian']:
cov_ii,cov_jj,cov_ij = hessian_spread(C,
C.shape[0] // 2 + np.round(di).astype(int),
C.shape[1] // 2 + np.round(dj).astype(int))
else:
cov_ii,cov_jj,cov_ij,_,_ = gauss_spread(C,
C.shape[0]//2 + np.round(di).astype(int),
C.shape[1]//2 + np.round(dj).astype(int),
di-np.round(di), dj-np.round(dj), est='dist')
return cov_ii, cov_jj, cov_ij | # organizational functions
import numpy as np
from .matching_tools_frequency_filters import \
perdecomp, thresh_masking
from .matching_tools_frequency_correlators import \
cosi_corr, phase_only_corr, symmetric_phase_corr, amplitude_comp_corr, \
orientation_corr, phase_corr, cross_corr, masked_cosine_corr, \
binary_orientation_corr, masked_corr, robust_corr, windrose_corr, \
gaussian_transformed_phase_corr, upsampled_cross_corr, \
projected_phase_corr
from .matching_tools_frequency_subpixel import \
phase_tpss, phase_svd, phase_radon, phase_hough, phase_ransac, \
phase_weighted_pca, phase_pca, phase_lsq, phase_difference
from .matching_tools_spatial_correlators import \
normalized_cross_corr, sum_sq_diff, sum_sad_diff, cumulative_cross_corr, \
maximum_likelihood, weighted_normalized_cross_correlation
from .matching_tools_spatial_subpixel import \
get_top_gaussian, get_top_parabolic, get_top_moment, \
get_top_mass, get_top_centroid, get_top_blais, get_top_ren, \
get_top_birchfield, get_top_equiangular, get_top_triangular, \
get_top_esinc, get_top_paraboloid, get_top_2d_gaussian
from .matching_tools_correlation_metrics import \
hessian_spread, gauss_spread
from .matching_tools_differential import \
affine_optical_flow, hough_optical_flow
# admin
def list_frequency_correlators():
    """ give the abbreviations of the implemented frequency-domain
    correlation methods, these are:

        * 'cosi_corr' : cosicorr
        * 'phas_only' : phase only correlation
        * 'symm_phas' : symmetric phase correlation
        * 'ampl_comp' : amplitude compensation phase correlation
        * 'orie_corr' : orientation correlation
        * 'mask_corr' : masked normalized cross correlation
        * 'bina_phas' : binary phase correlation
        * 'wind_corr' : windrose correlation
        * 'gaus_phas' : gaussian transformed phase correlation
        * 'upsp_corr' : upsampled cross correlation
        * 'cros_corr' : cross correlation
        * 'robu_corr' : robust phase correlation
        * 'proj_phas' : projected phase correlation
        * 'phas_corr' : phase correlation
    """
    return ['cosi_corr', 'phas_only', 'symm_phas', 'ampl_comp',
            'orie_corr', 'mask_corr', 'bina_phas', 'wind_corr',
            'gaus_phas', 'upsp_corr', 'cros_corr', 'robu_corr',
            'proj_phas', 'phas_corr']
def list_spatial_correlators():
    """ give the abbreviations of the implemented spatial-domain
    correlation methods, these are:

        * 'norm_corr' : normalized cross correlation
        * 'cumu_corr' : cumulative cross correlation
        * 'sq_diff'   : sum of squared differences
        * 'sad_diff'  : sum of absolute differences
        * 'max_like'  : maximum likelihood
        * 'wght_corr' : weighted normalized cross correlation
    """
    return ['norm_corr', 'cumu_corr', 'sq_diff', 'sad_diff',
            'max_like', 'wght_corr']
def list_differential_correlators():
    """ list the abbreviations of the different implemented differential
    (optical flow) estimators, being:

        * 'lucas_kan' : Lucas-Kanade, i.e. affine optical flow with a
          simple (translation-only) model
        * 'lucas_aff' : Lucas-Kanade with a full affine model
        * 'hough_opt_flw' : Hough-transform based optical flow
    """
    return ['lucas_kan', 'lucas_aff', 'hough_opt_flw']
def list_phase_estimators():
    """ give the abbreviations of the implemented phase-plane estimation
    procedures, these are:

        * 'tpss'   : two point step size
        * 'svd'    : single value decomposition
        * 'radon'  : radon transform
        * 'hough'  : hough transform
        * 'ransac' : random sampling and consensus
        * 'wpca'   : weighted principle component analysis
        * 'pca'    : principle component analysis
        * 'lsq'    : least squares estimation
        * 'diff'   : phase difference
    """
    return ['tpss', 'svd', 'radon', 'hough', 'ransac', 'wpca',
            'pca', 'lsq', 'diff']
def list_peak_estimators():
    """ give the abbreviations of the implemented peak-fitting methods
    for the similarity function, these are:

        * 'gauss_1'  : 1D Gaussian fitting
        * 'parab_1'  : 1D parabolic fitting
        * 'moment'   : 2D moment of the peak
        * 'mass'     : 2D center of mass fitting
        * 'centroid' : 2D estimate of the centroid
        * 'blais'    : 1D estimate through forth order filter
        * 'ren'      : 1D parabolic fitting
        * 'birch'    : 1D weighted sum
        * 'eqang'    : 1D equiangular line fitting
        * 'trian'    : 1D triangular fitting
        * 'esinc'    : 1D exponential esinc function
        * 'gauss_2'  : 2D Gaussian fitting
        * 'parab_2'  : 2D parabolic fitting
        * 'optical_flow' : optical flow refinement
    """
    return ['gauss_1', 'parab_1', 'moment', 'mass', 'centroid',
            'blais', 'ren', 'birch', 'eqang', 'trian', 'esinc',
            'gauss_2', 'parab_2', 'optical_flow']
# todo: include masks
def estimate_translation_of_two_subsets(I1, I2, M1, M2, correlator='lucas_kan',
                                        **kwargs):
    """ estimate the translation between two image subsets with a
    differential (optical flow) method

    Parameters
    ----------
    I1, I2 : numpy.ndarray, size=(m,n)
        image templates to be matched
    M1, M2 : numpy.ndarray, size=(m,n), dtype=bool
        masks highlighting the valid data, only used by 'hough_opt_flw'
        (invalid pixels are set to NaN there)
    correlator : {'lucas_kan', 'lucas_aff', 'hough_opt_flw'}
        optical flow approach to use, see list_differential_correlators
    **kwargs
        'preprocessing' is forwarded to all estimators; 'num_estimates'
        and 'max_amp' are only used by 'hough_opt_flw'

    Returns
    -------
    di, dj : float
        estimated displacement (row, column direction)
    score : float
        metric of the match quality, as given by the estimator
    """
    assert type(I1) == np.ndarray, ('please provide an array')
    assert type(I2) == np.ndarray, ('please provide an array')
    assert type(M1) == np.ndarray, ('please provide an array')
    assert type(M2) == np.ndarray, ('please provide an array')

    optical_flow_approaches = list_differential_correlators()
    assert (correlator in optical_flow_approaches), \
        ('please provide a valid optical flow approach. ' +
         'this can be one of the following:'+
         f' { {*optical_flow_approaches} }')

    # NOTE: the branches must be mutually exclusive (if/elif/else) —
    # previously 'lucas_aff' fell through into the trailing else-branch,
    # so its affine estimate was overwritten by the 'simple' model result
    if correlator in ['lucas_aff']:
        di,dj,_,score = affine_optical_flow(I1, I2, model='affine',
                                            preprocessing=
                                            kwargs.get('preprocessing'))
    elif correlator in ['hough_opt_flw']:
        # fall back to defaults when the optional keywords are absent
        num_est, max_amp = 1, 1
        if kwargs.get('num_estimates') is not None:
            num_est = kwargs.get('num_estimates')
        if kwargs.get('max_amp') is not None:
            max_amp = kwargs.get('max_amp')
        if M1.size!=0: I1[~M1] = np.nan # set data outside mask to NaN
        if M2.size!=0: I2[~M2] = np.nan # set data outside mask to NaN
        di,dj,score = hough_optical_flow(I1, I2,
                                         num_estimates=num_est,
                                         preprocessing=kwargs.get('preprocessing'),
                                         max_amp=max_amp)
    else: # 'lucas_kan'
        di,dj,_,score = affine_optical_flow(I1, I2, model='simple',
                                            preprocessing=
                                            kwargs.get('preprocessing')
                                            )
    return di, dj, score
def match_translation_of_two_subsets(I1_sub,I2_sub,correlator,subpix,
                                     M1_sub=np.array([]),
                                     M2_sub=np.array([]) ):
    """ match two image subsets with the requested correlator and return
    either the correlation surface or the cross-spectrum, depending on
    which representation the chosen sub-pixel method needs

    Parameters
    ----------
    I1_sub, I2_sub : numpy.ndarray, size=(m,n)
        image templates to be matched
    correlator : str
        abbreviation of the matching method, see
        list_frequency_correlators and list_spatial_correlators
    subpix : str
        abbreviation of the sub-pixel localization method, see
        list_peak_estimators (peak-based, needs C) and
        list_phase_estimators (phase-based, needs Q)
    M1_sub, M2_sub : numpy.ndarray, size=(m,n), default=empty
        masks highlighting the valid data, only used by the masked
        correlators ('mask_corr', 'proj_phas')

    Returns
    -------
    numpy.ndarray, size=(m,n)
        correlation surface C (when subpix is peak-based or None) or
        complex cross-spectrum Q (when subpix is phase-based)

    Notes
    -----
    NOTE(review): for a frequency-based correlator combined with
    subpix=None, C is only defined for 'mask_corr' and 'proj_phas' —
    other combinations would raise a NameError; callers appear to always
    pass a valid subpix. TODO confirm.
    """
    assert type(I1_sub)==np.ndarray, ('please provide an array')
    assert type(I2_sub)==np.ndarray, ('please provide an array')
    assert type(M1_sub)==np.ndarray, ('please provide an array')
    assert type(M2_sub)==np.ndarray, ('please provide an array')

    frequency_based = list_frequency_correlators()
    spatial_based = list_spatial_correlators()

    # some sub-pixel methods use the peak of the correlation surface,
    # while others need the phase of the cross-spectrum
    phase_based = list_phase_estimators()
    peak_based = list_peak_estimators()

    assert ((correlator in frequency_based) or
            (correlator in spatial_based)), ('please provide a valid correlation '+
                                             'method. it can be one of the following:'+
                                             f' { {*frequency_based,*spatial_based} }')

    # reduce edge effects in frequency space
    if correlator in frequency_based:
        I1_sub,I2_sub = perdecomp(I1_sub)[0], perdecomp(I2_sub)[0]

    # translational matching/estimation
    if correlator in frequency_based:
        # frequency correlator: most methods yield a cross-spectrum Q;
        # 'mask_corr' and 'proj_phas' directly yield a surface C and only
        # transform it to Q when a phase-based sub-pixel method follows
        if correlator in ['cosi_corr']:
            Q = cosi_corr(I1_sub, I2_sub)[0]
        elif correlator in ['phas_corr']:
            Q = phase_corr(I1_sub, I2_sub)
        elif correlator in ['phas_only']:
            Q = phase_only_corr(I1_sub, I2_sub)
        elif correlator in ['symm_phas']:
            Q = symmetric_phase_corr(I1_sub, I2_sub)
        elif correlator in ['ampl_comp']:
            Q = amplitude_comp_corr(I1_sub, I2_sub)
        elif correlator in ['orie_corr']:
            Q = orientation_corr(I1_sub, I2_sub)
        elif correlator in ['mask_corr']:
            C = masked_corr(I1_sub, I2_sub, M1_sub, M2_sub)
            if subpix in phase_based: Q = np.fft.fft2(C)
        elif correlator in ['bina_phas']:
            Q = binary_orientation_corr(I1_sub, I2_sub)
        elif correlator in ['wind_corr']:
            Q = windrose_corr(I1_sub, I2_sub)
        elif correlator in ['gaus_phas']:
            Q = gaussian_transformed_phase_corr(I1_sub, I2_sub)
        elif correlator in ['upsp_corr']:
            Q = upsampled_cross_corr(I1_sub, I2_sub)
        elif correlator in ['cros_corr']:
            Q = cross_corr(I1_sub, I2_sub)
        elif correlator in ['robu_corr']:
            Q = robust_corr(I1_sub, I2_sub)
        elif correlator in ['proj_phas']:
            C = projected_phase_corr(I1_sub, I2_sub, M1_sub, M2_sub)
            if subpix in phase_based: Q = np.fft.fft2(C)
        # the locals() guard skips the back-transform for the correlators
        # above that already produced a spatial surface C (no Q defined)
        if (subpix in peak_based) and ('Q' in locals()):
            C = np.fft.fftshift(np.real(np.fft.ifft2(Q)))
    else:
        # spatial correlator: all methods yield a correlation surface C
        if correlator in ['norm_corr']:
            C = normalized_cross_corr(I1_sub, I2_sub)
        elif correlator in ['cumu_corr']:
            C = cumulative_cross_corr(I1_sub, I2_sub)
        elif correlator in ['sq_diff']:
            # negated, so that the best match is a maximum like the others
            C = -1 * sum_sq_diff(I1_sub, I2_sub)
        elif correlator in ['sad_diff']:
            C = sum_sad_diff(I1_sub, I2_sub)
        elif correlator in ['max_like']:
            C = maximum_likelihood(I1_sub, I2_sub)
        elif correlator in ['wght_corr']:
            C = weighted_normalized_cross_correlation(I1_sub, I2_sub)
        if subpix in phase_based:
            Q = np.fft.fft2(C)

    # hand back the representation the sub-pixel stage needs
    if (subpix in peak_based) or (subpix is None):
        return C
    else:
        return Q
def estimate_subpixel(QC, subpix, m0=np.zeros((1,2))):
    """ refine an integer displacement estimate to sub-pixel precision

    Dispatches to one of the implemented sub-pixel localisation methods.
    Peak-based methods fit a model to the correlation surface, while
    phase-based methods fit a plane to the cross-power spectrum.

    Parameters
    ----------
    QC : numpy.ndarray, size=(m,n)
        correlation surface (for peak-based methods) or complex
        cross-power spectrum (for phase-based methods)
    subpix : str
        abbreviation of the estimator to use, see list_peak_estimators()
        and list_phase_estimators() for the supported names
    m0 : numpy.ndarray, size=(1,2)
        coarse (integer) location of the correlation peak, used as the
        anchor around which the refinement is done

    Returns
    -------
    ddi, ddj : float
        sub-pixel displacement along the row (i) and column (j) axis
    """
    assert type(QC)==np.ndarray, ('please provide an array')
    assert type(m0)==np.ndarray, ('please provide an array')
    phase_based = list_phase_estimators()
    peak_based = list_peak_estimators()
    assert ((subpix in phase_based) or (subpix in peak_based)), \
        ('please provide a valid subpixel method.' +
         'it can be one of the following:' +
         f' { {*peak_based,*phase_based} }')

    if subpix in peak_based: # correlation surface
        if subpix in ['gauss_1']:
            ddi,ddj,_,_ = get_top_gaussian(QC, top=m0)
        elif subpix in ['parab_1']:
            ddi,ddj,_,_= get_top_parabolic(QC, top=m0)
        elif subpix in ['moment']:
            ddi,ddj,_,_= get_top_moment(QC, ds=1, top=m0)
        elif subpix in ['mass']:
            ddi,ddj,_,_= get_top_mass(QC, top=m0)
        elif subpix in ['centroid']:
            ddi,ddj,_,_= get_top_centroid(QC, top=m0)
        elif subpix in ['blais']:
            ddi,ddj,_,_= get_top_blais(QC, top=m0)
        elif subpix in ['ren']:
            ddi,ddj,_,_= get_top_ren(QC, top=m0)
        elif subpix in ['birch']:
            ddi,ddj,_,_= get_top_birchfield(QC, top=m0)
        elif subpix in ['eqang']:
            ddi,ddj,_,_= get_top_equiangular(QC, top=m0)
        elif subpix in ['trian']:
            ddi,ddj,_,_= get_top_triangular(QC, top=m0)
        elif subpix in ['esinc']:
            ddi,ddj,_,_= get_top_esinc(QC, ds=1, top=m0)
        elif subpix in ['gauss_2']:
            ddi,ddj,_,_= get_top_2d_gaussian(QC, ds=1, top=m0)
        elif subpix in ['parab_2t']:
            # NOTE(review): the estimator list docstring mentions 'parab_2';
            # confirm 'parab_2t' is the name actually returned by
            # list_peak_estimators(), otherwise ddi/ddj stay unbound here.
            ddi,ddj,_,_= get_top_paraboloid(QC, ds=1, top=m0)
    elif subpix in phase_based: #cross-spectrum
        if subpix in ['tpss']:
            # two point step size: needs a weighting mask and returns the
            # plane parameters together with a signal-to-noise estimate
            W = thresh_masking(QC)
            (m,snr) = phase_tpss(QC, W, m0)
            ddi, ddj = m[0], m[1]
        elif subpix in ['svd']:
            W = thresh_masking(QC)
            ddi,ddj = phase_svd(QC, W)
        elif subpix in ['radon']:
            ddi,ddj = phase_radon(QC)
        elif subpix in ['hough']:
            ddi,ddj = phase_hough(QC)
        elif subpix in ['ransac']:
            ddi,ddj = phase_ransac(QC)
        elif subpix in ['wpca']:
            W = thresh_masking(QC)
            ddi,ddj = phase_weighted_pca(QC, W)
        elif subpix in ['pca']:
            ddi,ddj = phase_pca(QC)
        elif subpix in ['lsq']:
            ddi,ddj = phase_lsq(QC)
        elif subpix in ['diff']:
            ddi,ddj = phase_difference(QC)
    return ddi, ddj
def estimate_precision(C, di, dj, method='gaussian'):
    """ given a similarity surface, estimate its matching dispersion

    Parameters
    ----------
    C : numpy.array, size=(m,n)
        cross correlation function
    di,dj : float
        locational estimate of the cross-correlation peak, this can also be
        negative, see Notes below
    method : {'gaussian', 'hessian'}
        model fitted to the correlation peak to derive the dispersion;
        any value other than 'hessian' falls back to the Gaussian fit

    Returns
    -------
    cov_ii, cov_jj : float
        co-variance estimate
    cov_ij : float
        off-diagonal co-variance estimate

    Notes
    -----
    It is important to know what type of coordinate systems exist, hence:

    .. code-block:: text

        coordinate |           +--------> collumns
        system 'ij'|           |
                   |           |
                   |           j         image frame
           --------+-------->  |
                   |           |
                   |           v
                   | i         rows
                   v
    """
    # the displacement is measured from the surface centre, so translate it
    # to the (rounded) array index of the peak before fitting the spread
    if method in ['hessian']:
        cov_ii,cov_jj,cov_ij = hessian_spread(C,
            C.shape[0] // 2 + np.round(di).astype(int),
            C.shape[1] // 2 + np.round(dj).astype(int))
    else:
        # Gaussian fit also takes the sub-pixel remainder of the estimate
        cov_ii,cov_jj,cov_ij,_,_ = gauss_spread(C,
            C.shape[0]//2 + np.round(di).astype(int),
            C.shape[1]//2 + np.round(dj).astype(int),
            di-np.round(di), dj-np.round(dj), est='dist')
    return cov_ii, cov_jj, cov_ij
refinement # todo: include masks # set data outside mask to NaN # set data outside mask to NaN #'lucas_kan' # some sub-pixel methods use the peak of the correlation surface, # while others need the phase of the cross-spectrum # reduce edge effects in frequency space # translational matching/estimation # frequency correlator # spatial correlator # correlation surface #cross-spectrum given a similarity surface, estimate its matching dispersion Parameters ---------- C : numpy.array, size=(m,n) cross correlation function di,dj : float locational estimate of the cross-correlation peak, this can also be negative, see Notes below method : {'gaussian', 'hessian'} Returns ------- cov_ii, cov_jj : float co-variance estimate cov_ij : float off-diagonal co-variance estimate Notes ----- It is important to know what type of coordinate systems exist, hence: .. code-block:: text coordinate | +--------> collumns system 'ij'| | | | | j | image frame --------+--------> | | | | v | i rows v | 1.732714 | 2 |
tests/test_vm.py | eigenein/python-avm2 | 5 | 6618805 | from avm2.runtime import undefined
from avm2.swf.types import DoABCTag, Tag
from avm2.vm import VirtualMachine, execute_do_abc_tag, execute_tag
def test_execute_tag(raw_do_abc_tag: Tag):
    # Smoke test: executing a raw SWF tag should not raise.
    execute_tag(raw_do_abc_tag)


def test_execute_do_abc_tag(do_abc_tag: DoABCTag):
    # Smoke test: executing an already parsed DoABC tag should not raise.
    execute_do_abc_tag(do_abc_tag)


def test_lookup_class(machine: VirtualMachine):
    # Class name -> index lookups. The indices are regression values pinned
    # against the bundled test fixture — TODO confirm fixture provenance.
    assert machine.lookup_class('battle.BattleCore') == 2241
    assert machine.lookup_class('game.battle.controller.BattleController') == 989
    assert machine.lookup_class('game.battle.controller.BattleEnemyReward') == 2308


def test_lookup_method(machine: VirtualMachine):
    # Qualified method name -> method index lookups (regression values).
    assert machine.lookup_method('battle.BattleCore.getElementalPenetration') == 24363
    assert machine.lookup_method('battle.BattleCore.hitrateIntensity') == 24360


def test_call_get_elemental_penetration(machine: VirtualMachine):
    # Methods are invoked with `undefined` as the receiver plus two arguments.
    assert machine.call_method('battle.BattleCore.getElementalPenetration', undefined, 2, 300000) == 1
    assert machine.call_method('battle.BattleCore.getElementalPenetration', undefined, 42, -100500) == 42


def test_call_hitrate_intensity(machine: VirtualMachine):
    # Boundary and midpoint values of the hit-rate curve.
    assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, -100, 0) == 1
    assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, 100, 0) == 1
    assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, 0, 100) == 0
    assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, 4, 8) == 0.5


def test_new_battle_enemy_reward(machine: VirtualMachine):
    # Object construction should succeed without raising.
    machine.new_instance('game.battle.controller.BattleEnemyReward')
| from avm2.runtime import undefined
from avm2.swf.types import DoABCTag, Tag
from avm2.vm import VirtualMachine, execute_do_abc_tag, execute_tag
def test_execute_tag(raw_do_abc_tag: Tag):
execute_tag(raw_do_abc_tag)
def test_execute_do_abc_tag(do_abc_tag: DoABCTag):
execute_do_abc_tag(do_abc_tag)
def test_lookup_class(machine: VirtualMachine):
assert machine.lookup_class('battle.BattleCore') == 2241
assert machine.lookup_class('game.battle.controller.BattleController') == 989
assert machine.lookup_class('game.battle.controller.BattleEnemyReward') == 2308
def test_lookup_method(machine: VirtualMachine):
assert machine.lookup_method('battle.BattleCore.getElementalPenetration') == 24363
assert machine.lookup_method('battle.BattleCore.hitrateIntensity') == 24360
def test_call_get_elemental_penetration(machine: VirtualMachine):
assert machine.call_method('battle.BattleCore.getElementalPenetration', undefined, 2, 300000) == 1
assert machine.call_method('battle.BattleCore.getElementalPenetration', undefined, 42, -100500) == 42
def test_call_hitrate_intensity(machine: VirtualMachine):
assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, -100, 0) == 1
assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, 100, 0) == 1
assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, 0, 100) == 0
assert machine.call_method('battle.BattleCore.hitrateIntensity', undefined, 4, 8) == 0.5
def test_new_battle_enemy_reward(machine: VirtualMachine):
machine.new_instance('game.battle.controller.BattleEnemyReward')
| none | 1 | 2.183575 | 2 | |
api/gen/device/brand_id_pb2.py | khromiumos/chromiumos-chromite | 0 | 6618806 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: device/brand_id.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='device/brand_id.proto',
package='device',
syntax='proto3',
serialized_options=_b('Z0go.chromium.org/chromiumos/infra/proto/go/device'),
serialized_pb=_b('\n\x15\x64\x65vice/brand_id.proto\x12\x06\x64\x65vice\"\x18\n\x07\x42randId\x12\r\n\x05value\x18\x01 \x01(\tB2Z0go.chromium.org/chromiumos/infra/proto/go/deviceb\x06proto3')
)
_BRANDID = _descriptor.Descriptor(
name='BrandId',
full_name='device.BrandId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='device.BrandId.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=57,
)
DESCRIPTOR.message_types_by_name['BrandId'] = _BRANDID
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BrandId = _reflection.GeneratedProtocolMessageType('BrandId', (_message.Message,), dict(
DESCRIPTOR = _BRANDID,
__module__ = 'device.brand_id_pb2'
# @@protoc_insertion_point(class_scope:device.BrandId)
))
_sym_db.RegisterMessage(BrandId)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: device/brand_id.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='device/brand_id.proto',
package='device',
syntax='proto3',
serialized_options=_b('Z0go.chromium.org/chromiumos/infra/proto/go/device'),
serialized_pb=_b('\n\x15\x64\x65vice/brand_id.proto\x12\x06\x64\x65vice\"\x18\n\x07\x42randId\x12\r\n\x05value\x18\x01 \x01(\tB2Z0go.chromium.org/chromiumos/infra/proto/go/deviceb\x06proto3')
)
_BRANDID = _descriptor.Descriptor(
name='BrandId',
full_name='device.BrandId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value', full_name='device.BrandId.value', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=33,
serialized_end=57,
)
DESCRIPTOR.message_types_by_name['BrandId'] = _BRANDID
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BrandId = _reflection.GeneratedProtocolMessageType('BrandId', (_message.Message,), dict(
DESCRIPTOR = _BRANDID,
__module__ = 'device.brand_id_pb2'
# @@protoc_insertion_point(class_scope:device.BrandId)
))
_sym_db.RegisterMessage(BrandId)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| en | 0.49081 | # Generated by the protocol buffer compiler. DO NOT EDIT! # source: device/brand_id.proto # @@protoc_insertion_point(imports) # @@protoc_insertion_point(class_scope:device.BrandId) # @@protoc_insertion_point(module_scope) | 1.365234 | 1 |
computes/migrations/0001_initial.py | NGXTDN/webvirtcloud | 1,246 | 6618807 | # Generated by Django 2.2.10 on 2020-01-28 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Compute table, which
    # stores the connection details (host, credentials, type) of each
    # managed hypervisor.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Compute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=64)),
                ('hostname', models.CharField(max_length=64)),
                ('login', models.CharField(max_length=20)),
                ('password', models.CharField(blank=True, max_length=14, null=True)),
                ('details', models.CharField(blank=True, max_length=64, null=True)),
                ('type', models.IntegerField()),
            ],
        ),
    ]
| # Generated by Django 2.2.10 on 2020-01-28 07:01
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Compute',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('hostname', models.CharField(max_length=64)),
('login', models.CharField(max_length=20)),
('password', models.CharField(blank=True, max_length=14, null=True)),
('details', models.CharField(blank=True, max_length=64, null=True)),
('type', models.IntegerField()),
],
),
]
| en | 0.715814 | # Generated by Django 2.2.10 on 2020-01-28 07:01 | 1.858979 | 2 |
cibyl/models/ci/zuul/test_suite.py | rhos-infra/cibyl | 3 | 6618808 | <filename>cibyl/models/ci/zuul/test_suite.py
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from cibyl.models.attribute import AttributeListValue
from cibyl.models.ci.zuul.test import Test, TestStatus
from cibyl.models.model import Model
from cibyl.utils.filtering import apply_filters
class TestSuite(Model):
    """Model for a collection of test cases on a Zuul environment.

    @DynamicAttrs: Contains attributes added on runtime.
    """

    class Data:
        """Holds the data that will define the model.
        """
        name = 'UNKNOWN'
        """Name of the test collection."""
        tests = []
        """The collection of tests hold by the suite."""
        url = None
        """Page where more information on the tests can be obtained."""

        def __init__(self):
            # Give every Data instance its own list. The class-level
            # attribute above is kept as a documented default, but relying
            # on it would make every suite share (and possibly mutate)
            # one single list of tests.
            self.tests = []

    API = {
        'name': {
            'attr_type': str,
            'arguments': []
        },
        'tests': {
            'attr_type': Test,
            'attribute_value_class': AttributeListValue,
            'arguments': []
        },
        'url': {
            'attr_type': str,
            'arguments': []
        }
    }
    """Defines base contents of the model."""

    def __init__(self, data=None):
        """Constructor.

        :param data: Defining data for this suite. When omitted, a fresh,
            empty :class:`TestSuite.Data` is created for this instance.
            (Previously the default was a single ``Data()`` evaluated once
            at definition time, so all default-constructed suites shared
            one mutable tests list — the classic mutable-default pitfall.)
        :type data: :class:`TestSuite.Data` or None
        """
        if data is None:
            data = TestSuite.Data()
        super().__init__(
            {
                'name': data.name,
                'tests': data.tests,
                'url': data.url
            }
        )

    def __eq__(self, other):
        # Two suites are equal iff name, test collection and url all match.
        if not isinstance(other, TestSuite):
            return False

        if self is other:
            return True

        return \
            self.name == other.name and \
            self.tests == other.tests and \
            self.url == other.url

    @property
    def test_count(self):
        """
        :return: Number of test cases stored on this suite.
        :rtype: int
        """
        return len(self.tests)

    @property
    def success_count(self):
        """
        :return: Number of successful test cases stored on this suite.
        :rtype: int
        """
        return len(
            apply_filters(
                self.tests,
                lambda test: test.status == TestStatus.SUCCESS
            )
        )

    @property
    def failed_count(self):
        """
        :return: Number of failed test cases stored on this suite.
        :rtype: int
        """
        return len(
            apply_filters(
                self.tests,
                lambda test: test.status == TestStatus.FAILURE
            )
        )

    @property
    def skipped_count(self):
        """
        :return: Number of ignored test cases stored on this suite.
        :rtype: int
        """
        return len(
            apply_filters(
                self.tests,
                lambda test: test.status == TestStatus.SKIPPED
            )
        )

    @property
    def total_time(self):
        """
        :return: Total time it took to run all tests on this suite.
        :rtype: float
        """
        return sum(test.duration.value for test in self.tests)
| <filename>cibyl/models/ci/zuul/test_suite.py
"""
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
from cibyl.models.attribute import AttributeListValue
from cibyl.models.ci.zuul.test import Test, TestStatus
from cibyl.models.model import Model
from cibyl.utils.filtering import apply_filters
class TestSuite(Model):
"""Model for a collection of test cases on a Zuul environment.
@DynamicAttrs: Contains attributes added on runtime.
"""
class Data:
"""Holds the data that will define the model.
"""
name = 'UNKNOWN'
"""Name of the tes collection."""
tests = []
"""The collection of tests hold by the suite."""
url = None
"""Page where more information on the tests can be obtained."""
API = {
'name': {
'attr_type': str,
'arguments': []
},
'tests': {
'attr_type': Test,
'attribute_value_class': AttributeListValue,
'arguments': []
},
'url': {
'attr_type': str,
'arguments': []
}
}
"""Defines base contents of the model."""
def __init__(self, data=Data()):
"""Constructor.
:param data: Defining data for this suite.
:type data: :class:`TestSuite.Data`
"""
super().__init__(
{
'name': data.name,
'tests': data.tests,
'url': data.url
}
)
def __eq__(self, other):
if not isinstance(other, TestSuite):
return False
if self is other:
return True
return \
self.name == other.name and \
self.tests == other.tests and \
self.url == other.url
@property
def test_count(self):
"""
:return: Number of test cases stored on this suite.
:rtype: int
"""
return len(self.tests)
@property
def success_count(self):
"""
:return: Number of successful test cases stored on this suite.
:rtype: int
"""
return len(
apply_filters(
self.tests,
lambda test: test.status == TestStatus.SUCCESS
)
)
@property
def failed_count(self):
"""
:return: Number of failed test cases stored on this suite.
:rtype: int
"""
return len(
apply_filters(
self.tests,
lambda test: test.status == TestStatus.FAILURE
)
)
@property
def skipped_count(self):
"""
:return: Number of ignored test cases stored on this suite.
:rtype: int
"""
return len(
apply_filters(
self.tests,
lambda test: test.status == TestStatus.SKIPPED
)
)
@property
def total_time(self):
"""
:return: Total time it took to run all tests on this suite.
:rtype: float
"""
return sum(test.duration.value for test in self.tests)
| en | 0.804416 | # Copyright 2022 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Model for a collection of test cases on a Zuul environment. @DynamicAttrs: Contains attributes added on runtime. Holds the data that will define the model. Name of the tes collection. The collection of tests hold by the suite. Page where more information on the tests can be obtained. Defines base contents of the model. Constructor. :param data: Defining data for this suite. :type data: :class:`TestSuite.Data` :return: Number of test cases stored on this suite. :rtype: int :return: Number of successful test cases stored on this suite. :rtype: int :return: Number of failed test cases stored on this suite. :rtype: int :return: Number of ignored test cases stored on this suite. :rtype: int :return: Total time it took to run all tests on this suite. :rtype: float | 2.30024 | 2 |
simulation.py | bbstats/tourney_sim | 1 | 6618809 | from src.brackets import Region, Bracket
from src.stats import (
get_ratings,
get_lehigh_method,
get_srapm_ratings,
get_fivethirtyeight,
get_adjusted_lehigh_method
)
from src.constants import (
west_teams,
west_playin,
east_teams,
east_playin,
south_teams,
south_playin,
midwest_teams,
midwest_playin,
)
from frontend.constants import TLM_NAME, FTE_NAME
def main(num_sims=1000, select_subset=TLM_NAME):
    """Simulate the tournament bracket and return per-team advancement odds.

    :param num_sims: number of Monte Carlo simulations to run
    :param select_subset: ratings source, either ``TLM_NAME``
        (adjusted Lehigh method) or ``FTE_NAME`` (FiveThirtyEight)
    :raises ValueError: if ``select_subset`` is not a known ratings source
    :return: the bracket's ``output_df`` with simulation results
    """
    print(select_subset)
    if select_subset == TLM_NAME:
        ratings_df = get_adjusted_lehigh_method()
        ratings = dict(zip(ratings_df["Team"], ratings_df["RAPM"]))
    elif select_subset == FTE_NAME:
        ratings_df = get_fivethirtyeight()
        ratings = dict(zip(ratings_df["team"], ratings_df["rating"]))
    else:
        # Previously an unknown subset fell through and crashed later with
        # a NameError on `ratings`; fail fast with a clear message instead.
        raise ValueError("unknown ratings source: {!r}".format(select_subset))
    # Build the four regions (each with its play-in game) and simulate.
    west = Region("W", west_teams, west_playin)
    east = Region("E", east_teams, east_playin)
    south = Region("S", south_teams, south_playin)
    midwest = Region("MW", midwest_teams, midwest_playin)
    bracket = Bracket(ratings, west, east, south, midwest)
    bracket.run_simulations(num_sims=num_sims)
    return bracket.output_df
| from src.brackets import Region, Bracket
from src.stats import (
get_ratings,
get_lehigh_method,
get_srapm_ratings,
get_fivethirtyeight,
get_adjusted_lehigh_method
)
from src.constants import (
west_teams,
west_playin,
east_teams,
east_playin,
south_teams,
south_playin,
midwest_teams,
midwest_playin,
)
from frontend.constants import TLM_NAME, FTE_NAME
def main(num_sims=1000, select_subset=TLM_NAME):
print(select_subset)
if select_subset == TLM_NAME:
ratings_df = get_adjusted_lehigh_method()
ratings = dict(zip(ratings_df["Team"], ratings_df["RAPM"]))
elif select_subset == FTE_NAME:
ratings_df = get_fivethirtyeight()
ratings = dict(zip(ratings_df["team"], ratings_df["rating"]))
west = Region("W", west_teams, west_playin)
east = Region("E", east_teams, east_playin)
south = Region("S", south_teams, south_playin)
midwest = Region("MW", midwest_teams, midwest_playin)
bracket = Bracket(ratings, west, east, south, midwest)
bracket.run_simulations(num_sims=num_sims)
return bracket.output_df
| none | 1 | 2.278178 | 2 | |
tests/test_mdnsInterfaceController.py | pkeroulas/nmos-common | 7 | 6618810 | <filename>tests/test_mdnsInterfaceController.py
#!/usr/bin/env python
# Copyright 2017 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nmoscommon.mdns.mdnsInterfaceController import MDNSInterfaceController
from nmoscommon.mdns.mdnsExceptions import NoNetworkInterfacesFoundException
from mock import MagicMock, patch
class TestMDNSInterfaceController(unittest.TestCase):
    """Unit tests for MDNSInterfaceController.

    All interfaces and the logger are replaced with mocks, so no real
    sockets or zeroconf instances are created.
    """

    def setUp(self):
        # Fresh controller with a mocked logger for every test case.
        self.logger = MagicMock()
        self.dut = MDNSInterfaceController(self.logger)

    def test_add_interface(self):
        """Check that adding duplicate interfaces does not produce a duplicate listing"""
        with patch('nmoscommon.mdns.mdnsInterfaceController.MDNSInterface') as interface:
            self.dut.interfaces = {}
            ret = MagicMock()
            interface.return_value = ret
            self.dut.addInterface("192.168.0.5")
            self.assertEqual(self.dut.interfaces, {"192.168.0.5": ret})
            # Adding the same address again must be a no-op.
            self.dut.addInterface("192.168.0.5")
            self.assertEqual(self.dut.interfaces, {"192.168.0.5": ret})

    def test_get_interfaces(self):
        """Check interfaces can be retrieved"""
        # With no addresses requested the default interfaces are returned.
        expected = self.dut.defaultInterfaces = ["default"]
        actual = self.dut.getInterfaces([])
        self.assertEqual(expected, actual)
        # With explicit addresses, one addInterface call is made per address.
        self.dut.addInterface = MagicMock(side_effect=range(0, 5))
        actual = self.dut.getInterfaces(["a", "b", "c", "d", "e"])
        self.assertEqual(actual, list(range(0, 5)))

    def test_close(self):
        """Test the class shuts all its interfaces correctly"""
        mockInterface = MagicMock()
        closeMethod = MagicMock()
        mockInterface.close = closeMethod
        testStructure = {
            "a": mockInterface,
            "b": mockInterface,
            "c": mockInterface
        }
        self.dut.interfaces = testStructure
        self.dut.close()
        # close() must be invoked once per registered interface.
        self.assertEqual(closeMethod.call_count, 3)

    def test_set_default_interfaces(self):
        """Check that the default interfaces added"""
        with patch('nmoscommon.mdns.mdnsInterfaceController.MDNSInterface') as mdns:
            with patch('nmoscommon.mdns.mdnsInterfaceController.InterfaceController') as interface:
                self.dut.interfaces = {}
                ret = MagicMock()
                mdns.return_value = ret
                interface.return_value.get_default_interfaces.return_value = ['192.168.0.5']
                self.dut._set_default_interfaces()
                self.assertEqual(self.dut.interfaces, {"192.168.0.5": ret})

    def test_set_default_interfaces__raises_exception(self):
        """Check that when no default interfaces are found, an exception is raised"""
        with patch('nmoscommon.mdns.mdnsInterfaceController.InterfaceController') as interface:
            with self.assertRaises(NoNetworkInterfacesFoundException):
                interface.return_value.get_default_interfaces.return_value = []
                self.dut._set_default_interfaces()


if __name__ == "__main__":
    unittest.main()
| <filename>tests/test_mdnsInterfaceController.py
#!/usr/bin/env python
# Copyright 2017 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from nmoscommon.mdns.mdnsInterfaceController import MDNSInterfaceController
from nmoscommon.mdns.mdnsExceptions import NoNetworkInterfacesFoundException
from mock import MagicMock, patch
class TestMDNSInterfaceController(unittest.TestCase):
def setUp(self):
self.logger = MagicMock()
self.dut = MDNSInterfaceController(self.logger)
def test_add_interface(self):
"""Check that adding duplicate interfaces does not produce a duplicate listing"""
with patch('nmoscommon.mdns.mdnsInterfaceController.MDNSInterface') as interface:
self.dut.interfaces = {}
ret = MagicMock()
interface.return_value = ret
self.dut.addInterface("192.168.0.5")
self.assertEqual(self.dut.interfaces, {"192.168.0.5": ret})
self.dut.addInterface("192.168.0.5")
self.assertEqual(self.dut.interfaces, {"192.168.0.5": ret})
def test_get_interfaces(self):
"""Check interfaces can be retrieved"""
expected = self.dut.defaultInterfaces = ["default"]
actual = self.dut.getInterfaces([])
self.assertEqual(expected, actual)
self.dut.addInterface = MagicMock(side_effect=range(0, 5))
actual = self.dut.getInterfaces(["a", "b", "c", "d", "e"])
self.assertEqual(actual, list(range(0, 5)))
def test_close(self):
"""Test the class shuts all its interfaces correctly"""
mockInterface = MagicMock()
closeMethod = MagicMock()
mockInterface.close = closeMethod
testStructure = {
"a": mockInterface,
"b": mockInterface,
"c": mockInterface
}
self.dut.interfaces = testStructure
self.dut.close()
self.assertEqual(closeMethod.call_count, 3)
def test_set_default_interfaces(self):
"""Check that the default interfaces added"""
with patch('nmoscommon.mdns.mdnsInterfaceController.MDNSInterface') as mdns:
with patch('nmoscommon.mdns.mdnsInterfaceController.InterfaceController') as interface:
self.dut.interfaces = {}
ret = MagicMock()
mdns.return_value = ret
interface.return_value.get_default_interfaces.return_value = ['192.168.0.5']
self.dut._set_default_interfaces()
self.assertEqual(self.dut.interfaces, {"192.168.0.5": ret})
def test_set_default_interfaces__raises_exception(self):
"""Check that when no default interfaces are found, an exception is raised"""
with patch('nmoscommon.mdns.mdnsInterfaceController.InterfaceController') as interface:
with self.assertRaises(NoNetworkInterfacesFoundException):
interface.return_value.get_default_interfaces.return_value = []
self.dut._set_default_interfaces()
if __name__ == "__main__":
unittest.main()
| en | 0.819219 | #!/usr/bin/env python # Copyright 2017 British Broadcasting Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Check that adding duplicate interfaces does not produce a duplicate listing Check interfaces can be retrieved Test the class shuts all its interfaces correctly Check that the default interfaces added Check that when no default interfaces are found, an exception is raised | 2.389924 | 2 |
cartridge/shop/management/__init__.py | readevalprint/cartridge | 1 | 6618811 |
from django.conf import settings
from django.core.management import call_command
from django.db.models.signals import post_syncdb
from mezzanine.utils.tests import copy_test_to_media
from cartridge.shop.models import Product
from cartridge.shop import models as shop_app
def create_initial_product(app, created_models, verbosity, **kwargs):
    # post_syncdb signal handler (Python 2 / old Django): the first time the
    # shop tables are created it offers to load a demo Category/Product
    # fixture and copies the matching product image into MEDIA_ROOT.
    if Product in created_models:
        if kwargs.get("interactive"):
            # Keep prompting until the user answers exactly 'yes' or 'no'.
            confirm = raw_input("\nWould you like to install an initial "
                                "Category and Product? (yes/no): ")
            while True:
                if confirm == "yes":
                    break
                elif confirm == "no":
                    return
                confirm = raw_input("Please enter either 'yes' or 'no': ")
        if verbosity >= 1:
            print
            print "Creating initial Category and Product ..."
            print
        call_command("loaddata", "cartridge.json")
        copy_test_to_media("cartridge.shop", "product")


# Skip the interactive prompt entirely when running the test suite.
if not settings.TESTING:
    post_syncdb.connect(create_initial_product, sender=shop_app)
|
from django.conf import settings
from django.core.management import call_command
from django.db.models.signals import post_syncdb
from mezzanine.utils.tests import copy_test_to_media
from cartridge.shop.models import Product
from cartridge.shop import models as shop_app
def create_initial_product(app, created_models, verbosity, **kwargs):
    """
    post_syncdb signal handler that optionally installs a demo Category
    and Product the first time the shop's tables are created.

    Only runs when the ``Product`` model is among the models just
    created.  In interactive mode the user is prompted first and can
    decline; answering "no" returns without installing anything.
    """
    if Product in created_models:
        if kwargs.get("interactive"):
            confirm = raw_input("\nWould you like to install an initial "
                                "Category and Product? (yes/no): ")
            # Keep prompting until the answer is exactly "yes" or "no".
            while True:
                if confirm == "yes":
                    break
                elif confirm == "no":
                    return
                confirm = raw_input("Please enter either 'yes' or 'no': ")
        if verbosity >= 1:
            print
            print "Creating initial Category and Product ..."
            print
        # Load the bundled demo fixture and copy its product image into
        # the media root so the demo product renders correctly.
        call_command("loaddata", "cartridge.json")
        copy_test_to_media("cartridge.shop", "product")
# Don't prompt or install demo data while the test suite is running.
if not settings.TESTING:
    post_syncdb.connect(create_initial_product, sender=shop_app)
| none | 1 | 2.189264 | 2 | |
BCPGDS_decoder/Update_decoder.py | Mrmoore98/hedwig | 0 | 6618812 | from .Forward_Backward_augment_decoder import *
from .Config import *
from .Config_for_decoder import *
from .GPU_Sampler import *
from .PGBN_sampler import *
#import Forward_augment
import scipy
import numpy as np
import time
import copy
from .Utils import *
def updatePhi_Pi(sweepi, X_train, Params, Data, SuperParams, MBt, Setting, W_left, W_right, epsit):
    """One TLASGR-MCMC minibatch update of the global model parameters.

    Augments the minibatch word counts through the convolutional layer
    (on the GPU) and through the forward/backward dynamical layers, then
    takes a stochastic-gradient MCMC step on the convolutional dictionary
    ``Params.D1_k1`` and the transition matrices ``Params.Pi_left`` /
    ``Params.Pi_right``, projecting each back onto the simplex.

    Returns the updated ``(D1_k1, Pi_left, Pi_right)``.
    """
    # The sufficient-statistic accumulators must persist across minibatch
    # calls; as plain locals they would be unbound in the `else` branch on
    # every call after the first (NameError).
    global NDot_D, NDot_Pi_left, NDot_Pi_right
    MBObserved = (sweepi * Setting.batch_num + MBt).astype('int')
    train_doc_batch = Data.train_doc_split[MBt * Setting.batch_size:(MBt + 1) * Setting.batch_size]
    # Flatten the minibatch into sparse word/sentence/document index lists.
    Batch_Sparse = Empty()
    Batch_Sparse.rows = []
    Batch_Sparse.cols = []
    Batch_Sparse.values = []
    Batch_Sparse.word2sen = []
    Batch_Sparse.word2doc = []
    Batch_Sparse.sen2doc = []
    Batch_Sparse.sen_len = []
    Batch_Sparse.doc_len = []
    for Doc_index, Doc in enumerate(train_doc_batch):
        for Sen_index, Sen in enumerate(Doc):
            Batch_Sparse.rows.extend(Sen)
            Batch_Sparse.cols.extend([i for i in range(len(Sen))])
            Batch_Sparse.values.extend([25 for i in range(len(Sen))])
            Batch_Sparse.word2sen.extend(
                [len(Batch_Sparse.sen_len) for i in range(len(Sen))])  # the sentence index for word
            Batch_Sparse.word2doc.extend([Doc_index for i in range(len(Sen))])
            Batch_Sparse.sen2doc.append(Doc_index)  # the document index for sentence
            Batch_Sparse.sen_len.append(len(Sen))  # the word number for each sentence
        # Bug fix: one entry per *document*.  This was indented inside the
        # sentence loop, appending len(Doc) once per sentence and breaking
        # the doc_len[n] indexing used further below.
        Batch_Sparse.doc_len.append(len(Doc))  # the sentence number for each doc
    Batch_Sparse.max_doc_len = np.max(np.array(Batch_Sparse.doc_len))  # the max sentence number per document
    # ======================= Setting CPGBN =======================#
    Setting.K1_V2 = np.max(np.array(Batch_Sparse.sen_len))  # the max word number for each sentence
    Setting.K1_S1 = Setting.K1_V1 - Setting.K1_S3 + 1
    Setting.K1_S2 = Setting.K1_V2 - Setting.K1_S4 + 1
    Setting.N_Sen = np.max(np.array(Batch_Sparse.word2sen)) + 1  # the number of total sentences
    # ======================= Initial Local Params =======================#
    # CPGBN -- the two rand() draws below are overwritten immediately; they
    # are kept only so the global RNG stream (and hence the randn() draws
    # further down) is unchanged relative to the original implementation.
    Params.W1_nk1_left = np.random.rand(Setting.N_Sen, Setting.K1, Setting.K1_S1, Setting.K1_S2)
    Params.W1_nk1_right = np.random.rand(Setting.N_Sen, Setting.K1, Setting.K1_S1, Setting.K1_S2)
    Params.W1_nk1_left = W_left
    Params.W1_nk1_right = W_right
    Params.W1_nk1 = Params.W1_nk1_left + Params.W1_nk1_right  # N*K1*K1_S1*K1_S2
    # BPGDS
    Theta_knt_left = np.ones([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])  # K1*Batch_size*T
    Theta_knt_right = np.ones([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])  # K1*Batch_size*T
    Theta_knt = np.ones([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])
    Zeta_nt_left = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len + 1])  # Batch_size*T
    Zeta_nt_right = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len + 1])  # Batch_size*T
    Delta_nt = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len])  # Batch_size*T
    c2_nt = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len])  # Batch_size*T
    # ======================= GPU Initial =======================#
    X_rows = np.array(Batch_Sparse.rows, dtype=np.int32)  # rows
    X_cols = np.array(Batch_Sparse.cols, dtype=np.int32)  # cols
    X_values = np.array(Batch_Sparse.values, dtype=np.int32)
    X_sen_index = np.array(Batch_Sparse.word2sen, dtype=np.int32)  # pages
    word_total = len(X_rows)  # the number of words in the minibatch
    word_aug_stack = np.zeros((Setting.K1 * Setting.K1_S4 * word_total), dtype=np.float32)
    MultRate_stack = np.zeros((Setting.K1 * Setting.K1_S4 * word_total), dtype=np.float32)
    Batch_Para = np.array([Setting.K1, Setting.K1_S1, Setting.K1_S2, Setting.K1_S3, Setting.K1_S4, word_total],
                          dtype=np.int32)
    block_x = 64
    grid_x = 64
    grid_y = word_total / (block_x * grid_x) + 1
    time_Conv = 0
    # ====================== Augmentation (GPU multinomial sampler) ======================#
    Params.D1_k1_Aug = np.zeros_like(Params.D1_k1)
    Params.W1_nk1_Aug = np.zeros_like(Params.W1_nk1)
    W1_nk1 = np.array(Params.W1_nk1, dtype=np.float32, order='C')
    D1_k1 = np.array(Params.D1_k1, dtype=np.float32, order='C')
    W1_nk1_Aug = np.zeros(W1_nk1.shape, dtype=np.float32, order='C')
    D1_k1_Aug = np.zeros(D1_k1.shape, dtype=np.float32, order='C')
    time_1 = time.time()
    fuc = mod.get_function("Multi_Sampler")
    fuc(drv.In(Batch_Para), drv.In(word_aug_stack), drv.In(MultRate_stack), drv.In(X_rows), drv.In(X_cols),
        drv.In(X_sen_index),
        drv.In(X_values), drv.In(W1_nk1), drv.In(D1_k1), drv.InOut(W1_nk1_Aug), drv.InOut(D1_k1_Aug),
        grid=(int(grid_x), int(grid_y), 1), block=(int(block_x), 1, 1))
    time_2 = time.time()
    time_Conv += time_2 - time_1
    Params.W1_nk1_Aug = W1_nk1_Aug  # N*K1*K1_S1*K1_S2; Note: don't round here, the scores are too small!
    Params.D1_k1_Aug = D1_k1_Aug  # K1*K1_S3*K1_S4
    Params.W1_nk1_Aug_Pool = np.sum(np.sum(Params.W1_nk1_Aug, axis=3, keepdims=True), axis=2, keepdims=True)  # N*K1
    Params.W1_nk1_Aug_Rate = Params.W1_nk1_Aug / (Params.W1_nk1_Aug_Pool + real_min)  # N*K1*K1_S1*K1_S2
    # ====================== separate forward and backward ======================#
    Params.W1_nk1_Aug_Pool_left = np.sum(Params.W1_nk1_left, axis=3, keepdims=True) / (
            np.sum(Params.W1_nk1, axis=3, keepdims=True) + real_min) * Params.W1_nk1_Aug_Pool
    Params.W1_nk1_Aug_Pool_right = np.sum(Params.W1_nk1_right, axis=3, keepdims=True) / (
            np.sum(Params.W1_nk1, axis=3, keepdims=True) + real_min) * Params.W1_nk1_Aug_Pool
    A_knt_left = np.zeros([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])  # K1*Batch_size*T
    A_knt_right = np.zeros([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])
    for n in range(Setting.batch_size):
        A_sen_index = np.array(np.where(np.array(Batch_Sparse.sen2doc) == n))
        A_kt_left = np.transpose(Params.W1_nk1_Aug_Pool_left[A_sen_index[0, :], :, 0, 0])  # K1*T
        A_knt_left[:, n, -Batch_Sparse.doc_len[n]:] = A_kt_left  # K1*Batch_size*T
        # Bug fix: the backward pass must consume the *right* pooled counts
        # (was W1_nk1_Aug_Pool, a copy/paste from the left branch).
        A_kt_right = np.transpose(Params.W1_nk1_Aug_Pool_right[A_sen_index[0, :], :, 0, 0])  # K1*T
        A_knt_right[:, n, -Batch_Sparse.doc_len[n]:] = A_kt_right  # K1*Batch_size*T
    ##============= forward ============##
    [Zeta_nt_left, Z_kkdot_left] = \
        Forward_augment(Setting, SuperParams, Batch_Sparse, A_knt_left, Params, Theta_knt_left, Zeta_nt_left,
                        Delta_nt, c2_nt)
    ##============= backward ============##
    [Zeta_nt_right, Z_kkdot_right] = \
        Backward_augment(Setting, SuperParams, Batch_Sparse, A_knt_right, Params, Theta_knt_right,
                         Zeta_nt_right, Delta_nt, c2_nt)
    # Scale the minibatch statistics up to the full corpus.
    EWSZS_D = Params.D1_k1_Aug
    EWSZS_Pi_left = Z_kkdot_left
    EWSZS_Pi_right = Z_kkdot_right
    Phi = np.transpose(np.reshape(Params.D1_k1, [Setting.K1, Setting.K1_S3 * Setting.K1_S4]))
    EWSZS_D = np.transpose(np.reshape(EWSZS_D, [Setting.K1, Setting.K1_S3 * Setting.K1_S4]))
    EWSZS_D = Setting.batch_num * EWSZS_D / Setting.Collection
    EWSZS_Pi_left = Setting.batch_num * EWSZS_Pi_left / Setting.Collection
    EWSZS_Pi_right = Setting.batch_num * EWSZS_Pi_right / Setting.Collection
    if (MBObserved == 0):
        NDot_D = EWSZS_D.sum(0)
        NDot_Pi_left = EWSZS_Pi_left.sum(0)
        # Bug fix: was EWSZS_Pi_left.sum(0) -- the right accumulator was
        # being seeded with the *left* statistics.
        NDot_Pi_right = EWSZS_Pi_right.sum(0)
    else:
        NDot_D = (1 - Setting.ForgetRate[MBObserved]) * NDot_D + Setting.ForgetRate[MBObserved] * EWSZS_D.sum(0)
        NDot_Pi_left = (1 - Setting.ForgetRate[MBObserved]) * NDot_Pi_left + Setting.ForgetRate[
            MBObserved] * EWSZS_Pi_left.sum(0)
        NDot_Pi_right = (1 - Setting.ForgetRate[MBObserved]) * NDot_Pi_right + Setting.ForgetRate[
            MBObserved] * EWSZS_Pi_right.sum(0)
    # Update D (TLASGR step + simplex projection).
    tmp = EWSZS_D + SuperParams.eta
    tmp = (1 / (NDot_D + real_min)) * (tmp - tmp.sum(0) * Phi)
    tmp1 = (2 / (NDot_D + real_min)) * Phi
    tmp = Phi + epsit[MBObserved] * tmp + np.sqrt(epsit[MBObserved] * tmp1) * np.random.randn(Phi.shape[0],
                                                                                              Phi.shape[1])
    Phi = PGBN_sampler.ProjSimplexSpecial(tmp, Phi, 0)
    Params.D1_k1 = np.reshape(np.transpose(Phi), [Setting.K1, Setting.K1_S3, Setting.K1_S4])
    # Update Pi_left
    Pi_prior = np.eye(Setting.K1)
    tmp = EWSZS_Pi_left + Pi_prior
    tmp = (1 / (NDot_Pi_left + real_min)) * (tmp - tmp.sum(0) * Params.Pi_left)
    tmp1 = (2 / (NDot_Pi_left + real_min)) * Params.Pi_left
    tmp = Params.Pi_left + epsit[MBObserved] * tmp + np.sqrt(epsit[MBObserved] * tmp1) * np.random.randn(
        Params.Pi_left.shape[0], Params.Pi_left.shape[1])
    Params.Pi_left = PGBN_sampler.ProjSimplexSpecial(tmp, Params.Pi_left, 0)
    # Update Pi_right
    Pi_prior = np.eye(Setting.K1)
    tmp = EWSZS_Pi_right + Pi_prior
    tmp = (1 / (NDot_Pi_right + real_min)) * (tmp - tmp.sum(0) * Params.Pi_right)
    tmp1 = (2 / (NDot_Pi_right + real_min)) * Params.Pi_right
    tmp = Params.Pi_right + epsit[MBObserved] * tmp + np.sqrt(epsit[MBObserved] * tmp1) * np.random.randn(
        Params.Pi_right.shape[0], Params.Pi_right.shape[1])
    Params.Pi_right = PGBN_sampler.ProjSimplexSpecial(tmp, Params.Pi_right, 0)
    return Params.D1_k1, Params.Pi_left, Params.Pi_right
| from .Forward_Backward_augment_decoder import *
from .Config import *
from .Config_for_decoder import *
from .GPU_Sampler import *
from .PGBN_sampler import *
#import Forward_augment
import scipy
import numpy as np
import time
import copy
from .Utils import *
def updatePhi_Pi(sweepi, X_train, Params, Data, SuperParams, MBt, Setting, W_left, W_right, epsit):
    """One TLASGR-MCMC minibatch update of the global model parameters.

    Augments the minibatch word counts through the convolutional layer
    (on the GPU) and through the forward/backward dynamical layers, then
    takes a stochastic-gradient MCMC step on the convolutional dictionary
    ``Params.D1_k1`` and the transition matrices ``Params.Pi_left`` /
    ``Params.Pi_right``, projecting each back onto the simplex.

    Returns the updated ``(D1_k1, Pi_left, Pi_right)``.
    """
    # The sufficient-statistic accumulators must persist across minibatch
    # calls; as plain locals they would be unbound in the `else` branch on
    # every call after the first (NameError).
    global NDot_D, NDot_Pi_left, NDot_Pi_right
    MBObserved = (sweepi * Setting.batch_num + MBt).astype('int')
    train_doc_batch = Data.train_doc_split[MBt * Setting.batch_size:(MBt + 1) * Setting.batch_size]
    # Flatten the minibatch into sparse word/sentence/document index lists.
    Batch_Sparse = Empty()
    Batch_Sparse.rows = []
    Batch_Sparse.cols = []
    Batch_Sparse.values = []
    Batch_Sparse.word2sen = []
    Batch_Sparse.word2doc = []
    Batch_Sparse.sen2doc = []
    Batch_Sparse.sen_len = []
    Batch_Sparse.doc_len = []
    for Doc_index, Doc in enumerate(train_doc_batch):
        for Sen_index, Sen in enumerate(Doc):
            Batch_Sparse.rows.extend(Sen)
            Batch_Sparse.cols.extend([i for i in range(len(Sen))])
            Batch_Sparse.values.extend([25 for i in range(len(Sen))])
            Batch_Sparse.word2sen.extend(
                [len(Batch_Sparse.sen_len) for i in range(len(Sen))])  # the sentence index for word
            Batch_Sparse.word2doc.extend([Doc_index for i in range(len(Sen))])
            Batch_Sparse.sen2doc.append(Doc_index)  # the document index for sentence
            Batch_Sparse.sen_len.append(len(Sen))  # the word number for each sentence
        # Bug fix: one entry per *document*.  This was indented inside the
        # sentence loop, appending len(Doc) once per sentence and breaking
        # the doc_len[n] indexing used further below.
        Batch_Sparse.doc_len.append(len(Doc))  # the sentence number for each doc
    Batch_Sparse.max_doc_len = np.max(np.array(Batch_Sparse.doc_len))  # the max sentence number per document
    # ======================= Setting CPGBN =======================#
    Setting.K1_V2 = np.max(np.array(Batch_Sparse.sen_len))  # the max word number for each sentence
    Setting.K1_S1 = Setting.K1_V1 - Setting.K1_S3 + 1
    Setting.K1_S2 = Setting.K1_V2 - Setting.K1_S4 + 1
    Setting.N_Sen = np.max(np.array(Batch_Sparse.word2sen)) + 1  # the number of total sentences
    # ======================= Initial Local Params =======================#
    # CPGBN -- the two rand() draws below are overwritten immediately; they
    # are kept only so the global RNG stream (and hence the randn() draws
    # further down) is unchanged relative to the original implementation.
    Params.W1_nk1_left = np.random.rand(Setting.N_Sen, Setting.K1, Setting.K1_S1, Setting.K1_S2)
    Params.W1_nk1_right = np.random.rand(Setting.N_Sen, Setting.K1, Setting.K1_S1, Setting.K1_S2)
    Params.W1_nk1_left = W_left
    Params.W1_nk1_right = W_right
    Params.W1_nk1 = Params.W1_nk1_left + Params.W1_nk1_right  # N*K1*K1_S1*K1_S2
    # BPGDS
    Theta_knt_left = np.ones([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])  # K1*Batch_size*T
    Theta_knt_right = np.ones([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])  # K1*Batch_size*T
    Theta_knt = np.ones([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])
    Zeta_nt_left = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len + 1])  # Batch_size*T
    Zeta_nt_right = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len + 1])  # Batch_size*T
    Delta_nt = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len])  # Batch_size*T
    c2_nt = np.ones([Setting.batch_size, Batch_Sparse.max_doc_len])  # Batch_size*T
    # ======================= GPU Initial =======================#
    X_rows = np.array(Batch_Sparse.rows, dtype=np.int32)  # rows
    X_cols = np.array(Batch_Sparse.cols, dtype=np.int32)  # cols
    X_values = np.array(Batch_Sparse.values, dtype=np.int32)
    X_sen_index = np.array(Batch_Sparse.word2sen, dtype=np.int32)  # pages
    word_total = len(X_rows)  # the number of words in the minibatch
    word_aug_stack = np.zeros((Setting.K1 * Setting.K1_S4 * word_total), dtype=np.float32)
    MultRate_stack = np.zeros((Setting.K1 * Setting.K1_S4 * word_total), dtype=np.float32)
    Batch_Para = np.array([Setting.K1, Setting.K1_S1, Setting.K1_S2, Setting.K1_S3, Setting.K1_S4, word_total],
                          dtype=np.int32)
    block_x = 64
    grid_x = 64
    grid_y = word_total / (block_x * grid_x) + 1
    time_Conv = 0
    # ====================== Augmentation (GPU multinomial sampler) ======================#
    Params.D1_k1_Aug = np.zeros_like(Params.D1_k1)
    Params.W1_nk1_Aug = np.zeros_like(Params.W1_nk1)
    W1_nk1 = np.array(Params.W1_nk1, dtype=np.float32, order='C')
    D1_k1 = np.array(Params.D1_k1, dtype=np.float32, order='C')
    W1_nk1_Aug = np.zeros(W1_nk1.shape, dtype=np.float32, order='C')
    D1_k1_Aug = np.zeros(D1_k1.shape, dtype=np.float32, order='C')
    time_1 = time.time()
    fuc = mod.get_function("Multi_Sampler")
    fuc(drv.In(Batch_Para), drv.In(word_aug_stack), drv.In(MultRate_stack), drv.In(X_rows), drv.In(X_cols),
        drv.In(X_sen_index),
        drv.In(X_values), drv.In(W1_nk1), drv.In(D1_k1), drv.InOut(W1_nk1_Aug), drv.InOut(D1_k1_Aug),
        grid=(int(grid_x), int(grid_y), 1), block=(int(block_x), 1, 1))
    time_2 = time.time()
    time_Conv += time_2 - time_1
    Params.W1_nk1_Aug = W1_nk1_Aug  # N*K1*K1_S1*K1_S2; Note: don't round here, the scores are too small!
    Params.D1_k1_Aug = D1_k1_Aug  # K1*K1_S3*K1_S4
    Params.W1_nk1_Aug_Pool = np.sum(np.sum(Params.W1_nk1_Aug, axis=3, keepdims=True), axis=2, keepdims=True)  # N*K1
    Params.W1_nk1_Aug_Rate = Params.W1_nk1_Aug / (Params.W1_nk1_Aug_Pool + real_min)  # N*K1*K1_S1*K1_S2
    # ====================== separate forward and backward ======================#
    Params.W1_nk1_Aug_Pool_left = np.sum(Params.W1_nk1_left, axis=3, keepdims=True) / (
            np.sum(Params.W1_nk1, axis=3, keepdims=True) + real_min) * Params.W1_nk1_Aug_Pool
    Params.W1_nk1_Aug_Pool_right = np.sum(Params.W1_nk1_right, axis=3, keepdims=True) / (
            np.sum(Params.W1_nk1, axis=3, keepdims=True) + real_min) * Params.W1_nk1_Aug_Pool
    A_knt_left = np.zeros([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])  # K1*Batch_size*T
    A_knt_right = np.zeros([Setting.K1, Setting.batch_size, Batch_Sparse.max_doc_len])
    for n in range(Setting.batch_size):
        A_sen_index = np.array(np.where(np.array(Batch_Sparse.sen2doc) == n))
        A_kt_left = np.transpose(Params.W1_nk1_Aug_Pool_left[A_sen_index[0, :], :, 0, 0])  # K1*T
        A_knt_left[:, n, -Batch_Sparse.doc_len[n]:] = A_kt_left  # K1*Batch_size*T
        # Bug fix: the backward pass must consume the *right* pooled counts
        # (was W1_nk1_Aug_Pool, a copy/paste from the left branch).
        A_kt_right = np.transpose(Params.W1_nk1_Aug_Pool_right[A_sen_index[0, :], :, 0, 0])  # K1*T
        A_knt_right[:, n, -Batch_Sparse.doc_len[n]:] = A_kt_right  # K1*Batch_size*T
    ##============= forward ============##
    [Zeta_nt_left, Z_kkdot_left] = \
        Forward_augment(Setting, SuperParams, Batch_Sparse, A_knt_left, Params, Theta_knt_left, Zeta_nt_left,
                        Delta_nt, c2_nt)
    ##============= backward ============##
    [Zeta_nt_right, Z_kkdot_right] = \
        Backward_augment(Setting, SuperParams, Batch_Sparse, A_knt_right, Params, Theta_knt_right,
                         Zeta_nt_right, Delta_nt, c2_nt)
    # Scale the minibatch statistics up to the full corpus.
    EWSZS_D = Params.D1_k1_Aug
    EWSZS_Pi_left = Z_kkdot_left
    EWSZS_Pi_right = Z_kkdot_right
    Phi = np.transpose(np.reshape(Params.D1_k1, [Setting.K1, Setting.K1_S3 * Setting.K1_S4]))
    EWSZS_D = np.transpose(np.reshape(EWSZS_D, [Setting.K1, Setting.K1_S3 * Setting.K1_S4]))
    EWSZS_D = Setting.batch_num * EWSZS_D / Setting.Collection
    EWSZS_Pi_left = Setting.batch_num * EWSZS_Pi_left / Setting.Collection
    EWSZS_Pi_right = Setting.batch_num * EWSZS_Pi_right / Setting.Collection
    if (MBObserved == 0):
        NDot_D = EWSZS_D.sum(0)
        NDot_Pi_left = EWSZS_Pi_left.sum(0)
        # Bug fix: was EWSZS_Pi_left.sum(0) -- the right accumulator was
        # being seeded with the *left* statistics.
        NDot_Pi_right = EWSZS_Pi_right.sum(0)
    else:
        NDot_D = (1 - Setting.ForgetRate[MBObserved]) * NDot_D + Setting.ForgetRate[MBObserved] * EWSZS_D.sum(0)
        NDot_Pi_left = (1 - Setting.ForgetRate[MBObserved]) * NDot_Pi_left + Setting.ForgetRate[
            MBObserved] * EWSZS_Pi_left.sum(0)
        NDot_Pi_right = (1 - Setting.ForgetRate[MBObserved]) * NDot_Pi_right + Setting.ForgetRate[
            MBObserved] * EWSZS_Pi_right.sum(0)
    # Update D (TLASGR step + simplex projection).
    tmp = EWSZS_D + SuperParams.eta
    tmp = (1 / (NDot_D + real_min)) * (tmp - tmp.sum(0) * Phi)
    tmp1 = (2 / (NDot_D + real_min)) * Phi
    tmp = Phi + epsit[MBObserved] * tmp + np.sqrt(epsit[MBObserved] * tmp1) * np.random.randn(Phi.shape[0],
                                                                                              Phi.shape[1])
    Phi = PGBN_sampler.ProjSimplexSpecial(tmp, Phi, 0)
    Params.D1_k1 = np.reshape(np.transpose(Phi), [Setting.K1, Setting.K1_S3, Setting.K1_S4])
    # Update Pi_left
    Pi_prior = np.eye(Setting.K1)
    tmp = EWSZS_Pi_left + Pi_prior
    tmp = (1 / (NDot_Pi_left + real_min)) * (tmp - tmp.sum(0) * Params.Pi_left)
    tmp1 = (2 / (NDot_Pi_left + real_min)) * Params.Pi_left
    tmp = Params.Pi_left + epsit[MBObserved] * tmp + np.sqrt(epsit[MBObserved] * tmp1) * np.random.randn(
        Params.Pi_left.shape[0], Params.Pi_left.shape[1])
    Params.Pi_left = PGBN_sampler.ProjSimplexSpecial(tmp, Params.Pi_left, 0)
    # Update Pi_right
    Pi_prior = np.eye(Setting.K1)
    tmp = EWSZS_Pi_right + Pi_prior
    tmp = (1 / (NDot_Pi_right + real_min)) * (tmp - tmp.sum(0) * Params.Pi_right)
    tmp1 = (2 / (NDot_Pi_right + real_min)) * Params.Pi_right
    tmp = Params.Pi_right + epsit[MBObserved] * tmp + np.sqrt(epsit[MBObserved] * tmp1) * np.random.randn(
        Params.Pi_right.shape[0], Params.Pi_right.shape[1])
    Params.Pi_right = PGBN_sampler.ProjSimplexSpecial(tmp, Params.Pi_right, 0)
    return Params.D1_k1, Params.Pi_left, Params.Pi_right
| en | 0.559745 | #import Forward_augment # import pdb; pdb.set_trace() # the sentence index for word # the document index for sentence # the word number for each sentence # the sentence number for each doc # the max sentence number for each document # ======================= Setting CPGBN=======================# # the max word number for each sentence # the number of total sentences # ======================= Initial Local Params =======================# # CPGBN # N*K1*K1_S1*K1_S2 # BPGDS # K1*Batch_size*T # K1*Batch_size*T # Batch_size*T # Batch_size*T # Batch_size*T # Batch_size*T # ===========================Collecting variables==================# # ======================= GPU Initial =======================# # rows # cols # pages # the number of word # ====================== Augmentation ======================# # N*K1*K1_S1*K1_S2; Note: Don't add round here, case the scores are too small here!!! # K1*K1_S3*K1_S4 # N*K1 # N*K1*K1_S1*K1_S2 # ====================== Augmentation ======================# # ======================separate forward and backward ===============# # K1*Batch_size*T # K1*T # K1*Batch_size*T # K1*T # K1*Batch_size*T ##=============foward============## ##=============backward============## # Update D # Update Pi_left # Pi_prior = np.dot(Params.V, np.transpose(Params.V)) # Pi_prior[np.arange(K), np.arange(K)] = 0 # Pi_prior = Pi_prior + np.diag(np.reshape(Params.Xi*Params.V, [K, 1])) # Update Pi_right # Pi_prior = np.dot(Params.V, np.transpose(Params.V)) # Pi_prior[np.arange(K), np.arange(K)] = 0 # Pi_prior = Pi_prior + np.diag(np.reshape(Params.Xi*Params.V, [K, 1])) | 1.960674 | 2 |
discord_exchange/orderbook/trade.py | MiltFra/discord-exchange | 0 | 6618813 | class Trade:
num_trades = 0
def __init__(self, buyer, seller, price, volume) -> None:
assert volume > 0
self.buyer = buyer
self.seller = seller
self.price = price
self.volume = volume
self.id = Trade.num_trades
Trade.num_trades += 1
def binary_value(self, theo):
if self.price > theo:
return -self.volume
elif self.price < theo:
return self.volume
return 0
def true_value(self, theo):
return self.volume * (theo - self.price)
def __str__(self) -> str:
return f"#{self.id} {self.seller}->{self.buyer},{self.volume}@{self.price}" | class Trade:
num_trades = 0
def __init__(self, buyer, seller, price, volume) -> None:
assert volume > 0
self.buyer = buyer
self.seller = seller
self.price = price
self.volume = volume
self.id = Trade.num_trades
Trade.num_trades += 1
def binary_value(self, theo):
if self.price > theo:
return -self.volume
elif self.price < theo:
return self.volume
return 0
def true_value(self, theo):
return self.volume * (theo - self.price)
def __str__(self) -> str:
return f"#{self.id} {self.seller}->{self.buyer},{self.volume}@{self.price}" | none | 1 | 3.572273 | 4 | |
Snippets/TurtlePatterns.py | aytona/ProgrammingPrinciples | 0 | 6618814 | <reponame>aytona/ProgrammingPrinciples
# This program draws patterns using Turtle
import turtle
def makeScreen():
    """Create and return the turtle drawing window."""
    return turtle.Screen()
def makeTurtle(n_colour, n_shape='turtle', n_size=1, n_initPos=(0, 0)):
    """Return a pen-down turtle with the given colour, shape, pen size and start position."""
    pen = turtle.Turtle(shape=n_shape)
    pen.color(n_colour)
    pen.pensize(n_size)
    # Lift the pen while jumping to the start position so no stray line is drawn.
    pen.penup()
    pen.goto(n_initPos[0], n_initPos[1])
    pen.pendown()
    return pen
def flowerPattern(turt):
    """Draw a 24-petal flower; each petal is a 36-segment arc."""
    for _ in range(24):
        turt.right(15)
        for _ in range(36):
            turt.left(10)
            turt.forward(15)
def spiralPattern(turt):
    """Draw an outward spiral by lengthening each successive step."""
    for step in range(100):
        turt.forward(step)
        turt.left(20)
def spiralSticksPattern(turt):
    """Draw eight filled rectangular 'sticks' rotated 45 degrees apart."""
    turt.begin_fill()
    for _ in range(8):
        for _ in range(2):
            turt.forward(100)
            turt.left(90)
            turt.forward(10)
            turt.left(90)
        turt.left(45)
    turt.end_fill()
def main():
    """Draw all three patterns on one screen, then wait for a click to close."""
    wn = makeScreen()
    flowerPattern(makeTurtle('blue', n_initPos=(-250, 250)))
    spiralPattern(makeTurtle('red', n_initPos=(250, 250)))
    spiralSticksPattern(makeTurtle('green', n_initPos=(0, -100)))
    wn.exitonclick()
main() | # This program draws patterns using Turtle
import turtle
def makeScreen():
    """Create and return the turtle drawing window."""
    return turtle.Screen()
def makeTurtle(n_colour, n_shape='turtle', n_size=1, n_initPos=(0, 0)):
    """Return a pen-down turtle with the given colour, shape, pen size and start position."""
    pen = turtle.Turtle(shape=n_shape)
    pen.color(n_colour)
    pen.pensize(n_size)
    # Lift the pen while jumping to the start position so no stray line is drawn.
    pen.penup()
    pen.goto(n_initPos[0], n_initPos[1])
    pen.pendown()
    return pen
def flowerPattern(turt):
    """Draw a 24-petal flower; each petal is a 36-segment arc."""
    for _ in range(24):
        turt.right(15)
        for _ in range(36):
            turt.left(10)
            turt.forward(15)
def spiralPattern(turt):
    """Draw an outward spiral by lengthening each successive step."""
    for step in range(100):
        turt.forward(step)
        turt.left(20)
def spiralSticksPattern(turt):
    """Draw eight filled rectangular 'sticks' rotated 45 degrees apart."""
    turt.begin_fill()
    for _ in range(8):
        for _ in range(2):
            turt.forward(100)
            turt.left(90)
            turt.forward(10)
            turt.left(90)
        turt.left(45)
    turt.end_fill()
def main():
    """Draw all three patterns on one screen, then wait for a click to close."""
    wn = makeScreen()
    flowerPattern(makeTurtle('blue', n_initPos=(-250, 250)))
    spiralPattern(makeTurtle('red', n_initPos=(250, 250)))
    spiralSticksPattern(makeTurtle('green', n_initPos=(0, -100)))
    wn.exitonclick()
main() | en | 0.691862 | # This program draws patterns using Turtle | 4.147994 | 4 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credentials/tests/test_utils.py | osoco/better-ways-of-thinking-about-software | 3 | 6618815 | <filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credentials/tests/test_utils.py
"""Tests covering Credentials utilities."""
import uuid
from unittest import mock
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.credentials.tests import factories
from openedx.core.djangoapps.credentials.tests.mixins import CredentialsApiConfigMixin
from openedx.core.djangoapps.credentials.utils import get_credentials
from openedx.core.djangoapps.oauth_dispatch.tests.factories import ApplicationFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.credentials.utils'


@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCredentials(CredentialsApiConfigMixin, CacheIsolationTestCase):
    """Tests for the ``get_credentials`` utility function."""

    ENABLED_CACHES = ['default']

    def setUp(self):
        super().setUp()
        # The credentials service is reached through an OAuth2 application.
        ApplicationFactory(name=CredentialsApiConfig.OAUTH2_CLIENT_NAME)
        self.credentials_config = self.create_credentials_config(cache_ttl=1)
        self.user = UserFactory()

    @staticmethod
    def _single_call_kwargs(mock_get_edx_api_data):
        """Assert the API client was called exactly once and return its kwargs."""
        mock_get_edx_api_data.assert_called_once()
        __, __, kwargs = mock_get_edx_api_data.mock_calls[0]
        return kwargs

    def test_get_many(self, mock_get_edx_api_data):
        """All awarded, visible credentials are fetched and cached per user."""
        expected = factories.UserCredential.create_batch(3)
        mock_get_edx_api_data.return_value = expected

        actual = get_credentials(self.user)

        kwargs = self._single_call_kwargs(mock_get_edx_api_data)
        querystring = {
            'username': self.user.username,
            'status': 'awarded',
            'only_visible': 'True',
        }
        cache_key = f'{self.credentials_config.CACHE_KEY}.{self.user.username}'
        assert kwargs['querystring'] == querystring
        assert kwargs['cache_key'] == cache_key
        assert actual == expected

    def test_get_one(self, mock_get_edx_api_data):
        """A program_uuid narrows both the query and the cache key."""
        expected = factories.UserCredential()
        mock_get_edx_api_data.return_value = expected
        program_uuid = str(uuid.uuid4())

        actual = get_credentials(self.user, program_uuid=program_uuid)

        kwargs = self._single_call_kwargs(mock_get_edx_api_data)
        querystring = {
            'username': self.user.username,
            'status': 'awarded',
            'only_visible': 'True',
            'program_uuid': program_uuid,
        }
        cache_key = f'{self.credentials_config.CACHE_KEY}.{self.user.username}.{program_uuid}'
        assert kwargs['querystring'] == querystring
        assert kwargs['cache_key'] == cache_key
        assert actual == expected

    def test_type_filter(self, mock_get_edx_api_data):
        """A credential_type is forwarded as the ``type`` query parameter."""
        get_credentials(self.user, credential_type='program')

        kwargs = self._single_call_kwargs(mock_get_edx_api_data)
        querystring = {
            'username': self.user.username,
            'status': 'awarded',
            'only_visible': 'True',
            'type': 'program',
        }
        assert kwargs['querystring'] == querystring
| <filename>Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/credentials/tests/test_utils.py
"""Tests covering Credentials utilities."""
import uuid
from unittest import mock
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.credentials.tests import factories
from openedx.core.djangoapps.credentials.tests.mixins import CredentialsApiConfigMixin
from openedx.core.djangoapps.credentials.utils import get_credentials
from openedx.core.djangoapps.oauth_dispatch.tests.factories import ApplicationFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.credentials.utils'


@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCredentials(CredentialsApiConfigMixin, CacheIsolationTestCase):
    """Tests for the ``get_credentials`` utility function."""

    ENABLED_CACHES = ['default']

    def setUp(self):
        super().setUp()
        # The credentials service is reached through an OAuth2 application.
        ApplicationFactory(name=CredentialsApiConfig.OAUTH2_CLIENT_NAME)
        self.credentials_config = self.create_credentials_config(cache_ttl=1)
        self.user = UserFactory()

    @staticmethod
    def _single_call_kwargs(mock_get_edx_api_data):
        """Assert the API client was called exactly once and return its kwargs."""
        mock_get_edx_api_data.assert_called_once()
        __, __, kwargs = mock_get_edx_api_data.mock_calls[0]
        return kwargs

    def test_get_many(self, mock_get_edx_api_data):
        """All awarded, visible credentials are fetched and cached per user."""
        expected = factories.UserCredential.create_batch(3)
        mock_get_edx_api_data.return_value = expected

        actual = get_credentials(self.user)

        kwargs = self._single_call_kwargs(mock_get_edx_api_data)
        querystring = {
            'username': self.user.username,
            'status': 'awarded',
            'only_visible': 'True',
        }
        cache_key = f'{self.credentials_config.CACHE_KEY}.{self.user.username}'
        assert kwargs['querystring'] == querystring
        assert kwargs['cache_key'] == cache_key
        assert actual == expected

    def test_get_one(self, mock_get_edx_api_data):
        """A program_uuid narrows both the query and the cache key."""
        expected = factories.UserCredential()
        mock_get_edx_api_data.return_value = expected
        program_uuid = str(uuid.uuid4())

        actual = get_credentials(self.user, program_uuid=program_uuid)

        kwargs = self._single_call_kwargs(mock_get_edx_api_data)
        querystring = {
            'username': self.user.username,
            'status': 'awarded',
            'only_visible': 'True',
            'program_uuid': program_uuid,
        }
        cache_key = f'{self.credentials_config.CACHE_KEY}.{self.user.username}.{program_uuid}'
        assert kwargs['querystring'] == querystring
        assert kwargs['cache_key'] == cache_key
        assert actual == expected

    def test_type_filter(self, mock_get_edx_api_data):
        """A credential_type is forwarded as the ``type`` query parameter."""
        get_credentials(self.user, credential_type='program')

        kwargs = self._single_call_kwargs(mock_get_edx_api_data)
        querystring = {
            'username': self.user.username,
            'status': 'awarded',
            'only_visible': 'True',
            'type': 'program',
        }
        assert kwargs['querystring'] == querystring
| en | 0.880621 | Tests covering Credentials utilities. Tests for credentials utility functions. | 2.378982 | 2 |
snowblocks/nixpkgs/blender/pie-essentials/assets/brushes.py | eadwu/dotfiles | 0 | 6618816 | <reponame>eadwu/dotfiles<gh_stars>0
bl_info = {
    "name": "Sculpt Brush Menu: Key: 'F'",
    "description": "Sculpt Brushes",
    "blender": (2, 78, 0),
    "category": "Sculpt"
}
import bpy
from bpy.types import (Menu, Operator)
# (sculpt_tool enum, icon, label) for each pie entry, in draw order.
_BRUSHES = (
    ('DRAW', "BRUSH_SCULPT_DRAW", "Draw"),
    ('CREASE', "BRUSH_CREASE", "Crease"),
    ('CLAY', "BRUSH_CLAY", "Clay"),
    ('CLAY_STRIPS', "BRUSH_CLAY_STRIPS", "Clay Strips"),
    ('GRAB', "BRUSH_GRAB", "Grab"),
    ('PINCH', "BRUSH_PINCH", "Pinch"),
    ('SMOOTH', "BRUSH_SMOOTH", "Smooth"),
)
class VIEW3D_PIE_brushes_of(Menu):
    """Pie menu offering the common sculpt brushes."""
    bl_label = "Brushes"
    bl_idname = "pie.brushes_of"
    def draw(self, context):
        pie = self.layout.menu_pie()
        for tool, icon, label in _BRUSHES:
            entry = pie.operator("paint.brush_select", icon=icon, text=label)
            entry.sculpt_tool = tool
        # pie.operator_enum("PAINT_OT_brush_select", "sculpt_tool")
classes = [VIEW3D_PIE_brushes_of]
addon_keymaps = []
def register():
    """Register the pie menu and bind it to the F key in Sculpt mode."""
    addon_keymaps.clear()
    for cls in classes:
        bpy.utils.register_class(cls)
    wm = bpy.context.window_manager
    if wm.keyconfigs.addon:
        km = wm.keyconfigs.addon.keymaps.new(name='Sculpt')
        kmi = km.keymap_items.new('wm.call_menu_pie', 'F', 'PRESS')
        kmi.properties.name = "pie.brushes_of"
        addon_keymaps.append((km, kmi))
def unregister():
    """Remove the key binding and unregister the pie menu."""
    for cls in classes:
        bpy.utils.unregister_class(cls)
    kc = bpy.context.window_manager.keyconfigs.addon
    if kc:
        for km, kmi in addon_keymaps:
            km.keymap_items.remove(kmi)
    addon_keymaps.clear()
if __name__ == "__main__":
    register()
| bl_info = {
"name": "Sculpt Brush Menu: Key: 'F'",
"description": "Sculpt Brushes",
"blender": (2, 78, 0),
"category": "Sculpt"
}
import bpy
from bpy.types import (Menu, Operator)
class VIEW3D_PIE_brushes_of(Menu):
    """Pie menu giving one-key access to the common sculpt brushes."""
    bl_label = "Brushes"
    bl_idname = "pie.brushes_of"
    def draw(self, context):
        # Each slice runs paint.brush_select with the matching
        # sculpt_tool enum value; declaration order fixes slice placement.
        layout = self.layout
        pie = layout.menu_pie()
        draw = pie.operator("paint.brush_select", icon = "BRUSH_SCULPT_DRAW", text = "Draw")
        draw.sculpt_tool = 'DRAW'
        crease = pie.operator("paint.brush_select", icon = "BRUSH_CREASE", text = "Crease")
        crease.sculpt_tool = 'CREASE'
        clay = pie.operator("paint.brush_select", icon = "BRUSH_CLAY", text = "Clay")
        clay.sculpt_tool = 'CLAY'
        clay_strips = pie.operator("paint.brush_select", icon = "BRUSH_CLAY_STRIPS", text = "Clay Strips")
        clay_strips.sculpt_tool = 'CLAY_STRIPS'
        grab = pie.operator("paint.brush_select", icon = "BRUSH_GRAB", text = "Grab")
        grab.sculpt_tool = 'GRAB'
        pinch = pie.operator("paint.brush_select", icon = "BRUSH_PINCH", text = "Pinch")
        pinch.sculpt_tool = 'PINCH'
        smooth = pie.operator("paint.brush_select", icon = "BRUSH_SMOOTH", text = "Smooth")
        smooth.sculpt_tool = 'SMOOTH'
        # pie.operator_enum("PAINT_OT_brush_select", "sculpt_tool")
# Classes this add-on registers with Blender.
classes = [VIEW3D_PIE_brushes_of]
# (keymap, keymap_item) pairs installed by register(); removed on unregister().
addon_keymaps = []
def register():
    """Register the pie-menu class and bind it to F (press) in Sculpt mode."""
    addon_keymaps.clear()
    for cls in classes:
        bpy.utils.register_class(cls)
    wm = bpy.context.window_manager
    if wm.keyconfigs.addon:
        # Bind 'F' (press) in the Sculpt keymap to open this pie menu.
        km = wm.keyconfigs.addon.keymaps.new(name = 'Sculpt')
        kmi = km.keymap_items.new('wm.call_menu_pie', 'F', 'PRESS')
        kmi.properties.name = "pie.brushes_of"
        addon_keymaps.append((km, kmi))
def unregister():
    """Unregister the add-on's classes and remove its key bindings."""
    for cls in classes:
        bpy.utils.unregister_class(cls)
    wm = bpy.context.window_manager
    kc = wm.keyconfigs.addon
    if kc:
        # Remove only the keymap items this add-on installed.
        for km, kmi in addon_keymaps:
            km.keymap_items.remove(kmi)
    addon_keymaps.clear()
if __name__ == "__main__":
    # Allow running the add-on directly from Blender's text editor.
    # Fixed: dataset-separator columns (' | en | 0.686558 | ... | 2 |')
    # were fused onto the register() call, breaking the syntax.
    register()
# test/test_mountaincarcontinuous.py — LucasAlegre/rl-visualization
import gym
import numpy as np
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise
from stable_baselines import SAC
from rl_visualization.visualization_env import VisualizationEnv
if __name__ == '__main__':
    # Wrap the continuous MountainCar task in the live-visualization env,
    # naming the observation features and the single continuous action.
    env = gym.make('MountainCarContinuous-v0')
    env = VisualizationEnv(env,
        steps_lookback=10000,
        refresh_time=30,
        features_names=['Car Position', 'Car Velocity'],
        actions_names=['Push car to the left (negative value) or to the right (positive value)']
    )
    # SAC with Ornstein-Uhlenbeck exploration noise on the 1-D action.
    model = SAC(MlpPolicy, env, verbose=1, action_noise=OrnsteinUhlenbeckActionNoise(mean=np.zeros(1), sigma=0.5 * np.ones(1)))
    model.learn(total_timesteps=60000)
    # Roll out the trained policy and render every step.
    obs = env.reset()
    for i in range(100000):
        action, _states = model.predict(obs)
        obs, rewards, dones, info = env.step(action)
        env.render()
    env.close()
    env.join()

import gym
import numpy as np
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise
from stable_baselines import SAC
from rl_visualization.visualization_env import VisualizationEnv
if __name__ == '__main__':
    # Wrap the continuous MountainCar task in the live-visualization env,
    # naming the observation features and the single continuous action.
    env = gym.make('MountainCarContinuous-v0')
    env = VisualizationEnv(env,
        steps_lookback=10000,
        refresh_time=30,
        features_names=['Car Position', 'Car Velocity'],
        actions_names=['Push car to the left (negative value) or to the right (positive value)']
    )
    # SAC with Ornstein-Uhlenbeck exploration noise on the 1-D action.
    model = SAC(MlpPolicy, env, verbose=1, action_noise=OrnsteinUhlenbeckActionNoise(mean=np.zeros(1), sigma=0.5 * np.ones(1)))
    model.learn(total_timesteps=60000)
    # Roll out the trained policy and render every step.
    obs = env.reset()
    for i in range(100000):
        action, _states = model.predict(obs)
        obs, rewards, dones, info = env.step(action)
        env.render()
    env.close()
    env.join()
# tools/flops.py — AlbertoRosado1/desihigh
from numpy import arange
#!/usr/local/bin/python2.5
#/*****************************/
#/* flops.c */
#/* Version 2.0, 18 Dec 1992 */
#/* <NAME> */
#/* <EMAIL> */
#/*****************************/
__author__ = '<NAME>'
"""
/*
Flops.c is a 'c' program which attempts to estimate your systems
floating-point 'MFLOPS' rating for the FADD, FSUB, FMUL, and FDIV
operations based on specific 'instruction mixes' (discussed below).
The program provides an estimate of PEAK MFLOPS performance by making
maximal use of register variables with minimal interaction with main
memory. The execution loops are all small so that they will fit in
any cache. Flops.c can be used along with Linpack and the Livermore
kernels (which exercise memory much more extensively) to gain further
insight into the limits of system performance. The flops.c execution
modules also include various percent weightings of FDIV's (from 0% to
25% FDIV's) so that the range of performance can be obtained when
using FDIV's. FDIV's, being computationally more intensive than
FADD's or FMUL's, can impact performance considerably on some systems.
Flops.c consists of 8 independent modules (routines) which, except for
module 2, conduct numerical integration of various functions. Module
2, estimates the value of pi based upon the Maclaurin series expansion
of atan(1). MFLOPS ratings are provided for each module, but the
program's overall results are summarized by the MFLOPS(1), MFLOPS(2),
MFLOPS(3), and MFLOPS(4) outputs.
The MFLOPS(1) result is identical to the result provided by all
previous versions of flops.c. It is based only upon the results from
modules 2 and 3. Two problems surfaced in using MFLOPS(1). First, it
was difficult to completely 'vectorize' the result due to the
recurrence of the 's' variable in module 2. This problem is addressed
in the MFLOPS(2) result which does not use module 2, but maintains
nearly the same weighting of FDIV's (9.2%) as in MFLOPS(1) (9.6%).
The second problem with MFLOPS(1) centers around the percentage of
FDIV's (9.6%) which was viewed as too high for an important class of
problems. This concern is addressed in the MFLOPS(3) result where NO
FDIV's are conducted at all.
The number of floating-point instructions per iteration (loop) is
given below for each module executed:
MODULE FADD FSUB FMUL FDIV TOTAL Comment
1 7 0 6 1 14 7.1% FDIV's
2 3 2 1 1 7 difficult to vectorize.
3 6 2 9 0 17 0.0% FDIV's
4 7 0 8 0 15 0.0% FDIV's
5 13 0 15 1 29 3.4% FDIV's
6 13 0 16 0 29 0.0% FDIV's
7 3 3 3 3 12 25.0% FDIV's
8 13 0 17 0 30 0.0% FDIV's
A*2+3 21 12 14 5 52 A=5, MFLOPS(1), Same as
40.4% 23.1% 26.9% 9.6% previous versions of the
flops.c program. Includes
only Modules 2 and 3, does
9.6% FDIV's, and is not
easily vectorizable.
1+3+4 58 14 66 14 152 A=4, MFLOPS(2), New output
+5+6+ 38.2% 9.2% 43.4% 9.2% does not include Module 2,
A*7 but does 9.2% FDIV's.
1+3+4 62 5 74 5 146 A=0, MFLOPS(3), New output
+5+6+ 42.9% 3.4% 50.7% 3.4% does not include Module 2,
7+8 but does 3.4% FDIV's.
3+4+6 39 2 50 0 91 A=0, MFLOPS(4), New output
+8 42.9% 2.2% 54.9% 0.0% does not include Module 2,
and does NO FDIV's.
NOTE: Various timer routines are included as indicated below. The
timer routines, with some comments, are attached at the end
of the main program.
NOTE: Please do not remove any of the printouts.
EXAMPLE COMPILATION:
UNIX based systems
cc -DUNIX -O flops.c -o flops
cc -DUNIX -DROPT flops.c -o flops
cc -DUNIX -fast -O4 flops.c -o flops
.
.
.
etc.
<NAME>
<EMAIL>
*/
"""
import math
import time
def dtime(p):
    """Update the 3-slot timer list *p* in place.

    p[2] is overwritten with the current timestamp and p[1] receives the
    interval since the previous call (new p[2] minus old p[2]); p[0] is
    unused.  Uses time.perf_counter() -- a monotonic, high-resolution
    clock intended for interval measurement -- instead of time.time(),
    whose wall-clock value can jump (NTP adjustment, DST) and corrupt
    the measured intervals.  Callers only consume deltas, so the change
    is backward compatible.
    """
    previous = p[2]
    p[2] = time.perf_counter()
    p[1] = p[2] - previous
def flops(quick=True):
    """Estimate peak double-precision MFLOPS (Python port of flops.c V2.0).

    Runs up to eight numerical-integration modules with known
    FADD/FSUB/FMUL/FDIV instruction mixes and prints each module's
    integration error, runtime and MFLOPS rate, followed by the combined
    MFLOPS(1)-(4) summaries.  Module 1 also calibrates the loop count by
    doubling it until its runtime reaches TLimit (15 s), so a full call
    takes at least that long by design.

    Parameters
    ----------
    quick : bool
        When True (default) only modules 1-3 run and the function
        returns 0 early; when False all eight modules plus the summary
        outputs run.

    Returns
    -------
    int or None
        0 when ``quick`` is True; None after a full run.

    Fixes relative to the previous revision:
      * Module 7's print used the format spec '{:10.4f]' (mismatched
        bracket) and the Iterations print used '{:10d\\n}' (newline
        inside the spec); both raised ValueError at run time.
      * Index loops now use range() instead of numpy.arange(), which
        materialised up to 512 million int64 indices (~4 GB) just to
        iterate.
      * The B0 and C0..C8 coefficients inherited from the C original
        were never referenced and have been removed.

    NOTE(review): the loops use exclusive upper bounds
    (``range(1, n)`` / ``range(1, m)``) where the original C iterated
    ``i = 1 .. n`` inclusive; results differ marginally from the C
    version -- confirm before comparing numbers across ports.
    """
    # [unused, last interval, last timestamp] -- the layout dtime() expects.
    TimeArray = [0.0, 0.0, 0.0]
    # T[1] scales timings to one million loops; the remaining slots
    # collect per-module runtimes, per-FLOP costs and MFLOPS rates.
    T = [0.0] * 36
    # Maclaurin-series coefficients: A* for sin(x), B* for cos(x), and
    # D*/E* for the rational integrand of Module 1.
    A0 = 1.0
    A1 = -0.1666666666671334
    A2 = 0.833333333809067E-2
    A3 = 0.198412715551283E-3
    A4 = 0.27557589750762E-5
    A5 = 0.2507059876207E-7
    A6 = 0.164105986683E-9
    B1 = -0.4999999999982
    B2 = 0.4166666664651E-1
    B3 = -0.1388888805755E-2
    B4 = 0.24801428034E-4
    B5 = -0.2754213324E-6
    B6 = 0.20189405E-8
    D1 = 0.3999999946405E-1
    D2 = 0.96E-3
    D3 = 0.1233153E-5
    E2 = 0.48E-3
    E3 = 0.411051E-6
    print("\n")
    print("   FLOPS Python Program (Double Precision), V2.0 18 Dec 1992\n\n")
    # Initial loop count; doubled until Module 1 runs for >= TLimit seconds.
    loops = 15625
    T[1] = 1.0E+06/loops        # usec-per-loop scale relative to 1M loops
    TLimit = 15.0               # calibration target runtime (seconds)
    NLimit = 512000000          # hard upper bound on the loop count
    piref = 3.14159265358979324
    one = 1.0
    two = 2.0
    three = 3.0
    four = 4.0
    five = 5.0
    scale = one
    print("   Module     Error        RunTime      MFLOPS\n")
    print("                            (usec)\n")
    # Initialize the timer (the first call only seeds TimeArray[2]).
    dtime(TimeArray)
    dtime(TimeArray)
    # Module 1: integral of df(x)/f(x); result is ln(f(1)).  14 FLOPs
    # per loop (7 +, 0 -, 6 *, 1 /).  Doubles n until the runtime
    # reaches TLimit, which calibrates all later modules.
    n = loops
    sa = 0.0
    while sa < TLimit:
        n = 2 * n
        x = one / n
        s = 0.0
        v = 0.0
        w = one
        dtime(TimeArray)
        # range() rather than numpy.arange(): arange would allocate up
        # to 512M indices (~4 GB) for what is a pure counting loop.
        for i in range(1, n):
            v = v + w
            u = v * x
            s = s + (D1+u*(D2+u*D3))/(w+u*(D1+u*(E2+u*E3)))
        dtime(TimeArray)
        sa = TimeArray[1]
        if n == NLimit:
            break
    scale = 1.0E+06 / n
    T[1] = scale
    # Estimate nulltime: the overhead of an empty n-iteration 'for' loop,
    # subtracted from later module timings.
    dtime(TimeArray)
    for i in range(1, n):
        pass
    dtime(TimeArray)
    nulltime = T[1] * TimeArray[1]
    if nulltime < 0.0:
        nulltime = 0.0
    T[2] = T[1] * sa - nulltime
    sa = (D1+D2+D3)/(one+D1+E2+E3)
    sb = D1
    T[3] = T[2] / 14.0                      # usec per FLOP for Module 1
    sa = x * ( sa + sb + two * s ) / two    # Module 1 result
    sb = one / sa
    n = int( ( 40000 * sb ) / scale )
    sc = sb - 25.2
    T[4] = one / T[3]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     1   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[2],T[4]))
    m = n
    # Module 2: value of PI from the Maclaurin series of atan(1.0).
    # 7 FLOPs per loop (3 +, 2 -, 1 *, 1 /); hard to vectorize because
    # of the recurrence on s.
    s = -five
    sa = -one
    # Loop 2 times only the sign-flip recurrence so its cost can be
    # subtracted from Loop 3 below.
    dtime(TimeArray)
    for i in range(1, m+1):
        s = -s
        sa = sa + s
    dtime(TimeArray)
    T[5] = T[1] * TimeArray[1]
    if T[5] < 0.0:
        T[5] = 0.0
    sc = m
    u = sa
    v = 0.0
    w = 0.0
    x = 0.0
    dtime(TimeArray)
    for i in range(1, m+1):
        s = -s
        sa = sa + s
        u = u + two
        x = x +(s - u)
        v = v - s * u
        w = w + s / u
    dtime(TimeArray)
    T[6] = T[1] * TimeArray[1]
    T[7] = ( T[6] - T[5] ) / 7.0
    m = int( sa * x / sc )                  # PI results
    sa = four * w / five
    sb = sa + five / v
    sc = 31.25
    piprg = sb - sc / (v * v * v)
    pierr = piprg - piref
    T[8] = one / T[7]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     2   {:13.4e}  {:10.4f}  {:10.4f}\n".format(pierr,T[6]-T[5],T[8]))
    # Module 3: integral of sin(x) over [0, PI/3] by the trapezoidal
    # rule (exact value 0.5).  17 FLOPs per loop (6 +, 2 -, 9 *, 0 /).
    x = piref / ( three * m )
    s = 0.0
    v = 0.0
    dtime(TimeArray)
    for i in range(1, m):
        v = v + one
        u = v * x
        w = u * u
        s = s + u * ((((((A6*w-A5)*w+A4)*w-A3)*w+A2)*w+A1)*w+one)
    dtime(TimeArray)
    T[9] = T[1] * TimeArray[1] - nulltime
    u = piref / three
    w = u * u
    sa = u * ((((((A6*w-A5)*w+A4)*w-A3)*w+A2)*w+A1)*w+one)
    T[10] = T[9] / 17.0
    sa = x * ( sa + two * s ) / two         # sin(x) results
    sb = 0.5
    sc = sa - sb
    T[11] = one / T[10]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     3   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[9],T[11]))
    if quick:
        return 0
    # Module 4: integral of cos(x) over [0, PI/3] (exact sin(PI/3)).
    # 15 FLOPs per loop (7 +, 0 -, 8 *, 0 /).  A3/A5 are negated so the
    # remaining modules can evaluate the sine series with '+' throughout.
    A3 = -A3
    A5 = -A5
    x = piref / ( three * m )
    s = 0.0
    v = 0.0
    dtime(TimeArray)
    for i in range(1, m):
        u = i * x
        w = u * u
        s = s + w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
    dtime(TimeArray)
    T[12] = T[1] * TimeArray[1] - nulltime
    u = piref / three
    w = u * u
    sa = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
    T[13] = T[12] / 15.0
    sa = x * ( sa + one + two * s ) / two   # Module 4 result
    u = piref / three
    w = u * u
    sb = u * ((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+A0)
    sc = sa - sb
    T[14] = one / T[13]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     4   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[12],T[14]))
    # Module 5: integral of tan(x) over [0, PI/3] (exact ln(2)).
    # 29 FLOPs per loop (13 +, 0 -, 15 *, 1 /).
    x = piref / ( three * m )
    s = 0.0
    v = 0.0
    dtime(TimeArray)
    for i in range(1, m):
        u = i * x
        w = u * u
        v = u * ((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
        s = s + v / (w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one)
    dtime(TimeArray)
    T[15] = T[1] * TimeArray[1] - nulltime
    u = piref / three
    w = u * u
    sa = u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
    sb = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
    sa = sa / sb
    T[16] = T[15] / 29.0
    sa = x * ( sa + two * s ) / two         # Module 5 result
    sb = 0.6931471805599453
    sc = sa - sb
    T[17] = one / T[16]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     5   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[15],T[17]))
    # Module 6: integral of sin(x)*cos(x) over [0, PI/4] (exact
    # sin(PI/4)^2 = 0.25).  29 FLOPs per loop (13 +, 0 -, 16 *, 0 /).
    x = piref / ( four * m )
    s = 0.0
    v = 0.0
    dtime(TimeArray)
    for i in range(1, m):
        u = i * x
        w = u * u
        v = u * ((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
        s = s + v*(w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one)
    dtime(TimeArray)
    T[18] = T[1] * TimeArray[1] - nulltime
    u = piref / four
    w = u * u
    sa = u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
    sb = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
    sa = sa * sb
    T[19] = T[18] / 29.0
    sa = x * ( sa + two * s ) / two         # Module 6 result
    sb = 0.25
    sc = sa - sb
    T[20] = one / T[19]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     6   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[18],T[20]))
    # Module 7: definite integrals of 1/(x+1), x/(x*x+1) and
    # x*x/(x*x*x+1) by the trapezoidal rule.  12 FLOPs per loop
    # (3 +, 3 -, 3 *, 3 /) -- the 25% FDIV mix.
    s = 0.0
    w = one
    sa = 102.3321513995275
    v = sa / m
    dtime(TimeArray)
    for i in range(1, m):
        x = i * v
        u = x * x
        s = s - w / ( x + w ) - x / ( u + w ) - u / ( x * u + w )
    dtime(TimeArray)
    T[21] = T[1] * TimeArray[1] - nulltime
    T[22] = T[21] / 12.0                    # Module 7 results
    x = sa
    u = x * x
    sa = -w - w / ( x + w ) - x / ( u + w ) - u / ( x * u + w )
    sa = 18.0 * v * (sa + two * s )
    m = -2000 * int(sa)
    m = int( m / scale )
    sc = sa + 500.2
    T[23] = one / T[22]
    # DO NOT REMOVE THIS PRINTOUT!
    # Fixed: the second spec previously read '{:10.4f]' (mismatched
    # bracket) and raised ValueError here.
    print("     7   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[21],T[23]))
    # Module 8: integral of sin(x)*cos(x)*cos(x) over [0, PI/3]
    # (exact (1 - cos(PI/3)^3)/3).  30 FLOPs per loop
    # (13 +, 0 -, 17 *, 0 /).
    x = piref / ( three * m )
    s = 0.0
    v = 0.0
    dtime(TimeArray)
    for i in range(1, m):
        u = i * x
        w = u * u
        v = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
        s = s + v*v*u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
    dtime(TimeArray)
    T[24] = T[1] * TimeArray[1] - nulltime
    u = piref / three
    w = u * u
    sa = u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
    sb = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
    sa = sa * sb * sb
    T[25] = T[24] / 30.0
    sa = x * ( sa + two * s ) / two         # Module 8 result
    sb = 0.29166666666666667
    sc = sa - sb
    T[26] = one / T[25]
    # DO NOT REMOVE THIS PRINTOUT!
    print("     8   {:13.4e}  {:10.4f}  {:10.4f}\n".format(sc,T[24],T[26]))
    # MFLOPS(1): modules 2 and 3 only -- same weighting as flops.c V1.x.
    T[27] = ( five * (T[6] - T[5]) + T[9] ) / 52.0
    T[28] = one / T[27]
    # MFLOPS(2): no Module 2, 9.2% FDIV's.
    T[29] = T[2] + T[9] + T[12] + T[15] + T[18]
    T[29] = (T[29] + four * T[21]) / 152.0
    T[30] = one / T[29]
    # MFLOPS(3): no Module 2, 3.4% FDIV's.
    T[31] = T[2] + T[9] + T[12] + T[15] + T[18]
    T[31] = (T[31] + T[21] + T[24]) / 146.0
    T[32] = one / T[31]
    # MFLOPS(4): no Module 2 and no FDIV's at all.
    T[33] = (T[9] + T[12] + T[18] + T[24]) / 91.0
    T[34] = one / T[33]
    print("\n")
    # Fixed: the spec previously read '{:10d\n}' (newline inside the
    # format spec) and raised ValueError here.
    print("   Iterations      = {:10d}\n".format(m))
    print("   NullTime (usec) = {:10.4f}\n".format(nulltime))
    print("   MFLOPS(1)       = {:10.4f}\n".format(T[28]))
    print("   MFLOPS(2)       = {:10.4f}\n".format(T[30]))
    print("   MFLOPS(3)       = {:10.4f}\n".format(T[32]))
    print("   MFLOPS(4)       = {:10.4f}\n\n".format(T[34]))
if __name__ == "__main__":
    # Run the benchmark in quick mode (modules 1-3 only) when executed
    # directly rather than imported.
    flops()
from numpy import arange
#!/usr/local/bin/python2.5
#/*****************************/
#/* flops.c */
#/* Version 2.0, 18 Dec 1992 */
#/* <NAME> */
#/* <EMAIL> */
#/*****************************/
__author__ = '<NAME>'
"""
/*
Flops.c is a 'c' program which attempts to estimate your systems
floating-point 'MFLOPS' rating for the FADD, FSUB, FMUL, and FDIV
operations based on specific 'instruction mixes' (discussed below).
The program provides an estimate of PEAK MFLOPS performance by making
maximal use of register variables with minimal interaction with main
memory. The execution loops are all small so that they will fit in
any cache. Flops.c can be used along with Linpack and the Livermore
kernels (which exersize memory much more extensively) to gain further
insight into the limits of system performance. The flops.c execution
modules also include various percent weightings of FDIV's (from 0% to
25% FDIV's) so that the range of performance can be obtained when
using FDIV's. FDIV's, being computationally more intensive than
FADD's or FMUL's, can impact performance considerably on some systems.
Flops.c consists of 8 independent modules (routines) which, except for
module 2, conduct numerical integration of various functions. Module
2, estimates the value of pi based upon the Maclaurin series expansion
of atan(1). MFLOPS ratings are provided for each module, but the
programs overall results are summerized by the MFLOPS(1), MFLOPS(2),
MFLOPS(3), and MFLOPS(4) outputs.
The MFLOPS(1) result is identical to the result provided by all
previous versions of flops.c. It is based only upon the results from
modules 2 and 3. Two problems surfaced in using MFLOPS(1). First, it
was difficult to completely 'vectorize' the result due to the
recurrence of the 's' variable in module 2. This problem is addressed
in the MFLOPS(2) result which does not use module 2, but maintains
nearly the same weighting of FDIV's (9.2%) as in MFLOPS(1) (9.6%).
The second problem with MFLOPS(1) centers around the percentage of
FDIV's (9.6%) which was viewed as too high for an important class of
problems. This concern is addressed in the MFLOPS(3) result where NO
FDIV's are conducted at all.
The number of floating-point instructions per iteration (loop) is
given below for each module executed:
MODULE FADD FSUB FMUL FDIV TOTAL Comment
1 7 0 6 1 14 7.1% FDIV's
2 3 2 1 1 7 difficult to vectorize.
3 6 2 9 0 17 0.0% FDIV's
4 7 0 8 0 15 0.0% FDIV's
5 13 0 15 1 29 3.4% FDIV's
6 13 0 16 0 29 0.0% FDIV's
7 3 3 3 3 12 25.0% FDIV's
8 13 0 17 0 30 0.0% FDIV's
A*2+3 21 12 14 5 52 A=5, MFLOPS(1), Same as
40.4% 23.1% 26.9% 9.6% previous versions of the
flops.c program. Includes
only Modules 2 and 3, does
9.6% FDIV's, and is not
easily vectorizable.
1+3+4 58 14 66 14 152 A=4, MFLOPS(2), New output
+5+6+ 38.2% 9.2% 43.4% 9.2% does not include Module 2,
A*7 but does 9.2% FDIV's.
1+3+4 62 5 74 5 146 A=0, MFLOPS(3), New output
+5+6+ 42.9% 3.4% 50.7% 3.4% does not include Module 2,
7+8 but does 3.4% FDIV's.
3+4+6 39 2 50 0 91 A=0, MFLOPS(4), New output
+8 42.9% 2.2% 54.9% 0.0% does not include Module 2,
and does NO FDIV's.
NOTE: Various timer routines are included as indicated below. The
timer routines, with some comments, are attached at the end
of the main program.
NOTE: Please do not remove any of the printouts.
EXAMPLE COMPILATION:
UNIX based systems
cc -DUNIX -O flops.c -o flops
cc -DUNIX -DROPT flops.c -o flops
cc -DUNIX -fast -O4 flops.c -o flops
.
.
.
etc.
<NAME>
<EMAIL>
*/
"""
import math
import time
def dtime(p):
    """Update the 3-slot timer list *p* in place.

    p[2] is overwritten with the current timestamp and p[1] receives the
    interval since the previous call (new p[2] minus old p[2]); p[0] is
    unused.  Uses time.perf_counter() -- a monotonic, high-resolution
    clock intended for interval measurement -- instead of time.time(),
    whose wall-clock value can jump (NTP adjustment, DST) and corrupt
    the measured intervals.  Callers only consume deltas, so the change
    is backward compatible.
    """
    previous = p[2]
    p[2] = time.perf_counter()
    p[1] = p[2] - previous
def flops(quick=True):
TimeArray = [0.0, 0.0, 0.0]
#double nulltime, TimeArray[3]; /* Variables needed for 'dtime()'. */
#double TLimit; /* Threshold to determine Number of */
# /* Loops to run. Fixed at 15.0 seconds.*/
T = [0.0 for x in arange(36)]
#double T[36]; /* Global Array used to hold timing */
# /* results and other information. */
#double sa,sb,sc,sd,one,two,three;
#double four,five,piref,piprg;
#double scale,pierr;
A0 = 1.0
A1 = -0.1666666666671334
A2 = 0.833333333809067E-2
A3 = 0.198412715551283E-3
A4 = 0.27557589750762E-5
A5 = 0.2507059876207E-7
A6 = 0.164105986683E-9
B0 = 1.0
B1 = -0.4999999999982
B2 = 0.4166666664651E-1
B3 = -0.1388888805755E-2
B4 = 0.24801428034E-4
B5 = -0.2754213324E-6
B6 = 0.20189405E-8
C0 = 1.0
C1 = 0.99999999668
C2 = 0.49999995173
C3 = 0.16666704243
C4 = 0.4166685027E-1
C5 = 0.832672635E-2
C6 = 0.140836136E-2
C7 = 0.17358267E-3
C8 = 0.3931683E-4
D1 = 0.3999999946405E-1
D2 = 0.96E-3
D3 = 0.1233153E-5
E2 = 0.48E-3
E3 = 0.411051E-6
print("\n")
print(" FLOPS Python Program (Double Precision), V2.0 18 Dec 1992\n\n")
# Initial number of loops. Original code claims this is a magic number.
loops = 15625
#/****************************************************/
#/* Set Variable Values. */
#/* T[1] references all timing results relative to */
#/* one million loops. */
#/* */
#/* The program will execute from 31250 to 512000000 */
#/* loops based on a runtime of Module 1 of at least */
#/* TLimit = 15.0 seconds. That is, a runtime of 15 */
#/* seconds for Module 1 is used to determine the */
#/* number of loops to execute. */
#/* */
#/* No more than NLimit = 512000000 loops are allowed*/
#/****************************************************/
T[1] = 1.0E+06/loops
TLimit = 15.0
NLimit = 512000000
piref = 3.14159265358979324
one = 1.0
two = 2.0
three = 3.0
four = 4.0
five = 5.0
scale = one
print(" Module Error RunTime MFLOPS\n")
print(" (usec)\n")
#/*************************/
#/* Initialize the timer. */
#/*************************/
dtime(TimeArray)
dtime(TimeArray)
#/*******************************************************/
#/* Module 1. Calculate integral of df(x)/f(x) defined */
#/* below. Result is ln(f(1)). There are 14 */
#/* double precision operations per loop */
#/* ( 7 +, 0 -, 6 *, 1 / ) that are included */
#/* in the timing. */
#/* 50.0% +, 00.0% -, 42.9% *, and 07.1% / */
#/*******************************************************/
n = loops
sa = 0.0
while sa < TLimit:
n = 2 * n
x = one / n # /*********************/
s = 0.0 # /* Loop 1. */
v = 0.0 # /*********************/
w = one
dtime(TimeArray)
for i in arange(1,n):
v = v + w
u = v * x
s = s + (D1+u*(D2+u*D3))/(w+u*(D1+u*(E2+u*E3)))
dtime(TimeArray)
sa = TimeArray[1]
if n == NLimit:
break
#/* printf(" %10ld %12.5lf\n",n,sa); */
scale = 1.0E+06 / n
T[1] = scale
#/****************************************/
#/* Estimate nulltime ('for' loop time). */
#/****************************************/
dtime(TimeArray)
for i in arange(1, n):
pass
dtime(TimeArray)
nulltime = T[1] * TimeArray[1]
if nulltime < 0.0:
nulltime = 0.0
T[2] = T[1] * sa - nulltime
sa = (D1+D2+D3)/(one+D1+E2+E3)
sb = D1
T[3] = T[2] / 14.0# /*********************/
sa = x * ( sa + sb + two * s ) / two# /* Module 1 Results. */
sb = one / sa# /*********************/
n = int( ( 40000 * sb ) / scale )
sc = sb - 25.2
T[4] = one / T[3]
# /********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /********************/
#// printf(" 1 %13.4le %10.4lf %10.4lf\n",sc,T[2],T[4])
print(" 1 {:13.4e} {:10.4f} {:10.4f}\n".format(sc,T[2],T[4]))
m = n
#/*******************************************************/
#/* Module 2. Calculate value of PI from Taylor Series */
#/* expansion of atan(1.0). There are 7 */
#/* double precision operations per loop */
#/* ( 3 +, 2 -, 1 *, 1 / ) that are included */
#/* in the timing. */
#/* 42.9% +, 28.6% -, 14.3% *, and 14.3% / */
#/*******************************************************/
s = -five# /********************/
sa = -one# /* Loop 2. */
# /********************/
dtime(TimeArray)
for i in arange(1, m+1):
s = -s
sa = sa + s
dtime(TimeArray)
T[5] = T[1] * TimeArray[1]
if T[5] < 0.0:
T[5] = 0.0
sc = m
u = sa# /*********************/
v = 0.0# /* Loop 3. */
w = 0.0# /*********************/
x = 0.0
dtime(TimeArray)
for i in arange(1, m+1):
s = -s
sa = sa + s
u = u + two
x = x +(s - u)
v = v - s * u
w = w + s / u
dtime(TimeArray)
T[6] = T[1] * TimeArray[1]
T[7] = ( T[6] - T[5] ) / 7.0# /*********************/
m = int( sa * x / sc )# /* PI Results */
sa = four * w / five# /*********************/
sb = sa + five / v
sc = 31.25
piprg = sb - sc / (v * v * v)
pierr = piprg - piref
T[8] = one / T[7]
# /*********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /*********************/
print(" 2 {:13.4e} {:10.4f} {:10.4f}\n".format(pierr,T[6]-T[5],T[8]))
#/*******************************************************/
#/* Module 3. Calculate integral of sin(x) from 0.0 to */
#/* PI/3.0 using Trapazoidal Method. Result */
#/* is 0.5. There are 17 double precision */
#/* operations per loop (6 +, 2 -, 9 *, 0 /) */
#/* included in the timing. */
#/* 35.3% +, 11.8% -, 52.9% *, and 00.0% / */
#/*******************************************************/
x = piref / ( three * m )# /*********************/
s = 0.0# /* Loop 4. */
v = 0.0# /*********************/
dtime(TimeArray)
for i in arange(1, m):
v = v + one
u = v * x
w = u * u
s = s + u * ((((((A6*w-A5)*w+A4)*w-A3)*w+A2)*w+A1)*w+one)
dtime(TimeArray)
T[9] = T[1] * TimeArray[1] - nulltime
u = piref / three
w = u * u
sa = u * ((((((A6*w-A5)*w+A4)*w-A3)*w+A2)*w+A1)*w+one)
T[10] = T[9] / 17.0# /*********************/
sa = x * ( sa + two * s ) / two# /* sin(x) Results. */
sb = 0.5# /*********************/
sc = sa - sb
T[11] = one / T[10]
# /*********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /*********************/
print(" 3 {:13.4e} {:10.4f} {:10.4f}\n".format(sc,T[9],T[11]))
if quick:
return 0
#/************************************************************/
#/* Module 4. Calculate Integral of cos(x) from 0.0 to PI/3 */
#/* using the Trapazoidal Method. Result is */
#/* sin(PI/3). There are 15 double precision */
#/* operations per loop (7 +, 0 -, 8 *, and 0 / ) */
#/* included in the timing. */
#/* 50.0% +, 00.0% -, 50.0% *, 00.0% / */
#/************************************************************/
A3 = -A3
A5 = -A5
x = piref / ( three * m )# /*********************/
s = 0.0# /* Loop 5. */
v = 0.0# /*********************/
dtime(TimeArray)
for i in arange(1, m):
u = i * x
w = u * u
s = s + w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
dtime(TimeArray)
T[12] = T[1] * TimeArray[1] - nulltime
u = piref / three
w = u * u
sa = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
T[13] = T[12] / 15.0# /*******************/
sa = x * ( sa + one + two * s ) / two# /* Module 4 Result */
u = piref / three# /*******************/
w = u * u
sb = u * ((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+A0)
sc = sa - sb
T[14] = one / T[13]
# /*********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /*********************/
print(" 4 {:13.4e} {:10.4f} {:10.4f}\n".format(sc,T[12],T[14]))
#/************************************************************/
#/* Module 5. Calculate Integral of tan(x) from 0.0 to PI/3 */
#/* using the Trapazoidal Method. Result is */
#/* ln(cos(PI/3)). There are 29 double precision */
#/* operations per loop (13 +, 0 -, 15 *, and 1 /)*/
#/* included in the timing. */
#/* 46.7% +, 00.0% -, 50.0% *, and 03.3% / */
#/************************************************************/
x = piref / ( three * m )# /*********************/
s = 0.0# /* Loop 6. */
v = 0.0# /*********************/
dtime(TimeArray)
for i in arange(1, m):
u = i * x
w = u * u
v = u * ((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
s = s + v / (w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one)
dtime(TimeArray)
T[15] = T[1] * TimeArray[1] - nulltime
u = piref / three
w = u * u
sa = u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
sb = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
sa = sa / sb
T[16] = T[15] / 29.0# /*******************/
sa = x * ( sa + two * s ) / two# /* Module 5 Result */
sb = 0.6931471805599453# /*******************/
sc = sa - sb
T[17] = one / T[16]
# /*********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /*********************/
print(" 5 {:13.4e} {:10.4f} {:10.4f}\n".format(sc,T[15],T[17]))
#/************************************************************/
#/* Module 6. Calculate Integral of sin(x)*cos(x) from 0.0 */
#/* to PI/4 using the Trapazoidal Method. Result */
#/* is sin(PI/4)^2. There are 29 double precision */
#/* operations per loop (13 +, 0 -, 16 *, and 0 /)*/
#/* included in the timing. */
#/* 46.7% +, 00.0% -, 53.3% *, and 00.0% / */
#/************************************************************/
x = piref / ( four * m )# /*********************/
s = 0.0# /* Loop 7. */
v = 0.0# /*********************/
dtime(TimeArray)
for i in arange(1, m):
u = i * x
w = u * u
v = u * ((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
s = s + v*(w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one)
dtime(TimeArray)
T[18] = T[1] * TimeArray[1] - nulltime
u = piref / four
w = u * u
sa = u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
sb = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
sa = sa * sb
T[19] = T[18] / 29.0# /*******************/
sa = x * ( sa + two * s ) / two# /* Module 6 Result */
sb = 0.25# /*******************/
sc = sa - sb
T[20] = one / T[19]
# /*********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /*********************/
print(" 6 {:13.4e} {:10.4f} {:10.4f}\n".format(sc,T[18],T[20]))
#/*******************************************************/
#/* Module 7. Calculate value of the definite integral */
#/* from 0 to sa of 1/(x+1), x/(x*x+1), and */
#/* x*x/(x*x*x+1) using the Trapizoidal Rule.*/
#/* There are 12 double precision operations */
#/* per loop ( 3 +, 3 -, 3 *, and 3 / ) that */
#/* are included in the timing. */
#/* 25.0% +, 25.0% -, 25.0% *, and 25.0% / */
#/*******************************************************/
# /*********************/
s = 0.0# /* Loop 8. */
w = one# /*********************/
sa = 102.3321513995275
v = sa / m
dtime(TimeArray)
for i in arange(1, m):
x = i * v
u = x * x
s = s - w / ( x + w ) - x / ( u + w ) - u / ( x * u + w )
dtime(TimeArray)
T[21] = T[1] * TimeArray[1] - nulltime
# /*********************/
# /* Module 7 Results */
# /*********************/
T[22] = T[21] / 12.0
x = sa
u = x * x
sa = -w - w / ( x + w ) - x / ( u + w ) - u / ( x * u + w )
sa = 18.0 * v * (sa + two * s )
m = -2000 * int(sa)
m = int( m / scale )
sc = sa + 500.2
T[23] = one / T[22]
# /********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /********************/
print(" 7 {:13.4e} {:10.4f] {:10.4f}\n".format(sc,T[21],T[23]))
#/************************************************************/
#/* Module 8. Calculate Integral of sin(x)*cos(x)*cos(x) */
#/* from 0 to PI/3 using the Trapazoidal Method. */
#/* Result is (1-cos(PI/3)^3)/3. There are 30 */
#/* double precision operations per loop included */
#/* in the timing: */
#/* 13 +, 0 -, 17 * 0 / */
#/* 46.7% +, 00.0% -, 53.3% *, and 00.0% / */
#/************************************************************/
x = piref / ( three * m )# /*********************/
s = 0.0# /* Loop 9. */
v = 0.0# /*********************/
dtime(TimeArray)
for i in range(1, m):
u = i * x
w = u * u
v = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
s = s + v*v*u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
dtime(TimeArray)
T[24] = T[1] * TimeArray[1] - nulltime
u = piref / three
w = u * u
sa = u*((((((A6*w+A5)*w+A4)*w+A3)*w+A2)*w+A1)*w+one)
sb = w*(w*(w*(w*(w*(B6*w+B5)+B4)+B3)+B2)+B1)+one
sa = sa * sb * sb
T[25] = T[24] / 30.0# /*******************/
sa = x * ( sa + two * s ) / two# /* Module 8 Result */
sb = 0.29166666666666667# /*******************/
sc = sa - sb
T[26] = one / T[25]
# /*********************/
# /* DO NOT REMOVE */
# /* THIS PRINTOUT! */
# /*********************/
print(" 8 {:13.4e} {:10.4f} {:10.4f}\n".format(sc,T[24],T[26]))
#/**************************************************/
#/* MFLOPS(1) output. This is the same weighting */
#/* used for all previous versions of the flops.c */
#/* program. Includes Modules 2 and 3 only. */
#/**************************************************/
T[27] = ( five * (T[6] - T[5]) + T[9] ) / 52.0
T[28] = one / T[27]
#/**************************************************/
#/* MFLOPS(2) output. This output does not include */
#/* Module 2, but it still does 9.2% FDIV's. */
#/**************************************************/
T[29] = T[2] + T[9] + T[12] + T[15] + T[18]
T[29] = (T[29] + four * T[21]) / 152.0
T[30] = one / T[29]
#/**************************************************/
#/* MFLOPS(3) output. This output does not include */
#/* Module 2, but it still does 3.4% FDIV's. */
#/**************************************************/
T[31] = T[2] + T[9] + T[12] + T[15] + T[18]
T[31] = (T[31] + T[21] + T[24]) / 146.0
T[32] = one / T[31]
#/**************************************************/
#/* MFLOPS(4) output. This output does not include */
#/* Module 2, and it does NO FDIV's. */
#/**************************************************/
T[33] = (T[9] + T[12] + T[18] + T[24]) / 91.0
T[34] = one / T[33]
print("\n")
print(" Iterations = {:10d\n}".format(m))
print(" NullTime (usec) = {:10.4f}\n".format(nulltime))
print(" MFLOPS(1) = {:10.4f}\n".format(T[28]))
print(" MFLOPS(2) = {:10.4f}\n".format(T[30]))
print(" MFLOPS(3) = {:10.4f}\n".format(T[32]))
print(" MFLOPS(4) = {:10.4f}\n\n".format(T[34]))
# Run the full FLOPS benchmark suite when executed as a script.
if __name__ == "__main__":
    flops()
| en | 0.585953 | #!/usr/local/bin/python2.5 #/*****************************/ #/* flops.c */ #/* Version 2.0, 18 Dec 1992 */ #/* <NAME> */ #/* <EMAIL> */ #/*****************************/ /* Flops.c is a 'c' program which attempts to estimate your systems floating-point 'MFLOPS' rating for the FADD, FSUB, FMUL, and FDIV operations based on specific 'instruction mixes' (discussed below). The program provides an estimate of PEAK MFLOPS performance by making maximal use of register variables with minimal interaction with main memory. The execution loops are all small so that they will fit in any cache. Flops.c can be used along with Linpack and the Livermore kernels (which exersize memory much more extensively) to gain further insight into the limits of system performance. The flops.c execution modules also include various percent weightings of FDIV's (from 0% to 25% FDIV's) so that the range of performance can be obtained when using FDIV's. FDIV's, being computationally more intensive than FADD's or FMUL's, can impact performance considerably on some systems. Flops.c consists of 8 independent modules (routines) which, except for module 2, conduct numerical integration of various functions. Module 2, estimates the value of pi based upon the Maclaurin series expansion of atan(1). MFLOPS ratings are provided for each module, but the programs overall results are summerized by the MFLOPS(1), MFLOPS(2), MFLOPS(3), and MFLOPS(4) outputs. The MFLOPS(1) result is identical to the result provided by all previous versions of flops.c. It is based only upon the results from modules 2 and 3. Two problems surfaced in using MFLOPS(1). First, it was difficult to completely 'vectorize' the result due to the recurrence of the 's' variable in module 2. This problem is addressed in the MFLOPS(2) result which does not use module 2, but maintains nearly the same weighting of FDIV's (9.2%) as in MFLOPS(1) (9.6%). 
The second problem with MFLOPS(1) centers around the percentage of FDIV's (9.6%) which was viewed as too high for an important class of problems. This concern is addressed in the MFLOPS(3) result where NO FDIV's are conducted at all. The number of floating-point instructions per iteration (loop) is given below for each module executed: MODULE FADD FSUB FMUL FDIV TOTAL Comment 1 7 0 6 1 14 7.1% FDIV's 2 3 2 1 1 7 difficult to vectorize. 3 6 2 9 0 17 0.0% FDIV's 4 7 0 8 0 15 0.0% FDIV's 5 13 0 15 1 29 3.4% FDIV's 6 13 0 16 0 29 0.0% FDIV's 7 3 3 3 3 12 25.0% FDIV's 8 13 0 17 0 30 0.0% FDIV's A*2+3 21 12 14 5 52 A=5, MFLOPS(1), Same as 40.4% 23.1% 26.9% 9.6% previous versions of the flops.c program. Includes only Modules 2 and 3, does 9.6% FDIV's, and is not easily vectorizable. 1+3+4 58 14 66 14 152 A=4, MFLOPS(2), New output +5+6+ 38.2% 9.2% 43.4% 9.2% does not include Module 2, A*7 but does 9.2% FDIV's. 1+3+4 62 5 74 5 146 A=0, MFLOPS(3), New output +5+6+ 42.9% 3.4% 50.7% 3.4% does not include Module 2, 7+8 but does 3.4% FDIV's. 3+4+6 39 2 50 0 91 A=0, MFLOPS(4), New output +8 42.9% 2.2% 54.9% 0.0% does not include Module 2, and does NO FDIV's. NOTE: Various timer routines are included as indicated below. The timer routines, with some comments, are attached at the end of the main program. NOTE: Please do not remove any of the printouts. EXAMPLE COMPILATION: UNIX based systems cc -DUNIX -O flops.c -o flops cc -DUNIX -DROPT flops.c -o flops cc -DUNIX -fast -O4 flops.c -o flops . . . etc. <NAME> <EMAIL> */ #double nulltime, TimeArray[3]; /* Variables needed for 'dtime()'. */ #double TLimit; /* Threshold to determine Number of */ # /* Loops to run. Fixed at 15.0 seconds.*/ #double T[36]; /* Global Array used to hold timing */ # /* results and other information. */ #double sa,sb,sc,sd,one,two,three; #double four,five,piref,piprg; #double scale,pierr; # Initial number of loops. Original code claims this is a magic number. 
#/****************************************************/ #/* Set Variable Values. */ #/* T[1] references all timing results relative to */ #/* one million loops. */ #/* */ #/* The program will execute from 31250 to 512000000 */ #/* loops based on a runtime of Module 1 of at least */ #/* TLimit = 15.0 seconds. That is, a runtime of 15 */ #/* seconds for Module 1 is used to determine the */ #/* number of loops to execute. */ #/* */ #/* No more than NLimit = 512000000 loops are allowed*/ #/****************************************************/ #/*************************/ #/* Initialize the timer. */ #/*************************/ #/*******************************************************/ #/* Module 1. Calculate integral of df(x)/f(x) defined */ #/* below. Result is ln(f(1)). There are 14 */ #/* double precision operations per loop */ #/* ( 7 +, 0 -, 6 *, 1 / ) that are included */ #/* in the timing. */ #/* 50.0% +, 00.0% -, 42.9% *, and 07.1% / */ #/*******************************************************/ # /*********************/ # /* Loop 1. */ # /*********************/ #/* printf(" %10ld %12.5lf\n",n,sa); */ #/****************************************/ #/* Estimate nulltime ('for' loop time). */ #/****************************************/ # /*********************/ # /* Module 1 Results. */ # /*********************/ # /********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /********************/ #// printf(" 1 %13.4le %10.4lf %10.4lf\n",sc,T[2],T[4]) #/*******************************************************/ #/* Module 2. Calculate value of PI from Taylor Series */ #/* expansion of atan(1.0). There are 7 */ #/* double precision operations per loop */ #/* ( 3 +, 2 -, 1 *, 1 / ) that are included */ #/* in the timing. */ #/* 42.9% +, 28.6% -, 14.3% *, and 14.3% / */ #/*******************************************************/ # /********************/ # /* Loop 2. */ # /********************/ # /*********************/ # /* Loop 3. 
*/ # /*********************/ # /*********************/ # /* PI Results */ # /*********************/ # /*********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /*********************/ #/*******************************************************/ #/* Module 3. Calculate integral of sin(x) from 0.0 to */ #/* PI/3.0 using Trapazoidal Method. Result */ #/* is 0.5. There are 17 double precision */ #/* operations per loop (6 +, 2 -, 9 *, 0 /) */ #/* included in the timing. */ #/* 35.3% +, 11.8% -, 52.9% *, and 00.0% / */ #/*******************************************************/ # /*********************/ # /* Loop 4. */ # /*********************/ # /*********************/ # /* sin(x) Results. */ # /*********************/ # /*********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /*********************/ #/************************************************************/ #/* Module 4. Calculate Integral of cos(x) from 0.0 to PI/3 */ #/* using the Trapazoidal Method. Result is */ #/* sin(PI/3). There are 15 double precision */ #/* operations per loop (7 +, 0 -, 8 *, and 0 / ) */ #/* included in the timing. */ #/* 50.0% +, 00.0% -, 50.0% *, 00.0% / */ #/************************************************************/ # /*********************/ # /* Loop 5. */ # /*********************/ # /*******************/ # /* Module 4 Result */ # /*******************/ # /*********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /*********************/ #/************************************************************/ #/* Module 5. Calculate Integral of tan(x) from 0.0 to PI/3 */ #/* using the Trapazoidal Method. Result is */ #/* ln(cos(PI/3)). There are 29 double precision */ #/* operations per loop (13 +, 0 -, 15 *, and 1 /)*/ #/* included in the timing. */ #/* 46.7% +, 00.0% -, 50.0% *, and 03.3% / */ #/************************************************************/ # /*********************/ # /* Loop 6. 
*/ # /*********************/ # /*******************/ # /* Module 5 Result */ # /*******************/ # /*********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /*********************/ #/************************************************************/ #/* Module 6. Calculate Integral of sin(x)*cos(x) from 0.0 */ #/* to PI/4 using the Trapazoidal Method. Result */ #/* is sin(PI/4)^2. There are 29 double precision */ #/* operations per loop (13 +, 0 -, 16 *, and 0 /)*/ #/* included in the timing. */ #/* 46.7% +, 00.0% -, 53.3% *, and 00.0% / */ #/************************************************************/ # /*********************/ # /* Loop 7. */ # /*********************/ # /*******************/ # /* Module 6 Result */ # /*******************/ # /*********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /*********************/ #/*******************************************************/ #/* Module 7. Calculate value of the definite integral */ #/* from 0 to sa of 1/(x+1), x/(x*x+1), and */ #/* x*x/(x*x*x+1) using the Trapizoidal Rule.*/ #/* There are 12 double precision operations */ #/* per loop ( 3 +, 3 -, 3 *, and 3 / ) that */ #/* are included in the timing. */ #/* 25.0% +, 25.0% -, 25.0% *, and 25.0% / */ #/*******************************************************/ # /*********************/ # /* Loop 8. */ # /*********************/ # /*********************/ # /* Module 7 Results */ # /*********************/ # /********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /********************/ #/************************************************************/ #/* Module 8. Calculate Integral of sin(x)*cos(x)*cos(x) */ #/* from 0 to PI/3 using the Trapazoidal Method. */ #/* Result is (1-cos(PI/3)^3)/3. 
There are 30 */ #/* double precision operations per loop included */ #/* in the timing: */ #/* 13 +, 0 -, 17 * 0 / */ #/* 46.7% +, 00.0% -, 53.3% *, and 00.0% / */ #/************************************************************/ # /*********************/ # /* Loop 9. */ # /*********************/ # /*******************/ # /* Module 8 Result */ # /*******************/ # /*********************/ # /* DO NOT REMOVE */ # /* THIS PRINTOUT! */ # /*********************/ #/**************************************************/ #/* MFLOPS(1) output. This is the same weighting */ #/* used for all previous versions of the flops.c */ #/* program. Includes Modules 2 and 3 only. */ #/**************************************************/ #/**************************************************/ #/* MFLOPS(2) output. This output does not include */ #/* Module 2, but it still does 9.2% FDIV's. */ #/**************************************************/ #/**************************************************/ #/* MFLOPS(3) output. This output does not include */ #/* Module 2, but it still does 3.4% FDIV's. */ #/**************************************************/ #/**************************************************/ #/* MFLOPS(4) output. This output does not include */ #/* Module 2, and it does NO FDIV's. */ #/**************************************************/ | 2.437327 | 2 |
1249/minimum remove to make valid parentheses.py | cccccccccccccc/Myleetcode | 0 | 6618819 | <gh_stars>0
class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
fistparsestring = []
balance = 0
open_parenthese_found = 0
for c in s:
if c == '(':
balance +=1
open_parenthese_found += 1
if c == ')':
if balance == 0:
continue
balance-=1
fistparsestring.append(c)
result = []
open_parenthese_keep = open_parenthese_found - balance
for i in fistparsestring:
if i == '(':
open_parenthese_keep -= 1
if open_parenthese_keep < 0:
continue
result.append(i)
return "".join(result)
# Ad-hoc smoke-test inputs. Lists of single characters work too, since the
# method only iterates the input and joins the survivors.
A = Solution()
s = ["a",")","b","(","c",")","d"]
s1 = ["l","e","(","t","(",")",")",")"]
s2 = [")",")","(","("]
s3 = ["(","a","(","b","(","c",")","d",")"]
s4 = ["(","("]
print(A.minRemoveToMakeValid(s3)) | class Solution:
def minRemoveToMakeValid(self, s: str) -> str:
fistparsestring = []
balance = 0
open_parenthese_found = 0
for c in s:
if c == '(':
balance +=1
open_parenthese_found += 1
if c == ')':
if balance == 0:
continue
balance-=1
fistparsestring.append(c)
result = []
open_parenthese_keep = open_parenthese_found - balance
for i in fistparsestring:
if i == '(':
open_parenthese_keep -= 1
if open_parenthese_keep < 0:
continue
result.append(i)
return "".join(result)
A = Solution()
s = ["a",")","b","(","c",")","d"]
s1 = ["l","e","(","t","(",")",")",")"]
s2 = [")",")","(","("]
s3 = ["(","a","(","b","(","c",")","d",")"]
s4 = ["(","("]
print(A.minRemoveToMakeValid(s3)) | none | 1 | 2.838913 | 3 | |
src/spyd/game/room/client_event_handlers/give_master_handler.py | DanSeraf/spyd | 0 | 6618820 | from spyd.registry_manager import register
from spyd.game.client.exceptions import GenericError
ADMIN_PASS = "<PASSWORD>"
class WrongCredentials(GenericError):
    """Authentication failure error.

    Unused in this module; presumably raised when a supplied password does
    not match ADMIN_PASS — TODO confirm against callers.
    """
    pass
@register('room_client_event_handler')
class GiveMasterHandler(object):
    """Room event handler for the 'give_master' client event."""
    event_type = 'give_master'

    @staticmethod
    def handle(room, client, client_target):
        # Privilege level 1 presumably means "master" in spyd's privilege
        # scheme — TODO confirm against room._client_change_privilege.
        room._client_change_privilege(client, client_target, 1)
| from spyd.registry_manager import register
from spyd.game.client.exceptions import GenericError
ADMIN_PASS = "<PASSWORD>"
class WrongCredentials(GenericError):
pass
@register('room_client_event_handler')
class GiveMasterHandler(object):
event_type = 'give_master'
@staticmethod
def handle(room, client, client_target):
room._client_change_privilege(client, client_target, 1)
| none | 1 | 1.954144 | 2 | |
pysm/preprocessing/museum_edm/x03_normalize_json.py | binh-vu/semantic-modeling | 3 | 6618821 | <reponame>binh-vu/semantic-modeling
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from typing import Dict, Tuple, List, Set, Union, Optional, Any
from semantic_modeling.config import config
from semantic_modeling.utilities.serializable import deserializeJSON, serializeJSON
"""Usually run after generate r2rml and copied from KARMA_HOME"""
# Dataset whose Karma model files are being normalized.
dataset = "museum_edm"
model_dir = Path(config.datasets[dataset].karma_version.as_path()) / "models-json"
# For each model file: rewrite its 'id'/'name' to the bare file stem, save it
# under the canonical "<id>-model.json" name, then delete the original file.
for file in sorted(model_dir.iterdir()):
    sm = deserializeJSON(file)
    sm['id'] = Path(sm['id']).stem
    sm['name'] = sm['id']
    serializeJSON(sm, model_dir / f"{sm['id']}-model.json", indent=4)
    os.remove(file)
# -*- coding: utf-8 -*-
import os
from pathlib import Path
from typing import Dict, Tuple, List, Set, Union, Optional, Any
from semantic_modeling.config import config
from semantic_modeling.utilities.serializable import deserializeJSON, serializeJSON
"""Usually run after generate r2rml and copied from KARMA_HOME"""
dataset = "museum_edm"
model_dir = Path(config.datasets[dataset].karma_version.as_path()) / "models-json"
for file in sorted(model_dir.iterdir()):
sm = deserializeJSON(file)
sm['id'] = Path(sm['id']).stem
sm['name'] = sm['id']
serializeJSON(sm, model_dir / f"{sm['id']}-model.json", indent=4)
os.remove(file) | en | 0.621171 | #!/usr/bin/python # -*- coding: utf-8 -*- Usually run after generate r2rml and copied from KARMA_HOME | 2.241064 | 2 |
tutorial/part_02/exceptions.py | gdmgent-internet-of-things/colleges-2122 | 0 | 6618822 | <reponame>gdmgent-internet-of-things/colleges-2122
import sys
def inclusive_range(*args):
numargs = len(args)
start = 0
step = 1
# Get paramters from args
if numargs < 1:
raise TypeError(f'Expected at least 1 argument, got {numargs} ')
elif numargs == 1:
stop = args[0]
elif numargs == 2:
(start, stop) = args
elif numargs == 3:
(start, stop, step) = args
else:
raise TypeError(f'Expected at most 3 argument, got {numargs} ')
# Generator
i = start
while i <= stop:
yield i
i += step
def main():
    """Demonstrate try/except/else handling and a generator-raised TypeError."""
    print('Application started!')
    try:
        x = 5/0  # always raises ZeroDivisionError (not ValueError)
    except ValueError:
        print(f'I caught a value error!')
    except:
        # Bare except is deliberate here (tutorial code): it catches the
        # ZeroDivisionError and reports it via sys.exc_info().
        print(f'Unknow Error: {sys.exc_info()[1]}')
    else:
        print(f'Good job {x}!')
    try:
        # inclusive_range() with no arguments raises TypeError on the first
        # iteration (generators defer their body until first next()).
        for i in inclusive_range():
            print(i, end = ' ', flush=True)
        print()
    except TypeError as e:
        print(f'Range error: {e}')
# Script entry point.
if __name__ == '__main__': main()
def inclusive_range(*args):
numargs = len(args)
start = 0
step = 1
# Get paramters from args
if numargs < 1:
raise TypeError(f'Expected at least 1 argument, got {numargs} ')
elif numargs == 1:
stop = args[0]
elif numargs == 2:
(start, stop) = args
elif numargs == 3:
(start, stop, step) = args
else:
raise TypeError(f'Expected at most 3 argument, got {numargs} ')
# Generator
i = start
while i <= stop:
yield i
i += step
def main():
print('Application started!')
try:
x = 5/0
except ValueError:
print(f'I caught a value error!')
except:
print(f'Unknow Error: {sys.exc_info()[1]}')
else:
print(f'Good job {x}!')
try:
for i in inclusive_range():
print(i, end = ' ', flush=True)
print()
except TypeError as e:
print(f'Range error: {e}')
if __name__ == '__main__': main() | en | 0.282551 | # Get paramters from args # Generator | 3.607024 | 4 |
demo.py | Freefighter/EM-algorithm-with-2-components | 0 | 6618823 | <reponame>Freefighter/EM-algorithm-with-2-components
import numpy as np
import EM_algo
from importlib import reload
reload(EM_algo)
if __name__ == "__main__":
# def Theta_init():
# beta_1_p = (np.random.random([3, 1]) - 0.5) * 10;
# beta_2_p = (np.random.random([3, 1]) - 0.5) * 10
# gamma_p = (np.random.random([3, 1]) - 0.5) * 10;
# sigma_p = 2
# return np.vstack([beta_1_p, beta_2_p, gamma_p, sigma_p])
# beta_1 = np.array([-1, 2, 3]).reshape(-1,1);
# beta_2 = np.array([2, -3, 4]).reshape(-1,1);
# gamma = np.array([2, -1, -1]).reshape(-1,1);
# sigma = 0.5
    def Theta_init():
        """Random initial guess for the EM run.

        Returns beta_1, beta_2 and gamma stacked into one 9x1 column
        vector, each entry uniform in (-0.05, 0.05).
        """
        beta_1_p = ((np.random.random([3, 1])) - 0.5)/10;
        beta_2_p = ((np.random.random([3, 1])) - 0.5)/10;
        gamma_p = ((np.random.random([3, 1])) - 0.5)/10;
        return np.vstack([beta_1_p, beta_2_p, gamma_p])
beta_1 = np.array([-1, 1, 1]).reshape(-1,1);
beta_2 = np.array([1.5, -1, 0.5]).reshape(-1,1);
gamma = np.array([2, -1.5, -0.5]).reshape(-1,1)
proc = EM_algo.EM_Algorithm(distribution="Poisson")
# proc.ans = np.vstack((beta_1, beta_2, gamma, sigma))
# proc.generateData((500, 3, 3), (beta_1, beta_2, gamma, sigma))
proc.ans = np.vstack((beta_1, beta_2, gamma))
proc.generateData((1000, 3, 3), (beta_1, beta_2, gamma))
proc.simulate(Theta_init, times=5) | import numpy as np
import EM_algo
from importlib import reload
reload(EM_algo)
if __name__ == "__main__":
# def Theta_init():
# beta_1_p = (np.random.random([3, 1]) - 0.5) * 10;
# beta_2_p = (np.random.random([3, 1]) - 0.5) * 10
# gamma_p = (np.random.random([3, 1]) - 0.5) * 10;
# sigma_p = 2
# return np.vstack([beta_1_p, beta_2_p, gamma_p, sigma_p])
# beta_1 = np.array([-1, 2, 3]).reshape(-1,1);
# beta_2 = np.array([2, -3, 4]).reshape(-1,1);
# gamma = np.array([2, -1, -1]).reshape(-1,1);
# sigma = 0.5
def Theta_init():
beta_1_p = ((np.random.random([3, 1])) - 0.5)/10;
beta_2_p = ((np.random.random([3, 1])) - 0.5)/10;
gamma_p = ((np.random.random([3, 1])) - 0.5)/10;
return np.vstack([beta_1_p, beta_2_p, gamma_p])
beta_1 = np.array([-1, 1, 1]).reshape(-1,1);
beta_2 = np.array([1.5, -1, 0.5]).reshape(-1,1);
gamma = np.array([2, -1.5, -0.5]).reshape(-1,1)
proc = EM_algo.EM_Algorithm(distribution="Poisson")
# proc.ans = np.vstack((beta_1, beta_2, gamma, sigma))
# proc.generateData((500, 3, 3), (beta_1, beta_2, gamma, sigma))
proc.ans = np.vstack((beta_1, beta_2, gamma))
proc.generateData((1000, 3, 3), (beta_1, beta_2, gamma))
proc.simulate(Theta_init, times=5) | en | 0.121128 | # def Theta_init(): # beta_1_p = (np.random.random([3, 1]) - 0.5) * 10; # beta_2_p = (np.random.random([3, 1]) - 0.5) * 10 # gamma_p = (np.random.random([3, 1]) - 0.5) * 10; # sigma_p = 2 # return np.vstack([beta_1_p, beta_2_p, gamma_p, sigma_p]) # beta_1 = np.array([-1, 2, 3]).reshape(-1,1); # beta_2 = np.array([2, -3, 4]).reshape(-1,1); # gamma = np.array([2, -1, -1]).reshape(-1,1); # sigma = 0.5 # proc.ans = np.vstack((beta_1, beta_2, gamma, sigma)) # proc.generateData((500, 3, 3), (beta_1, beta_2, gamma, sigma)) | 2.912087 | 3 |
tests/test_dump.py | DNXLabs/ssm-loader | 0 | 6618824 | from moto import mock_ssm
from ssm.cli import dump
from tests.fixtures import aws_credentials, ssm, ssm_put_parameter, runner
from tests.fixtures import ssm_parameters, ssm_empty_parameters
def test_dump_output_without_params(runner, ssm, ssm_empty_parameters):
    """dump on a path with no stored parameters prints the empty document."""
    with runner.isolated_filesystem():
        result = runner.invoke(dump, ['/app/env/ssm'])
        assert result.exit_code == 0
        assert result.output == ssm_empty_parameters
def test_dump_output_with_params(runner, ssm, ssm_put_parameter, ssm_parameters):
    """dump on a populated path prints the expected parameters document."""
    with runner.isolated_filesystem():
        result = runner.invoke(dump, ['/app/env'])
        assert result.exit_code == 0
        assert result.output == ssm_parameters
def test_dump_with_long_option_output_file(runner, ssm, ssm_put_parameter):
    """`dump --output file.json <path>` writes the dump to file.json and echoes it."""
    with runner.isolated_filesystem():
        result = runner.invoke(dump, ['--output', 'file.json', '/app/env'])
        # Read via a context manager so the test does not leak the file handle.
        with open('file.json', 'r') as outfile:
            contents = outfile.read()
        assert result.exit_code == 0
        assert result.output == contents + '\n'
def test_dump_with_short_option_output_file(runner, ssm, ssm_put_parameter):
    """`dump -o file.json <path>` behaves identically to the long --output form."""
    with runner.isolated_filesystem():
        result = runner.invoke(dump, ['-o', 'file.json', '/app/env'])
        # Read via a context manager so the test does not leak the file handle.
        with open('file.json', 'r') as outfile:
            contents = outfile.read()
        assert result.exit_code == 0
        assert result.output == contents + '\n'
def test_dump_with_default_output_file(runner, ssm, ssm_put_parameter):
    """Without --output, dump writes to the default '.env.ssm.json' file."""
    with runner.isolated_filesystem():
        result = runner.invoke(dump, ['/app/env'])
        # Read via a context manager so the test does not leak the file handle.
        with open('.env.ssm.json', 'r') as outfile:
            contents = outfile.read()
        assert result.exit_code == 0
        assert result.output == contents + '\n'
# NOTE(review): 'wront' in the test name and 'must ends' in the expected message
# are typos; the message is asserted against the CLI's actual output, so fixing
# it here requires fixing the CLI first.
def test_dump_with_output_file_wront_termination(runner, ssm):
    """An output filename without a .json suffix makes dump exit with code 1."""
    result = runner.invoke(dump, ['--output', 'file', '/app/env'])
    assert result.exit_code == 1
    assert result.output == 'Output file must ends with .json\n'
| from moto import mock_ssm
from ssm.cli import dump
from tests.fixtures import aws_credentials, ssm, ssm_put_parameter, runner
from tests.fixtures import ssm_parameters, ssm_empty_parameters
def test_dump_output_without_params(runner, ssm, ssm_empty_parameters):
with runner.isolated_filesystem():
result = runner.invoke(dump, ['/app/env/ssm'])
assert result.exit_code == 0
assert result.output == ssm_empty_parameters
def test_dump_output_with_params(runner, ssm, ssm_put_parameter, ssm_parameters):
with runner.isolated_filesystem():
result = runner.invoke(dump, ['/app/env'])
assert result.exit_code == 0
assert result.output == ssm_parameters
def test_dump_with_long_option_output_file(runner, ssm, ssm_put_parameter):
with runner.isolated_filesystem():
result = runner.invoke(dump, ['--output', 'file.json', '/app/env'])
outfile = open('file.json', 'r')
assert result.exit_code == 0
assert result.output == outfile.read() + '\n'
def test_dump_with_short_option_output_file(runner, ssm, ssm_put_parameter):
with runner.isolated_filesystem():
result = runner.invoke(dump, ['-o', 'file.json', '/app/env'])
outfile = open('file.json', 'r')
assert result.exit_code == 0
assert result.output == outfile.read() + '\n'
def test_dump_with_default_output_file(runner, ssm, ssm_put_parameter):
with runner.isolated_filesystem():
result = runner.invoke(dump, ['/app/env'])
outfile = open('.env.ssm.json', 'r')
assert result.exit_code == 0
assert result.output == outfile.read() + '\n'
def test_dump_with_output_file_wront_termination(runner, ssm):
result = runner.invoke(dump, ['--output', 'file', '/app/env'])
assert result.exit_code == 1
assert result.output == 'Output file must ends with .json\n'
| none | 1 | 2.014566 | 2 | |
mt_metadata/transfer_functions/emtf_xml/data_quality_warnings.py | kujaku11/mt_metadata | 10 | 6618825 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 21:30:36 2020
:copyright:
<NAME> (<EMAIL>)
:license: MIT
"""
# =============================================================================
# Imports
# =============================================================================
from mt_metadata.base.helpers import write_lines
from mt_metadata.base import get_schema, Base
from .standards import SCHEMA_FN_PATHS
from . import Comment
# =============================================================================
attr_dict = get_schema("data_quality_warnings", SCHEMA_FN_PATHS)
# =============================================================================
class DataQualityWarnings(Base):
    # Docstring is generated from the JSON schema's attribute dictionary.
    __doc__ = write_lines(attr_dict)

    def __init__(self, **kwargs):
        # Default attribute values; Base.__init__ then applies the schema
        # (attr_dict) and any caller-supplied keyword overrides.
        self.flag = 0
        self.comments = Comment()
        super().__init__(attr_dict=attr_dict, **kwargs)
| # -*- coding: utf-8 -*-
"""
Created on Wed Dec 23 21:30:36 2020
:copyright:
<NAME> (<EMAIL>)
:license: MIT
"""
# =============================================================================
# Imports
# =============================================================================
from mt_metadata.base.helpers import write_lines
from mt_metadata.base import get_schema, Base
from .standards import SCHEMA_FN_PATHS
from . import Comment
# =============================================================================
attr_dict = get_schema("data_quality_warnings", SCHEMA_FN_PATHS)
# =============================================================================
class DataQualityWarnings(Base):
__doc__ = write_lines(attr_dict)
def __init__(self, **kwargs):
self.flag = 0
self.comments = Comment()
super().__init__(attr_dict=attr_dict, **kwargs)
| en | 0.434716 | # -*- coding: utf-8 -*- Created on Wed Dec 23 21:30:36 2020 :copyright: <NAME> (<EMAIL>) :license: MIT # ============================================================================= # Imports # ============================================================================= # ============================================================================= # ============================================================================= | 1.888093 | 2 |
hackerrank/Algorithms/Cut Tree/solution.py | ATrain951/01.python-com_Qproject | 4 | 6618826 | <reponame>ATrain951/01.python-com_Qproject
#!/bin/python3
import os
#
# Complete the cuttree function below.
#
def cuttree(n, k, edges):
    """Count the subtrees obtainable by cutting at most k edges of the tree.

    n: number of vertices (1-based in `edges`, converted to 0-based here).
    edges: list of (u, v) pairs describing the n-1 tree edges.
    Returns the HackerRank "Cut Tree" answer.

    dp[x][j] appears to count configurations of the subtree rooted at x
    using j cuts — TODO confirm; the merge below mutates dp[x] in place,
    so the descending-i order is behavior-critical.
    """
    from collections import defaultdict
    adj = defaultdict(list)
    dp = [[0] * (k + 1) for _ in range(n)]
    # Build a 0-based adjacency list.
    for u, v in edges:
        adj[u - 1].append(v - 1)
        adj[v - 1].append(u - 1)
    def compute(x, val, graph, matrix, most):
        # Post-order DP over the tree rooted at 0; `val` is x's parent.
        matrix[x][0] = 1
        for y in graph[x]:
            if y != val:
                compute(y, x, graph, matrix, most)
                # Knapsack-style merge of child y into x, in place;
                # iterating i downward avoids reusing updated entries.
                for i in range(most, -1, -1):
                    matrix[x][i] *= matrix[y][0]
                    for j in range(1, i + 1):
                        matrix[x][i] += matrix[x][i - j] * matrix[y][j]
                    # Presumably accounts for cutting the (x, y) edge —
                    # TODO confirm.
                    if i > 0:
                        matrix[x][i] += matrix[x][i - 1]
    compute(0, 0, adj, dp, k)
    # Aggregate over all vertices; the root contributes one extra cut level.
    ans = 0
    for u in range(n):
        for v in range(0, k + (u == 0)):
            ans += dp[u][v]
    return ans + 1
if __name__ == '__main__':
    # Read "n k", then n-1 edges, run the solver, and write the result to
    # the file whose path HackerRank supplies in OUTPUT_PATH.
    nk = input().split()
    n = int(nk[0])
    k = int(nk[1])
    edges = [list(map(int, input().rstrip().split())) for _ in range(n - 1)]
    result = cuttree(n, k, edges)
    # Context manager guarantees the handle is closed even if the write
    # raises (the original open()/close() pair leaked it on error).
    with open(os.environ['OUTPUT_PATH'], 'w') as fptr:
        fptr.write(str(result) + '\n')
| #!/bin/python3
import os
#
# Complete the cuttree function below.
#
def cuttree(n, k, edges):
    """Solve HackerRank's "Cut Tree": count the subtrees (connected
    subgraphs, including the empty one) obtainable from the n-node tree
    by cutting at most k edges."""
    from collections import defaultdict
    neighbours = defaultdict(list)
    for a, b in edges:
        # Input vertices are 1-based; store them 0-based.
        neighbours[a - 1].append(b - 1)
        neighbours[b - 1].append(a - 1)
    table = [[0] * (k + 1) for _ in range(n)]
    def dfs(node, parent, graph, dp, limit):
        dp[node][0] = 1
        for child in graph[node]:
            if child == parent:
                continue
            dfs(child, node, graph, dp, limit)
            # Knapsack-style merge of the child's table into the node's,
            # iterating the cut budget downwards so each child is counted once:
            # either keep edge (node, child) (multiply/convolve terms) or cut
            # it (the final `dp[node][budget - 1]` term).
            for budget in range(limit, -1, -1):
                dp[node][budget] *= dp[child][0]
                for used in range(1, budget + 1):
                    dp[node][budget] += dp[node][budget - used] * dp[child][used]
                if budget > 0:
                    dp[node][budget] += dp[node][budget - 1]
    dfs(0, 0, neighbours, table, k)
    total = 0
    for node in range(n):
        # Non-root nodes may use strictly fewer than k cuts; the root gets k.
        upper = k + (node == 0)
        total += sum(table[node][:upper])
    # +1 accounts for the empty subtree.
    return total + 1
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
nk = input().split()
n = int(nk[0])
k = int(nk[1])
edges = []
for _ in range(n - 1):
edges.append(list(map(int, input().rstrip().split())))
result = cuttree(n, k, edges)
fptr.write(str(result) + '\n')
fptr.close() | en | 0.763856 | #!/bin/python3 # # Complete the cuttree function below. # # # Write your code here. # | 3.137258 | 3 |
nbsvm/_nbsvm.py | fastforwardlabs/nbsvm | 4 | 6618827 | <reponame>fastforwardlabs/nbsvm
"""
sklearn interface to NBSVM classifier
"""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.validation import check_X_y
from sklearn.utils.multiclass import unique_labels
from sklearn.svm import LinearSVC
from scipy.sparse.csr import csr_matrix
import scipy
class NBSVM(BaseEstimator, LinearClassifierMixin):
    """
    A NBSVM classifier following the sklearn API, as described in Section 2.3
    of Baselines and bigrams: simple, good sentiment and topic classification.
    https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf
    Parameters
    ----------
    alpha : float, default=1.
        Smoothing parameter for count vectors.
    beta : float, default=0.25
        Interpolation parameter between NB and SVM.
    C : float, default=1.
        Penalty parameter of the L2 error term for SVM.
    Attributes
    ----------
    classes_ : ndarray, shape (n_classes,)
        The classes seen at :meth:`fit`.
    coef_ : array, shape = [1, n_features] if n_classes == 2
            else [n_classes, n_features]
        Weights assigned to the features, per sklearn.svm.LinearSVC.
    intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function, per sklearn.svm.LinearSVC.
    """
    # NOTE: the docstring previously advertised X_ / y_ attributes, but
    # fit() never stored them, so they are no longer documented.
    def __init__(self, alpha=1.0, beta=0.25, C=1.0):
        self.alpha = alpha
        self.beta = beta
        self.C = C
    def fit(self, X, y):
        """
        Fit the NBSVM to a dataset.
        Parameters
        ----------
        X : scipy.sparse.csr_matrix or numpy.ndarray,
            shape (n_samples, n_features)
            The training input samples.  Entries must be non-negative
            (they are interpreted as counts).
        y : array-like, shape (n_samples,)
            The target values. An array of int.
        Returns
        -------
        self : object
            Returns self.
        """
        X, y = self._validate(X, y)
        self.classes_ = unique_labels(y)
        # One binary one-vs-rest NBSVM per class; stack the results into
        # the usual sklearn coef_/intercept_ layout.
        coefficients, intercepts = self._fit_one_model_per_class(X, y)
        self.coef_ = np.concatenate(coefficients)
        self.intercept_ = np.concatenate(intercepts)
        return self
    def _validate(self, X, y):
        """
        Validate that X and y are the correct shape, and that X contains no
        negative entries.
        """
        X, y = check_X_y(X, y, accept_sparse="csr")
        if scipy.sparse.issparse(X):
            self._validate_sparse(X)
        elif isinstance(X, np.ndarray):
            self._validate_dense(X)
        else:
            raise ValueError("""
                Not a scipy.sparse.csr.csr_matrix or numpy ndarray
            """)
        return X, y
    def _validate_sparse(self, X):
        # Only stored (nonzero) entries can be negative in a CSR matrix.
        if (X.data < 0.0).any():
            raise ValueError("All X entries should be non-negative")
    def _validate_dense(self, X):
        if (X < 0.0).any():
            raise ValueError("All X entries should be non-negative")
    def _fit_one_model_per_class(self, X, y):
        """
        Treat an n-class classification problem as n binary classification
        problems.
        """
        binary_models = [
            self._fit_binary_nbsvm(X, y == class_) for class_ in self.classes_
        ]
        coefficients, intercepts = zip(*binary_models)
        return coefficients, intercepts
    def _fit_binary_nbsvm(self, X, y):
        """
        Fit a NBSVM classifier to a binary classification problem.
        """
        r = self._log_count_ratio(X, y)
        # Bug fix: numpy.ndarray has no .multiply() method, so the dense
        # input path accepted by _validate() used to crash here.  Scale
        # sparse matrices with .multiply() and dense arrays by broadcasting.
        if scipy.sparse.issparse(X):
            X = X.multiply(r)
        else:
            X = X * r
        svm = LinearSVC(C=self.C).fit(X, y)
        coef = self._interpolate(svm.coef_)
        coef *= r
        return coef, svm.intercept_
    def _log_count_ratio(self, X, y):
        """
        Log-count ratio computed from smoothed (by alpha) count vectors for
        each class. These are the coefficients in pure Multinomial Naive Bayes.
        """
        p = self.alpha + X[y == 1].sum(axis=0)
        q = self.alpha + X[y == 0].sum(axis=0)
        r = (self._log_normalize_count_vector(p) -
             self._log_normalize_count_vector(q))
        return r
    def _log_normalize_count_vector(self, arr):
        """
        Takes count vector and normalizes by L1 norm, then takes log.
        """
        # Bug fix: for sparse X the per-class sums arrive as a (1, n)
        # np.matrix, and np.linalg.norm(arr, 1) on a 2-D input computes the
        # *matrix* 1-norm (max absolute column sum) rather than the vector
        # L1 norm, so sparse and dense inputs were normalized differently.
        # Flatten first so both paths use the true L1 norm.
        arr = np.asarray(arr).ravel()
        return np.log(arr / np.linalg.norm(arr, 1))
    def _interpolate(self, coef):
        """
        Interpolate with parameter beta between Multinomial Naive Bayes
        (mean_weight) and SVM.
        """
        mean_weight = np.abs(coef).mean()
        return self.beta * coef + (1 - self.beta) * mean_weight
| """
sklearn interface to NBSVM classifier
"""
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.validation import check_X_y
from sklearn.utils.multiclass import unique_labels
from sklearn.svm import LinearSVC
from scipy.sparse.csr import csr_matrix
import scipy
class NBSVM(BaseEstimator, LinearClassifierMixin):
"""
A NBSVM classifier following the sklearn API, as described in Section 2.3
of Baselines and bigrams: simple, good sentiment and topic classification.
https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf
Parameters
----------
alpha : float, default=1.
Smoothing parameter for count vectors.
beta : float, default=0.25
Interpolation parameter between NB and SVM.
C : float, default=1.
Penalty parameter of the L2 error term for SVM.
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
coef_ : array, shape = [1, n_features] if n_classes == 2
else [n_classes, n_features]
Weights assigned to the features, per sklearn.svm.LinearSVC.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function, per sklearn.svm.LinearSVC.
"""
def __init__(self, alpha=1.0, beta=0.25, C=1.0):
self.alpha = alpha
self.beta = beta
self.C = C
def fit(self, X, y):
"""
Fit the NBSVM to a dataset.
Parameters
----------
X : scipy.sparse.csr_matrix or numpy.ndarray,
shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values. An array of int.
Returns
-------
self : object
Returns self.
"""
X, y = self._validate(X, y)
self.classes_ = unique_labels(y)
coefficients, intercepts = self._fit_one_model_per_class(X, y)
self.coef_ = np.concatenate(coefficients)
self.intercept_ = np.concatenate(intercepts)
return self
def _validate(self, X, y):
"""
Validate that X and y are the correct shape, and that X contains no
negative entries.
"""
X, y = check_X_y(X, y, accept_sparse="csr")
if scipy.sparse.issparse(X):
self._validate_sparse(X)
elif isinstance(X, np.ndarray):
self._validate_dense(X)
else:
raise ValueError("""
Not a scipy.sparse.csr.csr_matrix or numpy ndarray
""")
return X, y
def _validate_sparse(self, X):
if (X.data < 0.0).any():
raise ValueError("All X entries should be non-negative")
def _validate_dense(self, X):
if (X < 0.0).any():
raise ValueError("All X entries should be non-negative")
def _fit_one_model_per_class(self, X, y):
"""
Treat an n-class classification problem as n binary classification
problems.
"""
binary_models = [
self._fit_binary_nbsvm(X, y == class_) for class_ in self.classes_
]
coefficients, intercepts = zip(*binary_models)
return coefficients, intercepts
def _fit_binary_nbsvm(self, X, y):
"""
Fit a NBSVM classifier to a binary classification problem.
"""
r = self._log_count_ratio(X, y)
X = X.multiply(r)
svm = LinearSVC(C=self.C).fit(X, y)
coef = self._interpolate(svm.coef_)
coef *= r
return coef, svm.intercept_
def _log_count_ratio(self, X, y):
"""
Log-count ratio computed from smoothed (by alpha) count vectors for
each class. These are the coefficients in pure Multinomial Naive Bayes.
"""
p = self.alpha + X[y == 1].sum(axis=0)
q = self.alpha + X[y == 0].sum(axis=0)
r = (self._log_normalize_count_vector(p) -
self._log_normalize_count_vector(q))
return r
def _log_normalize_count_vector(self, arr):
"""
Takes count vector and normalizes by L1 norm, then takes log.
"""
return np.log(arr / np.linalg.norm(arr, 1))
def _interpolate(self, coef):
"""
Interpolate with parameter beta between Multinomial Naive Bayes
(mean_weight) and SVM.
"""
mean_weight = np.abs(coef).mean()
return self.beta * coef + (1 - self.beta) * mean_weight | en | 0.661717 | sklearn interface to NBSVM classifier A NBSVM classifier following the sklearn API, as described in Section 2.3 of Baselines and bigrams: simple, good sentiment and topic classification. https://nlp.stanford.edu/pubs/sidaw12_simple_sentiment.pdf Parameters ---------- alpha : float, default=1. Smoothing parameter for count vectors. beta : float, default=0.25 Interpolation parameter between NB and SVM. C : float, default=1. Penalty parameter of the L2 error term for SVM. Attributes ---------- X_ : ndarray, shape (n_samples, n_features) The input passed during :meth:`fit`. y_ : ndarray, shape (n_samples,) The labels passed during :meth:`fit`. classes_ : ndarray, shape (n_classes,) The classes seen at :meth:`fit`. coef_ : array, shape = [1, n_features] if n_classes == 2 else [n_classes, n_features] Weights assigned to the features, per sklearn.svm.LinearSVC. intercept_ : array, shape = [1] if n_classes == 2 else [n_classes] Constants in decision function, per sklearn.svm.LinearSVC. Fit the NBSVM to a dataset. Parameters ---------- X : scipy.sparse.csr_matrix or numpy.ndarray, shape (n_samples, n_features) The training input samples. y : array-like, shape (n_samples,) The target values. An array of int. Returns ------- self : object Returns self. Validate that X and y are the correct shape, and that X contains no negative entries. Not a scipy.sparse.csr.csr_matrix or numpy ndarray Treat an n-class classification problem as n binary classification problems. Fit a NBSVM classifier to a binary classification problem. Log-count ratio computed from smoothed (by alpha) count vectors for each class. These are the coefficients in pure Multinomial Naive Bayes. Takes count vector and normalizes by L1 norm, then takes log. Interpolate with parameter beta between Multinomial Naive Bayes (mean_weight) and SVM. | 2.950207 | 3 |
handlers/user_location.py | openmaker-eu/watchtower | 2 | 6618828 | """
User Location Handlers for Watchtower
"""
__author__ = ['<NAME>', '<NAME>']
import tornado.web
import tornado.escape
from handlers.base import BaseHandler, TemplateRendering, Api500ErrorHandler
from apis import apiv13
class PredictedLocationV13Handler(BaseHandler, TemplateRendering, Api500ErrorHandler):
    """Return predicted locations for a comma-separated `user_ids` query
    argument as a JSON response (payload comes from apiv13)."""
    def get(self):
        raw = self.get_argument("user_ids", default="", strip=True)
        try:
            # Skip empty fragments so "", "1,,2" and trailing commas parse
            # instead of discarding the whole request.
            user_ids = [int(x) for x in raw.split(",") if x.strip()]
        except ValueError:
            # Non-numeric input falls back to "no users" as before, but the
            # bare `except:` that swallowed every exception (including
            # SystemExit/KeyboardInterrupt) is gone.
            user_ids = []
        locations = apiv13.getPredictedLocations(user_ids)
        self.set_header('Content-Type', 'application/json')
        self.write(locations)
| """
User Location Handlers for Watchtower
"""
__author__ = ['<NAME>', '<NAME>']
import tornado.web
import tornado.escape
from handlers.base import BaseHandler, TemplateRendering, Api500ErrorHandler
from apis import apiv13
class PredictedLocationV13Handler(BaseHandler, TemplateRendering, Api500ErrorHandler):
    """Return predicted locations for a comma-separated `user_ids` query
    argument as a JSON response (payload comes from apiv13)."""
    def get(self):
        raw = self.get_argument("user_ids", default="", strip=True)
        try:
            # Skip empty fragments so "", "1,,2" and trailing commas parse
            # instead of discarding the whole request.
            user_ids = [int(x) for x in raw.split(",") if x.strip()]
        except ValueError:
            # Non-numeric input falls back to "no users" as before, but the
            # bare `except:` that swallowed every exception (including
            # SystemExit/KeyboardInterrupt) is gone.
            user_ids = []
        locations = apiv13.getPredictedLocations(user_ids)
        self.set_header('Content-Type', 'application/json')
        self.write(locations)
| en | 0.705792 | User Location Handlers for Watchtower | 2.247406 | 2 |
Detection_and_control/ASUM-GUI-with-detection-and-control/layers/modules/__init__.py | kebijuelun/ASUM | 2 | 6618829 | <reponame>kebijuelun/ASUM
from .l2norm import L2Norm
from .multibox_loss import MultiBoxLoss
from .multibox_loss_addfocalloss import FocalLoss_weighted
from .multibox_loss_addweightedfocalloss import FocalLoss_decayweighted
__all__ = ["L2Norm", "MultiBoxLoss"]
| from .l2norm import L2Norm
from .multibox_loss import MultiBoxLoss
from .multibox_loss_addfocalloss import FocalLoss_weighted
from .multibox_loss_addweightedfocalloss import FocalLoss_decayweighted
__all__ = ["L2Norm", "MultiBoxLoss"] | none | 1 | 1.059094 | 1 | |
mcp3008-and-gpio-to-midi.py | whofferbert/mcp3008-and-gpio-to-midi | 0 | 6618830 | #!/usr/bin/python3
# a script to watch some GPIO pins, as well
# as an mcp3008, and spit out midi info about it
import mido
import re
import os
import time
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
from uresponsivevalue.uresponsivevalue import ResponsiveValue
import RPi.GPIO as GPIO
import threading
# the name of how it'll show up
midiName = "The_Never_MIDI"
# TODO these callbacks should be better
def my_callback(channel):
    # Forward-button edge handler (GPIO 6, configured with PUD_UP below):
    # HIGH level -> CC 11 = 0, LOW level -> CC 11 = 127.
    # NOTE(review): presumably LOW means "pressed" (button to ground) --
    # confirm wiring.
    level_is_high = GPIO.input(channel) == GPIO.HIGH
    send_cc(0, 11, 0 if level_is_high else 127)
def my_callback2(channel):
    # Back-button edge handler (GPIO 13, configured with PUD_UP below):
    # HIGH level -> CC 12 = 0, LOW level -> CC 12 = 127.
    # NOTE(review): presumably LOW means "pressed" (button to ground) --
    # confirm wiring.
    level_is_high = GPIO.input(channel) == GPIO.HIGH
    send_cc(0, 12, 0 if level_is_high else 127)
# create the spi bus for the mcp3008 (must be attached to spi bus)
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select) (this can be any unused GPIO pin)
cs = digitalio.DigitalInOut(board.D12)
# create the mcp object:
mcp = MCP.MCP3008(spi, cs)
# extend AnalogIn class with some bits for memory and call based interaction
class AnalogInMidi(AnalogIn):
    """AnalogIn extended with per-knob state for the MIDI mapping."""
    # Last 0-127 value sent for this knob, so unchanged reads are not re-sent.
    last = 0
    # MIDI CC number this analog channel is mapped to.
    midiAssignment = 0
    # this is necessary to work with ResponsiveValue
    # (ResponsiveValue wants a zero-argument callable returning the raw reading)
    def readVal(self):
        return self.value
# set tolerance for something reasonable for midi
adcTolerance = 65535 / 128 * 1.25
# create an analog input channel on pin 0-3 of the ADC
# and their various properties
# bottom forward
chan0 = AnalogInMidi(mcp, MCP.P0)
chan0.midiAssignment = 13
chan0.smoothed = ResponsiveValue(chan0.readVal, max_value=65535, activity_threshold=adcTolerance)
# top forward
chan1 = AnalogInMidi(mcp, MCP.P1)
chan1.midiAssignment = 7
chan1.smoothed = ResponsiveValue(chan1.readVal, max_value=65535, activity_threshold=adcTolerance)
# bottom back
chan2 = AnalogInMidi(mcp, MCP.P2)
chan2.midiAssignment = 10
chan2.smoothed = ResponsiveValue(chan2.readVal, max_value=65535, activity_threshold=adcTolerance)
# top back
chan3 = AnalogInMidi(mcp, MCP.P3)
chan3.midiAssignment = 9
chan3.smoothed = ResponsiveValue(chan3.readVal, max_value=65535, activity_threshold=adcTolerance)
# array of knobs
knobArr = [chan0, chan1, chan2, chan3]
# set GPIO for raspberry pi
GPIO.setmode(GPIO.BCM)
# https://raspberrypi.stackexchange.com/questions/76667/debouncing-buttons-with-rpi-gpio-too-many-events-detected
class ButtonHandler(threading.Thread):
    """Debounced GPIO edge handler.
    RPi.GPIO invokes __call__ on every raw edge; __call__ schedules read()
    `bouncetime` ms later and read() re-samples the pin, so contact bounce
    cannot fire the user callback more than once per real press/release.
    """
    def __init__(self, pin, func, edge='both', bouncetime=200):
        super().__init__(daemon=True)
        self.edge = edge
        self.func = func
        self.pin = pin
        # Milliseconds -> seconds for threading.Timer.
        self.bouncetime = float(bouncetime)/1000
        self.lastpinval = GPIO.input(self.pin)
        # Held while a debounce timer is pending, so extra edges are dropped.
        self.lock = threading.Lock()
    def __call__(self, *args):
        # Non-blocking acquire: if a timer is already pending, ignore this edge.
        if not self.lock.acquire(blocking=False):
            return
        t = threading.Timer(self.bouncetime, self.read, args=args)
        t.start()
    def read(self, *args):
        pinval = GPIO.input(self.pin)
        # Fire the user callback only on a genuine level change matching the
        # requested edge direction(s).
        if (
                ((pinval == 0 and self.lastpinval == 1) and
                 (self.edge in ['falling', 'both'])) or
                ((pinval == 1 and self.lastpinval == 0) and
                 (self.edge in ['rising', 'both']))
        ):
            self.func(*args)
        self.lastpinval = pinval
        self.lock.release()
# set up interrupt pins for switches, 10ms debounce
# forward button
GPIO.setup(6, GPIO.IN, pull_up_down=GPIO.PUD_UP)
tmp_cb1 = ButtonHandler(6, my_callback, edge='both', bouncetime=10)
tmp_cb1.start()
GPIO.add_event_detect(6, GPIO.BOTH, callback=tmp_cb1)
# back button
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_UP)
tmp_cb2 = ButtonHandler(13, my_callback2, edge='both', bouncetime=10)
tmp_cb2.start()
GPIO.add_event_detect(13, GPIO.BOTH, callback=tmp_cb2)
# this remaps a value from original (left) range to new (right) range
def remap_range(value, left_min, left_max, right_min, right_max):
    """Linearly rescale *value* from [left_min, left_max] into
    [right_min, right_max], truncating the result to an int."""
    source_span = left_max - left_min
    target_span = right_max - right_min
    # Fraction of the way through the source range (0.0 .. 1.0).
    fraction = int(value - left_min) / int(source_span)
    return int(right_min + fraction * target_span)
# send midi cc messages
def send_cc(channel, ccnum, val):
    """Send one MIDI control-change message to the virtual thru port.
    channel: MIDI channel (0-15); ccnum: controller number (0-127);
    val: controller value (0-127).
    """
    msg = mido.Message('control_change', channel=channel, control=ccnum, value=val)
    # Bug fix: the port was opened on every call and never closed, leaking
    # an OS-level port handle per message.  mido ports are context managers,
    # so this closes the port after the send.
    with mido.open_output(midi_output_device) as output:
        output.send(msg)
# check linux OS for amidithru running already
def check_for_running_midi():
    """Probe for a running `amidithru <midiName>` process via ps|grep.
    Returns the shell exit status: 0 (falsy) if the process was found,
    nonzero (truthy) otherwise -- callers use truthiness as "not running".
    """
    # TODO make this better, it's not pythonic at all
    # if only I weren't a linux sysadmin
    checkGrep = 'ps -ef | grep -Po "amidithru\s*' + midiName + '" | grep -v grep >/dev/null'
    check = os.system(checkGrep)
    #print("check val is %s" % check)
    # 0 = running
    # 256/anything else = nope
    return check
# set up backend with mido
def setup_midi_backend():
    """Select the rtmidi backend, spawn an `amidithru` virtual port named
    `midiName` if one is not already running, and store the matching rtmidi
    output port name in the module-level `midi_output_device`."""
    mido.set_backend('mido.backends.rtmidi')
    # system command to set up the midi thru port
    if check_for_running_midi():
        runCmd = "amidithru '" + midiName + "' &"
        os.system(runCmd)
    # wait a sec for amidithru to do it's thing
    time.sleep(1)
    # regex to match on rtmidi port name convention
    # TODO is it necessary to write: "\s+(\d+)?:\d+)" instead?
    nameRegex = "(" + midiName + ":" + midiName + "\s+\d+:\d+)"
    matcher = re.compile(nameRegex)
    newList = list(filter(matcher.match, mido.get_output_names()))
    # all to get the name of the thing we just made
    global midi_output_device
    midi_output_device = newList[0]
    #print("Using MIDI device:", midi_output_device)
# run checks and updates forever
def loop():
    """Poll each smoothed ADC knob forever, emitting a MIDI CC whenever a
    knob's mapped 0-127 value changes; sleeps 10 ms after each knob read."""
    while True:
        for knob in knobArr:
            # we'll assume that the pot didn't move
            trim_pot_changed = False
            # update the analog pin
            knob.smoothed.update()
            # get smoothed value
            trim_pot = knob.smoothed.responsive_value
            # convert to midi range
            # convert 16bit adc0 (0-65535) trim pot read into 0-127 volume level.
            # weird misnomer. the mcp3008 is 10 bit resolution, but this library
            # defaults to 16 bits of resolution on analog inputs
            set_midi = remap_range(trim_pot, 0, 65535, 0, 127)
            # how much has it changed since the last read?
            pot_adjust = abs(set_midi - knob.last)
            if pot_adjust > 0:
                trim_pot_changed = True
            if trim_pot_changed:
                #print('midival = {volume} for pot {id}' .format(volume = set_midi, id = knob.midiAssignment))
                send_cc(0, knob.midiAssignment, set_midi)
                # save the potentiometer reading for the next loop
                knob.last = set_midi
            # hang out for a bit, 10ms
            time.sleep(0.01)
def run():
    """Entry point: initialize the MIDI backend/port, publish the initial
    button states, then poll the knobs forever."""
    # set up the midi stuff
    setup_midi_backend()
    # do initial callbacks to set button states
    my_callback(6)
    my_callback2(13)
    # then just loop
    loop()
if __name__ == "__main__":
run()
| #!/usr/bin/python3
# a script to watch some GPIO pins, as well
# as an mcp3008, and spit out midi info about it
import mido
import re
import os
import time
import busio
import digitalio
import board
import adafruit_mcp3xxx.mcp3008 as MCP
from adafruit_mcp3xxx.analog_in import AnalogIn
from uresponsivevalue.uresponsivevalue import ResponsiveValue
import RPi.GPIO as GPIO
import threading
# the name of how it'll show up
midiName = "The_Never_MIDI"
# TODO these callbacks should be better
def my_callback(channel):
# 11
if GPIO.input(channel) == GPIO.HIGH:
send_cc(0, 11, 0)
#print('1 ▼ ')
else:
send_cc(0, 11, 127)
#print('1 ▲ ')
def my_callback2(channel):
# 12
if GPIO.input(channel) == GPIO.HIGH:
send_cc(0, 12, 0)
#print('2 ▼ ')
else:
send_cc(0, 12, 127)
#print('2 ▲ ')
# create the spi bus for the mcp3008 (must be attached to spi bus)
spi = busio.SPI(clock=board.SCK, MISO=board.MISO, MOSI=board.MOSI)
# create the cs (chip select) (this can be any unused GPIO pin)
cs = digitalio.DigitalInOut(board.D12)
# create the mcp object:
mcp = MCP.MCP3008(spi, cs)
# extend AnalogIn class with some bits for memory and call based interaction
class AnalogInMidi(AnalogIn):
last = 0
midiAssignment = 0
# this is necessary to work with ResponsiveValue
def readVal(self):
return self.value
# set tolerance for something reasonable for midi
adcTolerance = 65535 / 128 * 1.25
# create an analog input channel on pin 0-3 of the ADC
# and their various properties
# bottom forward
chan0 = AnalogInMidi(mcp, MCP.P0)
chan0.midiAssignment = 13
chan0.smoothed = ResponsiveValue(chan0.readVal, max_value=65535, activity_threshold=adcTolerance)
# top forward
chan1 = AnalogInMidi(mcp, MCP.P1)
chan1.midiAssignment = 7
chan1.smoothed = ResponsiveValue(chan1.readVal, max_value=65535, activity_threshold=adcTolerance)
# bottom back
chan2 = AnalogInMidi(mcp, MCP.P2)
chan2.midiAssignment = 10
chan2.smoothed = ResponsiveValue(chan2.readVal, max_value=65535, activity_threshold=adcTolerance)
# top back
chan3 = AnalogInMidi(mcp, MCP.P3)
chan3.midiAssignment = 9
chan3.smoothed = ResponsiveValue(chan3.readVal, max_value=65535, activity_threshold=adcTolerance)
# array of knobs
knobArr = [chan0, chan1, chan2, chan3]
# set GPIO for raspberry pi
GPIO.setmode(GPIO.BCM)
# https://raspberrypi.stackexchange.com/questions/76667/debouncing-buttons-with-rpi-gpio-too-many-events-detected
class ButtonHandler(threading.Thread):
def __init__(self, pin, func, edge='both', bouncetime=200):
super().__init__(daemon=True)
self.edge = edge
self.func = func
self.pin = pin
self.bouncetime = float(bouncetime)/1000
self.lastpinval = GPIO.input(self.pin)
self.lock = threading.Lock()
def __call__(self, *args):
if not self.lock.acquire(blocking=False):
return
t = threading.Timer(self.bouncetime, self.read, args=args)
t.start()
def read(self, *args):
pinval = GPIO.input(self.pin)
if (
((pinval == 0 and self.lastpinval == 1) and
(self.edge in ['falling', 'both'])) or
((pinval == 1 and self.lastpinval == 0) and
(self.edge in ['rising', 'both']))
):
self.func(*args)
self.lastpinval = pinval
self.lock.release()
# set up interrupt pins for switches, 10ms debounce
# forward button
GPIO.setup(6, GPIO.IN, pull_up_down=GPIO.PUD_UP)
tmp_cb1 = ButtonHandler(6, my_callback, edge='both', bouncetime=10)
tmp_cb1.start()
GPIO.add_event_detect(6, GPIO.BOTH, callback=tmp_cb1)
# back button
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_UP)
tmp_cb2 = ButtonHandler(13, my_callback2, edge='both', bouncetime=10)
tmp_cb2.start()
GPIO.add_event_detect(13, GPIO.BOTH, callback=tmp_cb2)
# this remaps a value from original (left) range to new (right) range
def remap_range(value, left_min, left_max, right_min, right_max):
    """Linearly rescale *value* from [left_min, left_max] into
    [right_min, right_max], truncating the result to an int."""
    source_span = left_max - left_min
    target_span = right_max - right_min
    # Fraction of the way through the source range (0.0 .. 1.0).
    fraction = int(value - left_min) / int(source_span)
    return int(right_min + fraction * target_span)
# send midi cc messages
def send_cc(channel, ccnum, val):
    """Send one MIDI control-change message to the virtual thru port.
    channel: MIDI channel (0-15); ccnum: controller number (0-127);
    val: controller value (0-127).
    """
    msg = mido.Message('control_change', channel=channel, control=ccnum, value=val)
    # Bug fix: the port was opened on every call and never closed, leaking
    # an OS-level port handle per message.  mido ports are context managers,
    # so this closes the port after the send.
    with mido.open_output(midi_output_device) as output:
        output.send(msg)
# check linux OS for amidithru running already
def check_for_running_midi():
# TODO make this better, it's not pythonic at all
# if only I weren't a linux sysadmin
checkGrep = 'ps -ef | grep -Po "amidithru\s*' + midiName + '" | grep -v grep >/dev/null'
check = os.system(checkGrep)
#print("check val is %s" % check)
# 0 = running
# 256/anything else = nope
return check
# set up backend with mido
def setup_midi_backend():
mido.set_backend('mido.backends.rtmidi')
# system command to set up the midi thru port
if check_for_running_midi():
runCmd = "amidithru '" + midiName + "' &"
os.system(runCmd)
# wait a sec for amidithru to do it's thing
time.sleep(1)
# regex to match on rtmidi port name convention
# TODO is it necessary to write: "\s+(\d+)?:\d+)" instead?
nameRegex = "(" + midiName + ":" + midiName + "\s+\d+:\d+)"
matcher = re.compile(nameRegex)
newList = list(filter(matcher.match, mido.get_output_names()))
# all to get the name of the thing we just made
global midi_output_device
midi_output_device = newList[0]
#print("Using MIDI device:", midi_output_device)
# run checks and updates forever
def loop():
while True:
for knob in knobArr:
# we'll assume that the pot didn't move
trim_pot_changed = False
# update the analog pin
knob.smoothed.update()
# get smoothed value
trim_pot = knob.smoothed.responsive_value
# convert to midi range
# convert 16bit adc0 (0-65535) trim pot read into 0-127 volume level.
# weird misnomer. the mcp3008 is 10 bit resolution, but this library
# defaults to 16 bits of resolution on analog inputs
set_midi = remap_range(trim_pot, 0, 65535, 0, 127)
# how much has it changed since the last read?
pot_adjust = abs(set_midi - knob.last)
if pot_adjust > 0:
trim_pot_changed = True
if trim_pot_changed:
#print('midival = {volume} for pot {id}' .format(volume = set_midi, id = knob.midiAssignment))
send_cc(0, knob.midiAssignment, set_midi)
# save the potentiometer reading for the next loop
knob.last = set_midi
# hang out for a bit, 10ms
time.sleep(0.01)
def run():
# set up the midi stuff
setup_midi_backend()
# do initial callbacks to set button states
my_callback(6)
my_callback2(13)
# then just loop
loop()
if __name__ == "__main__":
run()
| en | 0.79764 | #!/usr/bin/python3 # a script to watch some GPIO pins, as well # as an mcp3008, and spit out midi info about it # the name of how it'll show up # TODO these callbacks should be better # 11 #print('1 ▼ ') #print('1 ▲ ') # 12 #print('2 ▼ ') #print('2 ▲ ') # create the spi bus for the mcp3008 (must be attached to spi bus) # create the cs (chip select) (this can be any unused GPIO pin) # create the mcp object: # extend AnalogIn class with some bits for memory and call based interaction # this is necessary to work with ResponsiveValue # set tolerance for something reasonable for midi # create an analog input channel on pin 0-3 of the ADC # and their various properties # bottom forward # top forward # bottom back # top back # array of knobs # set GPIO for raspberry pi # https://raspberrypi.stackexchange.com/questions/76667/debouncing-buttons-with-rpi-gpio-too-many-events-detected # set up interrupt pins for switches, 10ms debounce # forward button # back button # this remaps a value from original (left) range to new (right) range # Figure out how 'wide' each range is # Convert the left range into a 0-1 range (int) # Convert the 0-1 range into a value in the right range. # send midi cc messages # check linux OS for amidithru running already # TODO make this better, it's not pythonic at all # if only I weren't a linux sysadmin #print("check val is %s" % check) # 0 = running # 256/anything else = nope # set up backend with mido # system command to set up the midi thru port # wait a sec for amidithru to do it's thing # regex to match on rtmidi port name convention # TODO is it necessary to write: "\s+(\d+)?:\d+)" instead? # all to get the name of the thing we just made #print("Using MIDI device:", midi_output_device) # run checks and updates forever # we'll assume that the pot didn't move # update the analog pin # get smoothed value # convert to midi range # convert 16bit adc0 (0-65535) trim pot read into 0-127 volume level. # weird misnomer. 
the mcp3008 is 10 bit resolution, but this library # defaults to 16 bits of resolution on analog inputs # how much has it changed since the last read? #print('midival = {volume} for pot {id}' .format(volume = set_midi, id = knob.midiAssignment)) # save the potentiometer reading for the next loop # hang out for a bit, 10ms # set up the midi stuff # do initial callbacks to set button states # then just loop | 3.13364 | 3 |
newgan/plotting/__init__.py | okitouni/NeWGAN | 0 | 6618831 | <reponame>okitouni/NeWGAN<filename>newgan/plotting/__init__.py<gh_stars>0
from .plotting import plot_train_hists
| from .plotting import plot_train_hists | none | 1 | 1.004401 | 1 | |
history/apps.py | hereischen/Goliath | 5 | 6618832 | <filename>history/apps.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class HistoryConfig(AppConfig):
    # Django app registry entry for the inventory-history app.
    name = 'history'
    # Admin display name ("inventory history records"); runtime string,
    # intentionally left in Chinese.
    verbose_name = '库存历史记录'
    verbose_name_plural = verbose_name
| <filename>history/apps.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class HistoryConfig(AppConfig):
    # Django app registry entry for the inventory-history app.
    name = 'history'
    # Admin display name ("inventory history records"); runtime string,
    # intentionally left in Chinese.
    verbose_name = '库存历史记录'
    verbose_name_plural = verbose_name
| en | 0.769321 | # -*- coding: utf-8 -*- | 1.386129 | 1 |
TZController.py | ismaelliang/TradeZeroWithCode | 1 | 6618833 | from pynput.mouse import Button, Controller as mController
from pynput.keyboard import Key, Controller as kController
import clipboard
from enum import Enum
import time
class MOUSE_POS(Enum):
    """
    Mouse position enumerator.
    Absolute screen (x, y) pixel coordinates for the trading UI's inputs
    and buttons.  NOTE(review): these are tuned to one specific window
    size/placement and must be recalibrated if the layout moves.
    """
    # --- Regular order panel ---
    TITLE_TICKER = (1365, 23)
    INPUT_TICKER = (1286, 57)
    INPUT_PRICE = (1273, 492)
    INPUT_QUANTITY = (1270, 462)
    INPUT_QUANTITY_COPY = (1331, 496)
    BTN_POSITION = (1282, 529)
    BTN_BUY = (1287, 551)
    BTN_SELL = (1371, 549)
    BTN_SHORT = (1466, 545)
    BTN_COVER = (1560, 549)
    BTN_CANCEL = (1369, 527)
    # --- Conditional order window (CDT_* prefix) ---
    CDT_INPUT_TICKER = (71, 50)
    CDT_1st_ACTION_SEL = (122, 140)
    CDT_1st_ACTION_BUY = (85, 158)
    CDT_1st_ACTION_SHORT = (81, 195)
    CDT_1st_INPUT_QTY = (182, 142)
    CDT_1st_TYPE_SEL = (326, 141)
    CDT_1st_TYPE_LIMIT = (299, 176)
    CDT_1st_INPUT_PRICE = (193, 173)
    CDT_2nd_ACTION_SEL = (120, 259)
    CDT_2nd_ACTION_SELL = (76, 296)
    CDT_2nd_ACTION_COVER = (83, 331)
    CDT_2nd_INPUT_QTY = (191, 262)
    CDT_2nd_TYPE_SEL = (326, 260)
    CDT_2nd_TYPE_RANGE = (290, 385)
    CDT_2nd_INPUT_HIGH_PRICE = (407, 297)
    CDT_2nd_INPUT_LOW_PRICE = (518, 296)
    CDT_BTN_SEND = (93, 356)
    CDT_BTN_CANCEL = (245, 355)
class TZController:
__ms = mController()
__kb = kController()
_origin_pos = None
    def __init__(self):
        # Nothing to set up; the mouse/keyboard controllers are class-level.
        return
'''
Some basic functions
'''
    def __time_break(self):
        # Short settle delay (100 ms) between UI actions.
        time.sleep(0.1)
    def _click(self, pos):
        # Move the cursor to the MOUSE_POS enum's (x, y) and left-click,
        # then pause briefly so the UI can react.
        self.__ms.position = pos.value
        self.__ms.click(Button.left)
        self.__time_break()
    def _dbl_clk(self, pos):
        # Double left-click at the given MOUSE_POS (selects an input's
        # contents so the next _typein replaces them), then settle.
        self.__ms.position = pos.value
        self.__ms.click(Button.left, 2)
        self.__time_break()
    def _r_click(self, pos):
        # Right-click at the given MOUSE_POS, then settle.
        self.__ms.position = pos.value
        self.__ms.click(Button.right)
        self.__time_break()
    def _typein(self, text):
        # Type the text into the focused input, then press Enter to commit.
        self.__kb.type(text)
        self.__time_break()
        self.__kb.press(Key.enter)
        self.__kb.release(Key.enter)
        self.__time_break()
'''
Regular Order Actions
'''
    def ticker(self, symbol):
        '''
        Change the ticker for the regular order & charting window.
        '''
        self._dbl_clk( MOUSE_POS.TITLE_TICKER )
        self._typein(symbol)
    def price(self, price):
        '''
        Set the order price.
        '''
        self._dbl_clk( MOUSE_POS.INPUT_PRICE )
        self._typein(str(price))
        # Click the title bar afterwards -- presumably to move focus away
        # from the price input; confirm against the UI.
        self._click( MOUSE_POS.TITLE_TICKER )
    def qty(self, qty):
        '''
        Set the order quantity.
        '''
        self._dbl_clk( MOUSE_POS.INPUT_QUANTITY )
        self._typein(str(qty))
        # Click the title bar afterwards -- presumably to move focus away
        # from the quantity input; confirm against the UI.
        self._click( MOUSE_POS.TITLE_TICKER )
    def lmt(self, qty, price):
        '''
        Prepare a limit order with the given quantity and price
        (does not submit -- follow with buy()/sell()/short()/cover()).
        '''
        self.qty(qty)
        self.price(price)
    def buy(self):
        '''
        Click the BUY button.
        '''
        self._click( MOUSE_POS.BTN_BUY )
    def sell(self):
        '''
        Click the SELL button.
        '''
        self._click( MOUSE_POS.BTN_SELL )
def short(self):
'''
Click SHORT button
'''
self._click( MOUSE_POS.BTN_SHORT )
def cover(self):
'''
Click COVER button
'''
self._click( MOUSE_POS.BTN_COVER )
def cancel(self):
'''
Click CANCEL button
'''
elf.__ms.position = MOUSE_POS.BTN_CANCEL.value
self.__ms.click(Button.left)
'''
Conditional Order Actions
'''
def cdt_ticker(self, symbol):
'''
Change ticker in Condition order window
Ensure condition order window opening
'''
self._dbl_clk( MOUSE_POS.CDT_INPUT_TICKER )
self._typein(symbol)
def cdt_buy_then_sell(self, target, stop, price, qty):
'''
Conditional order 1
'''
self._click( MOUSE_POS.CDT_1st_ACTION_SEL )
self._click( MOUSE_POS.CDT_1st_ACTION_BUY )
self._click( MOUSE_POS.CDT_1st_TYPE_SEL )
self._click( MOUSE_POS.CDT_1st_TYPE_LIMIT )
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_PRICE )
self._typein(str(price))
'''
Conditional order 2
'''
self._click( MOUSE_POS.CDT_2nd_ACTION_SEL )
self._click( MOUSE_POS.CDT_2nd_ACTION_SELL )
self._click( MOUSE_POS.CDT_2nd_TYPE_SEL )
self._click( MOUSE_POS.CDT_2nd_TYPE_RANGE )
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_HIGH_PRICE )
self._typein(str(target)) # higher price as target
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_LOW_PRICE )
self._typein(str(stop)) # lower price as stop
def cdt_short_then_cover(self, target, stop, price, qty):
'''
Conditional order 1
'''
self._click( MOUSE_POS.CDT_1st_ACTION_SEL )
self._click( MOUSE_POS.CDT_1st_ACTION_SHORT )
self._click( MOUSE_POS.CDT_1st_TYPE_SEL )
self._click( MOUSE_POS.CDT_1st_TYPE_LIMIT )
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_PRICE )
self._typein(str(price))
'''
Conditional order 2
'''
self._click( MOUSE_POS.CDT_2nd_ACTION_SEL )
self._click( MOUSE_POS.CDT_2nd_ACTION_COVER )
self._click( MOUSE_POS.CDT_2nd_TYPE_SEL )
self._click( MOUSE_POS.CDT_2nd_TYPE_RANGE )
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_HIGH_PRICE )
self._typein(str(stop)) # higher price as stop
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_LOW_PRICE )
self._typein(str(target)) # lower price as target
def cdt_send(self):
self._click( MOUSE_POS.CDT_BTN_SEND )
def cdt_cancel(self):
self._click( MOUSE_POS.CDT_BTN_CANCEL )
'''
Get
'''
def cur_pos(self):
'''
Return current position size for current ticker
'''
self._dbl_clk( MOUSE_POS.INPUT_QUANTITY )
self._r_click( MOUSE_POS.INPUT_QUANTITY )
self._click( MOUSE_POS.INPUT_QUANTITY_COPY )
return clipboard.paste()
def get_mouse_pos(self):
'''
Put your mouse on certain input or button,
Run this function to get its position,
Copy & paste it to config
'''
print('The current pointer position is {0}'.format(mController().position)) | from pynput.mouse import Button, Controller as mController
from pynput.keyboard import Key, Controller as kController
import clipboard
from enum import Enum
import time
class MOUSE_POS(Enum):
    """
    Mouse position enumerator.

    Screen coordinates (x, y) of the inputs and buttons this bot clicks.
    Calibrated for one specific window layout -- re-record with
    TZController.get_mouse_pos() if the window is moved or resized.
    """
    # --- regular order window -------------------------------------------
    TITLE_TICKER = (1365, 23)
    INPUT_TICKER = (1286, 57)
    INPUT_PRICE = (1273, 492)
    INPUT_QUANTITY = (1270, 462)
    INPUT_QUANTITY_COPY = (1331, 496)
    BTN_POSITION = (1282, 529)
    BTN_BUY = (1287, 551)
    BTN_SELL = (1371, 549)
    BTN_SHORT = (1466, 545)
    BTN_COVER = (1560, 549)
    BTN_CANCEL = (1369, 527)
    # --- conditional order window: leg 1 --------------------------------
    CDT_INPUT_TICKER = (71, 50)
    CDT_1st_ACTION_SEL = (122, 140)
    CDT_1st_ACTION_BUY = (85, 158)
    CDT_1st_ACTION_SHORT = (81, 195)
    CDT_1st_INPUT_QTY = (182, 142)
    CDT_1st_TYPE_SEL = (326, 141)
    CDT_1st_TYPE_LIMIT = (299, 176)
    CDT_1st_INPUT_PRICE = (193, 173)
    # --- conditional order window: leg 2 --------------------------------
    CDT_2nd_ACTION_SEL = (120, 259)
    CDT_2nd_ACTION_SELL = (76, 296)
    CDT_2nd_ACTION_COVER = (83, 331)
    CDT_2nd_INPUT_QTY = (191, 262)
    CDT_2nd_TYPE_SEL = (326, 260)
    CDT_2nd_TYPE_RANGE = (290, 385)
    CDT_2nd_INPUT_HIGH_PRICE = (407, 297)
    CDT_2nd_INPUT_LOW_PRICE = (518, 296)
    # --- conditional order window: submit / cancel ----------------------
    CDT_BTN_SEND = (93, 356)
    CDT_BTN_CANCEL = (245, 355)
class TZController:
__ms = mController()
__kb = kController()
_origin_pos = None
def __init__(self):
return
'''
Some basic functions
'''
def __time_break(self):
time.sleep(0.1)
def _click(self, pos):
self.__ms.position = pos.value
self.__ms.click(Button.left)
self.__time_break()
def _dbl_clk(self, pos):
self.__ms.position = pos.value
self.__ms.click(Button.left, 2)
self.__time_break()
def _r_click(self, pos):
self.__ms.position = pos.value
self.__ms.click(Button.right)
self.__time_break()
def _typein(self, text):
self.__kb.type(text)
self.__time_break()
self.__kb.press(Key.enter)
self.__kb.release(Key.enter)
self.__time_break()
'''
Regular Order Actions
'''
def ticker(self, symbol):
'''
Change ticker for regular order & chartingd window
'''
self._dbl_clk( MOUSE_POS.TITLE_TICKER )
self._typein(symbol)
def price(self, price):
'''
Set order price
'''
self._dbl_clk( MOUSE_POS.INPUT_PRICE )
self._typein(str(price))
self._click( MOUSE_POS.TITLE_TICKER )
def qty(self, qty):
'''
Set order quantity
'''
self._dbl_clk( MOUSE_POS.INPUT_QUANTITY )
self._typein(str(qty))
self._click( MOUSE_POS.TITLE_TICKER )
def lmt(self, qty, price):
'''
Prepare lmt order with qty/price
'''
self.qty(qty)
self.price(price)
def buy(self):
'''
Click BUY button
'''
self._click( MOUSE_POS.BTN_BUY )
def sell(self):
'''
Click SELL button
'''
self._click( MOUSE_POS.BTN_SELL )
def short(self):
'''
Click SHORT button
'''
self._click( MOUSE_POS.BTN_SHORT )
def cover(self):
'''
Click COVER button
'''
self._click( MOUSE_POS.BTN_COVER )
def cancel(self):
'''
Click CANCEL button
'''
elf.__ms.position = MOUSE_POS.BTN_CANCEL.value
self.__ms.click(Button.left)
'''
Conditional Order Actions
'''
def cdt_ticker(self, symbol):
'''
Change ticker in Condition order window
Ensure condition order window opening
'''
self._dbl_clk( MOUSE_POS.CDT_INPUT_TICKER )
self._typein(symbol)
def cdt_buy_then_sell(self, target, stop, price, qty):
'''
Conditional order 1
'''
self._click( MOUSE_POS.CDT_1st_ACTION_SEL )
self._click( MOUSE_POS.CDT_1st_ACTION_BUY )
self._click( MOUSE_POS.CDT_1st_TYPE_SEL )
self._click( MOUSE_POS.CDT_1st_TYPE_LIMIT )
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_PRICE )
self._typein(str(price))
'''
Conditional order 2
'''
self._click( MOUSE_POS.CDT_2nd_ACTION_SEL )
self._click( MOUSE_POS.CDT_2nd_ACTION_SELL )
self._click( MOUSE_POS.CDT_2nd_TYPE_SEL )
self._click( MOUSE_POS.CDT_2nd_TYPE_RANGE )
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_HIGH_PRICE )
self._typein(str(target)) # higher price as target
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_LOW_PRICE )
self._typein(str(stop)) # lower price as stop
def cdt_short_then_cover(self, target, stop, price, qty):
'''
Conditional order 1
'''
self._click( MOUSE_POS.CDT_1st_ACTION_SEL )
self._click( MOUSE_POS.CDT_1st_ACTION_SHORT )
self._click( MOUSE_POS.CDT_1st_TYPE_SEL )
self._click( MOUSE_POS.CDT_1st_TYPE_LIMIT )
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_1st_INPUT_PRICE )
self._typein(str(price))
'''
Conditional order 2
'''
self._click( MOUSE_POS.CDT_2nd_ACTION_SEL )
self._click( MOUSE_POS.CDT_2nd_ACTION_COVER )
self._click( MOUSE_POS.CDT_2nd_TYPE_SEL )
self._click( MOUSE_POS.CDT_2nd_TYPE_RANGE )
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_QTY )
self._typein(str(qty))
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_HIGH_PRICE )
self._typein(str(stop)) # higher price as stop
self._dbl_clk( MOUSE_POS.CDT_2nd_INPUT_LOW_PRICE )
self._typein(str(target)) # lower price as target
def cdt_send(self):
self._click( MOUSE_POS.CDT_BTN_SEND )
def cdt_cancel(self):
self._click( MOUSE_POS.CDT_BTN_CANCEL )
'''
Get
'''
def cur_pos(self):
'''
Return current position size for current ticker
'''
self._dbl_clk( MOUSE_POS.INPUT_QUANTITY )
self._r_click( MOUSE_POS.INPUT_QUANTITY )
self._click( MOUSE_POS.INPUT_QUANTITY_COPY )
return clipboard.paste()
def get_mouse_pos(self):
'''
Put your mouse on certain input or button,
Run this function to get its position,
Copy & paste it to config
'''
print('The current pointer position is {0}'.format(mController().position)) | en | 0.775857 | Mouse position enumerater Config specific input or button's cursor position Some basic functions Regular Order Actions Change ticker for regular order & chartingd window Set order price Set order quantity Prepare lmt order with qty/price Click BUY button Click SELL button Click SHORT button Click COVER button Click CANCEL button Conditional Order Actions Change ticker in Condition order window Ensure condition order window opening Conditional order 1 Conditional order 2 # higher price as target # lower price as stop Conditional order 1 Conditional order 2 # higher price as stop # lower price as target Get Return current position size for current ticker Put your mouse on certain input or button, Run this function to get its position, Copy & paste it to config | 3.115932 | 3 |
main/admin.py | sixyang/chouti | 0 | 6618834 | from django.contrib import admin
from main.models import MyUser, News
# Register your models here.
@admin.register(News)
class MyAdmin(admin.ModelAdmin):
    """Django admin configuration for News."""
    # Columns shown in the admin change-list view for News rows.
    list_display = ('id', 'title', 'news_id', 'pic_url', 'url', 'news_type_id',
                    'user_id')
| from django.contrib import admin
from main.models import MyUser, News
# Register your models here.
@admin.register(News)
class MyAdmin(admin.ModelAdmin):
    """Django admin configuration for News."""
    # Columns shown in the admin change-list view for News rows.
    list_display = ('id', 'title', 'news_id', 'pic_url', 'url', 'news_type_id',
                    'user_id')
| en | 0.968259 | # Register your models here. | 1.822487 | 2 |
leetcode/0-250/293-842. Split Array into Fibonacci Sequence.py | palash24/algorithms-and-data-structures | 23 | 6618835 | # 842. Split Array into Fibonacci Sequence
class Solution:
    def splitIntoFibonacci(self, S: str) -> List[int]:
        """Split the digit string S into a Fibonacci-like sequence.

        Returns a list of at least three non-negative ints where every
        piece fits in a signed 32-bit integer, no multi-digit piece has
        a leading zero, and fib[i] + fib[i+1] == fib[i+2] throughout.
        Returns [] when no such split exists.

        Fixes vs. the original:
        * Python strings have ``startswith``, not ``startsWith`` (the
          old code raised AttributeError on first use).
        * "0" by itself is a legal piece; only pieces longer than one
          digit may not start with '0'.
        * the success/failure returns were missing entirely.
        """
        INT_MAX = 2 ** 31 - 1
        n = len(S)

        def valid(piece: str) -> bool:
            # No leading zero unless the piece is exactly "0", and the
            # value must fit in a signed 32-bit int.
            return (len(piece) == 1 or piece[0] != '0') and int(piece) <= INT_MAX

        # A 32-bit int has at most 10 decimal digits, so neither of the
        # first two pieces ever needs to be longer than that.
        for i in range(min(10, n)):
            first = S[:i + 1]
            if not valid(first):
                break  # longer prefixes only get worse (zero/overflow)
            a = int(first)
            for j in range(i + 1, min(i + 11, n)):
                second = S[i + 1:j + 1]
                if not valid(second):
                    break
                fib = [a, int(second)]
                k = j + 1
                # Greedily extend while the next expected number is a
                # prefix of the remaining string.
                while k < n:
                    nxt = fib[-1] + fib[-2]
                    if nxt > INT_MAX:
                        break
                    nxt_s = str(nxt)
                    if not S.startswith(nxt_s, k):
                        break
                    fib.append(nxt)
                    k += len(nxt_s)
                # Valid only if the whole string was consumed and the
                # sequence has at least three numbers.
                if k == n and len(fib) >= 3:
                    return fib
        return []
class Solution:
def splitIntoFibonacci(self, S: str) -> List[int]:
for i in range(min(10, len(S))):
x = S[:i+1]
if x.startsWith('0'): break
a = int(x)
for j in range(i+1, min(10, len(S))):
y = S[i+1:j+1]
if y.startsWith('0'): break
b = int(y)
fib = [a, b]
k = j+1
while k < (len(S)):
nxt = fib[-1] + fib[-2]
nxtS = str(nxt)
if nxt <= 2**31-1 and S[k:].startsWith(nxtS):
k += len(nxtS)
fib.append(nxt)
else:
break | en | 0.692903 | # 842. Split Array into Fibonacci Sequence | 3.283528 | 3 |
utils/ip2city/ip138.py | huioo/tornadoWeb | 0 | 6618836 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import logging
import time
import signal
import tornado.gen
import tornado.httpclient
from utils.utils import utf8
import utils.httputils
area_regex = re.compile('(?is)<li>本站数据:([^<]*?)</li>')
class Ip138Finder(object):
    """Resolve an IP address to a Chinese city name via the ip138.com API."""

    def __init__(self):
        # Query URL template; action=2 requests the plain lookup page.
        self.api_url = 'http://www.ip138.com/ips1388.asp?ip={ip}&action=2'
        # Browser-like headers so the endpoint does not reject the request.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Referer": "http://www.ip138.com/",
        }

    def parse_response_result(self, response):
        """
        Extract the city name from an ip138 HTML response.

        :param response: raw response body (GB18030-encoded bytes)
        :return: city name for the IP, or '' when parsing fails
        """
        city = ''
        try:
            response = utf8(response.decode('gb18030', 'ignore'))
            result = area_regex.findall(response)
            city = result[0] if result else ''
            # Strip autonomous-region / province / city suffixes so only
            # the bare city name remains.
            city = city.split(' ')[0].replace('新疆维吾尔自治区', '').replace('广西壮族自治区', '')
            city = city.replace('宁夏回族自治区', '').replace('西藏自治区', '').replace('内蒙古自治区', '')
            city = city.split('省')[-1].replace('市', '')
        except Exception:
            # except Exception instead of a bare except so SystemExit /
            # KeyboardInterrupt still propagate; the old log message also
            # wrongly blamed "taobao" inside the ip138 client.
            logging.info('ip138 response parse error, response:{}'.format(response))
        return city

    @tornado.gen.coroutine
    def find(self, ip):
        """Asynchronously look up *ip*; resolves to the city name or ''."""
        result = ''
        try:
            url = self.api_url.format(ip=ip)
            response = yield utils.httputils.async_http_fetch(
                url=url,
                timeout=15,
                method='GET',
                headers=self.headers,
                validate_cert=False
            )
            result = self.parse_response_result(response)
            logging.info('[response] ip138_api_search:{} success, result:{}'.format(url, result))
        except tornado.httpclient.HTTPError as e:
            logging.error('[http] http error:"%s"' % str(e))
            result = ''
        except Exception as e:
            logging.error('[http] http error:"%s"' % str(e))
            result = ''
        # Python-2 style coroutine return (this module targets py2 tornado).
        raise tornado.gen.Return(result)
class TestHandler(object):
    """Manual smoke test: resolve a few known IPs and assert the cities."""

    def shutdown(self):
        """Stop the IOLoop once all pending callbacks/timeouts have drained."""
        io_loop = tornado.ioloop.IOLoop.instance()
        deadline = time.time() + 5

        def stop_loop():
            now = time.time()
            if now < deadline and (io_loop._callbacks or io_loop._timeouts):
                # Work still queued: poll again in one second.
                io_loop.add_timeout(now + 1, stop_loop)
            else:
                # All existing callbacks and timeouts are done, so it is
                # safe to break out of the loop started by io_loop.start().
                io_loop.stop()
        stop_loop()

    @tornado.gen.coroutine
    def work(self):
        # print() is valid on both Python 2 and 3; the original used the
        # Python-2-only statement form ``print 'work in'``.
        print('work in')
        handler = Ip138Finder()
        # NOTE(review): '192.168.3.11' appears twice below, so the first
        # entry is silently overwritten by the second -- one of the two
        # addresses is probably wrong; confirm the intended IPs.
        ip_dict = {
            '172.16.31.10': '乌鲁木齐',
            '192.168.3.11': '南宁',
            '172.16.58.3': '银川',
            '192.168.3.11': '拉萨',
            '192.168.127.12': '赤峰',
        }
        for ip, city in ip_dict.items():
            city_result = yield handler.find(ip)
            assert city == city_result
        tornado.ioloop.IOLoop.instance().add_callback(self.shutdown)

    def sig_handler(self, sig, frame):
        """Signal handler: request a graceful IOLoop shutdown."""
        tornado.ioloop.IOLoop.instance().add_callback(self.shutdown)

    def main(self):
        """Install signal handlers, schedule work() and run the IOLoop."""
        loop = tornado.ioloop.IOLoop.instance()
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)
        tornado.ioloop.IOLoop.current().add_timeout(
            time.time() + 2, self.work
        )
        loop.start()
if __name__ == '__main__':
TestHandler().main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import logging
import time
import signal
import tornado.gen
import tornado.httpclient
from utils.utils import utf8
import utils.httputils
area_regex = re.compile('(?is)<li>本站数据:([^<]*?)</li>')
class Ip138Finder(object):
    """Resolve an IP address to a Chinese city name via the ip138.com API."""

    def __init__(self):
        # Query URL template; action=2 requests the plain lookup page.
        self.api_url = 'http://www.ip138.com/ips1388.asp?ip={ip}&action=2'
        # Browser-like headers so the endpoint does not reject the request.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Referer": "http://www.ip138.com/",
        }

    def parse_response_result(self, response):
        """
        Extract the city name from an ip138 HTML response.

        :param response: raw response body (GB18030-encoded bytes)
        :return: city name for the IP, or '' when parsing fails
        """
        city = ''
        try:
            response = utf8(response.decode('gb18030', 'ignore'))
            result = area_regex.findall(response)
            city = result[0] if result else ''
            # Strip autonomous-region / province / city suffixes so only
            # the bare city name remains.
            city = city.split(' ')[0].replace('新疆维吾尔自治区', '').replace('广西壮族自治区', '')
            city = city.replace('宁夏回族自治区', '').replace('西藏自治区', '').replace('内蒙古自治区', '')
            city = city.split('省')[-1].replace('市', '')
        except Exception:
            # except Exception instead of a bare except so SystemExit /
            # KeyboardInterrupt still propagate; the old log message also
            # wrongly blamed "taobao" inside the ip138 client.
            logging.info('ip138 response parse error, response:{}'.format(response))
        return city

    @tornado.gen.coroutine
    def find(self, ip):
        """Asynchronously look up *ip*; resolves to the city name or ''."""
        result = ''
        try:
            url = self.api_url.format(ip=ip)
            response = yield utils.httputils.async_http_fetch(
                url=url,
                timeout=15,
                method='GET',
                headers=self.headers,
                validate_cert=False
            )
            result = self.parse_response_result(response)
            logging.info('[response] ip138_api_search:{} success, result:{}'.format(url, result))
        except tornado.httpclient.HTTPError as e:
            logging.error('[http] http error:"%s"' % str(e))
            result = ''
        except Exception as e:
            logging.error('[http] http error:"%s"' % str(e))
            result = ''
        # Python-2 style coroutine return (this module targets py2 tornado).
        raise tornado.gen.Return(result)
class TestHandler(object):
    """Manual smoke test: resolve a few known IPs and assert the cities."""

    def shutdown(self):
        """Stop the IOLoop once all pending callbacks/timeouts have drained."""
        io_loop = tornado.ioloop.IOLoop.instance()
        deadline = time.time() + 5

        def stop_loop():
            now = time.time()
            if now < deadline and (io_loop._callbacks or io_loop._timeouts):
                # Work still queued: poll again in one second.
                io_loop.add_timeout(now + 1, stop_loop)
            else:
                # All existing callbacks and timeouts are done, so it is
                # safe to break out of the loop started by io_loop.start().
                io_loop.stop()
        stop_loop()

    @tornado.gen.coroutine
    def work(self):
        # print() is valid on both Python 2 and 3; the original used the
        # Python-2-only statement form ``print 'work in'``.
        print('work in')
        handler = Ip138Finder()
        # NOTE(review): '192.168.3.11' appears twice below, so the first
        # entry is silently overwritten by the second -- one of the two
        # addresses is probably wrong; confirm the intended IPs.
        ip_dict = {
            '172.16.31.10': '乌鲁木齐',
            '192.168.3.11': '南宁',
            '172.16.58.3': '银川',
            '192.168.3.11': '拉萨',
            '192.168.127.12': '赤峰',
        }
        for ip, city in ip_dict.items():
            city_result = yield handler.find(ip)
            assert city == city_result
        tornado.ioloop.IOLoop.instance().add_callback(self.shutdown)

    def sig_handler(self, sig, frame):
        """Signal handler: request a graceful IOLoop shutdown."""
        tornado.ioloop.IOLoop.instance().add_callback(self.shutdown)

    def main(self):
        """Install signal handlers, schedule work() and run the IOLoop."""
        loop = tornado.ioloop.IOLoop.instance()
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)
        tornado.ioloop.IOLoop.current().add_timeout(
            time.time() + 2, self.work
        )
        loop.start()
if __name__ == '__main__':
TestHandler().main()
| zh | 0.688501 | #!/usr/bin/env python # -*- coding: utf-8 -*- :param response: :return: city ip对应的城市 # 处理完现有的 callback 和 timeout 后,可以跳出 io_loop.start() 里的循环 | 2.535577 | 3 |
scripts/build_backtranslations_dictionary.py | ufal/augpt | 11 | 6618837 | #!/bin/env python
import argparse
import yaml
if __name__ == '__main__':
    # Build a {source line: [alternative lines]} mapping from parallel files
    # and dump it as YAML.
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', default='backtranslations.yaml')
    parser.add_argument('source')
    parser.add_argument('alternative', nargs='+')
    args = parser.parse_args()

    # Read every parallel file with a context manager so each handle is
    # closed (the original leaked the input file objects).
    with open(args.source, 'r') as f:
        source = f.readlines()
    alternatives = []
    for path in args.alternative:
        with open(path, 'r') as f:
            alternatives.append(f.readlines())

    # Line i of each alternative file is a backtranslation of line i of the
    # source file; strip trailing newlines for clean YAML keys and values.
    dictionary = {x[0].rstrip('\n'): [y.rstrip('\n') for y in x[1:]]
                  for x in zip(source, *alternatives)}
    with open(args.out, 'w+') as f:
        yaml.dump(dictionary, f)
| #!/bin/env python
import argparse
import yaml
if __name__ == '__main__':
    # Build a {source line: [alternative lines]} mapping from parallel files
    # and dump it as YAML.
    parser = argparse.ArgumentParser()
    parser.add_argument('--out', default='backtranslations.yaml')
    parser.add_argument('source')
    parser.add_argument('alternative', nargs='+')
    args = parser.parse_args()

    # Read every parallel file with a context manager so each handle is
    # closed (the original leaked the input file objects).
    with open(args.source, 'r') as f:
        source = f.readlines()
    alternatives = []
    for path in args.alternative:
        with open(path, 'r') as f:
            alternatives.append(f.readlines())

    # Line i of each alternative file is a backtranslation of line i of the
    # source file; strip trailing newlines for clean YAML keys and values.
    dictionary = {x[0].rstrip('\n'): [y.rstrip('\n') for y in x[1:]]
                  for x in zip(source, *alternatives)}
    with open(args.out, 'w+') as f:
        yaml.dump(dictionary, f)
| ru | 0.206726 | #!/bin/env python | 2.955279 | 3 |
MoinMoin/auth/_tests/test_ldap_login.py | RealTimeWeb/wikisite | 0 | 6618838 | <reponame>RealTimeWeb/wikisite
# -*- coding: utf-8 -*-
"""
MoinMoin - MoinMoin.auth.ldap Tests
@copyright: 2008 MoinMoin:ThomasWaldmann,
2010 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import py.test
from MoinMoin._tests.ldap_testbase import LDAPTstBase, LdapEnvironment, check_environ, SLAPD_EXECUTABLE
from MoinMoin._tests.ldap_testdata import *
from MoinMoin._tests import nuke_user, wikiconfig
from MoinMoin.auth import handle_login
# first check if we have python 2.4, python-ldap and slapd:
msg = check_environ()
if msg:
py.test.skip(msg)
del msg
import ldap
class TestLDAPServer(LDAPTstBase):
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
def testLDAP(self):
""" Just try accessing the LDAP server and see if usera and userb are in LDAP. """
server_uri = self.ldap_env.slapd.url
base_dn = self.ldap_env.basedn
lo = ldap.initialize(server_uri)
ldap.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) # ldap v2 is outdated
lo.simple_bind_s('', '')
lusers = lo.search_st(base_dn, ldap.SCOPE_SUBTREE, '(uid=*)')
uids = [ldap_dict['uid'][0] for dn, ldap_dict in lusers]
assert 'usera' in uids
assert 'userb' in uids
class TestMoinLDAPLogin(LDAPTstBase):
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
class Config(wikiconfig.Config):
from MoinMoin.auth.ldap_login import LDAPAuth
#ToDo get these vars from the test environment
server_uri = 'ldap://127.0.0.1:3890'
base_dn = 'ou=testing,dc=example,dc=org'
ldap_auth1 = LDAPAuth(server_uri=server_uri, base_dn=base_dn, autocreate=True)
auth = [ldap_auth1, ]
def testMoinLDAPLogin(self):
""" Just try accessing the LDAP server and see if usera and userb are in LDAP. """
# tests that must not authenticate:
u = handle_login(self.request, None, username='', password='')
assert u is None
u = handle_login(self.request, None, username='usera', password='')
assert u is None
u = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u is None
u = handle_login(self.request, None, username='userawrong', password='<PASSWORD>')
assert u is None
# tests that must authenticate:
u1 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u1 is not None
assert u1.valid
u2 = handle_login(self.request, None, username='userb', password='<PASSWORD>')
assert u2 is not None
assert u2.valid
# check if usera and userb have different ids:
assert u1.id != u2.id
class TestBugDefaultPasswd(LDAPTstBase):
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
class Config(wikiconfig.Config):
from MoinMoin.auth.ldap_login import LDAPAuth
from MoinMoin.auth import MoinAuth
#ToDo get these vars from the test environment
server_uri = 'ldap://127.0.0.1:3890'
base_dn = 'ou=testing,dc=example,dc=org'
ldap_auth = LDAPAuth(server_uri=server_uri, base_dn=base_dn, autocreate=True)
moin_auth = MoinAuth()
auth = [ldap_auth, moin_auth]
def teardown_class(self):
""" Stop slapd, remove LDAP server environment """
self.ldap_env.stop_slapd()
self.ldap_env.destroy_env()
def testBugDefaultPasswd(self):
""" Login via LDAP (this creates user profile and up to 1.7.0rc1 it put
a default password there), then try logging in via moin login using
that default password or an empty password.
"""
nuke_user(self.request, u'usera')
# do a LDAPAuth login (as a side effect, this autocreates the user profile):
u1 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u1 is not None
assert u1.valid
# now we kill the LDAP server:
#self.ldap_env.slapd.stop()
# now try a MoinAuth login:
# try the default password that worked in 1.7 up to rc1:
u2 = handle_login(self.request, None, username='usera', password='{<PASSWORD>')
assert u2 is None
# try using no password:
u2 = handle_login(self.request, None, username='usera', password='')
assert u2 is None
# try using wrong password:
u2 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u2 is None
class TestTwoLdapServers:
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
def setup_class(self):
""" Create LDAP servers environment, start slapds """
self.ldap_envs = []
for instance in range(2):
ldap_env = LdapEnvironment(self.basedn, self.rootdn, self.rootpw, instance=instance)
ldap_env.create_env(slapd_config=self.slapd_config)
started = ldap_env.start_slapd()
if not started:
py.test.skip("Failed to start %s process, please see your syslog / log files"
" (and check if stopping apparmor helps, in case you use it)." % SLAPD_EXECUTABLE)
ldap_env.load_directory(ldif_content=self.ldif_content)
self.ldap_envs.append(ldap_env)
def teardown_class(self):
""" Stop slapd, remove LDAP server environment """
for ldap_env in self.ldap_envs:
ldap_env.stop_slapd()
ldap_env.destroy_env()
def testLDAP(self):
""" Just try accessing the LDAP servers and see if usera and userb are in LDAP. """
for ldap_env in self.ldap_envs:
server_uri = ldap_env.slapd.url
base_dn = ldap_env.basedn
lo = ldap.initialize(server_uri)
ldap.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) # ldap v2 is outdated
lo.simple_bind_s('', '')
lusers = lo.search_st(base_dn, ldap.SCOPE_SUBTREE, '(uid=*)')
uids = [ldap_dict['uid'][0] for dn, ldap_dict in lusers]
assert 'usera' in uids
assert 'userb' in uids
class TestLdapFailover:
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
def setup_class(self):
""" Create LDAP servers environment, start slapds """
self.ldap_envs = []
for instance in range(2):
ldap_env = LdapEnvironment(self.basedn, self.rootdn, self.rootpw, instance=instance)
ldap_env.create_env(slapd_config=self.slapd_config)
started = ldap_env.start_slapd()
if not started:
py.test.skip("Failed to start %s process, please see your syslog / log files"
" (and check if stopping apparmor helps, in case you use it)." % SLAPD_EXECUTABLE)
ldap_env.load_directory(ldif_content=self.ldif_content)
self.ldap_envs.append(ldap_env)
class Config(wikiconfig.Config):
from MoinMoin.auth.ldap_login import LDAPAuth
#ToDo get these vars from the test environment
server_uri = 'ldap://127.0.0.1:3891'
base_dn = 'ou=testing,dc=example,dc=org'
ldap_auth1 = LDAPAuth(server_uri=server_uri, base_dn=base_dn,
name="ldap1", autocreate=True,
timeout=1)
# short timeout, faster testing
server_uri = 'ldap://127.0.0.1:3892'
ldap_auth2 = LDAPAuth(server_uri=server_uri, base_dn=base_dn,
name="ldap2", autocreate=True,
timeout=1)
auth = [ldap_auth1, ldap_auth2]
def teardown_class(self):
""" Stop slapd, remove LDAP server environment """
for ldap_env in self.ldap_envs:
try:
ldap_env.stop_slapd()
except:
pass # one will fail, because it is already stopped
ldap_env.destroy_env()
def testMoinLDAPFailOver(self):
""" Try if it does a failover to a secondary LDAP, if the primary fails. """
# authenticate user (with primary slapd):
u1 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u1 is not None
assert u1.valid
# now we kill our primary LDAP server:
self.ldap_envs[0].slapd.stop()
# try if we can still authenticate (with the second slapd):
u2 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u2 is not None
assert u2.valid
| # -*- coding: utf-8 -*-
"""
MoinMoin - MoinMoin.auth.ldap Tests
@copyright: 2008 MoinMoin:ThomasWaldmann,
2010 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import py.test
from MoinMoin._tests.ldap_testbase import LDAPTstBase, LdapEnvironment, check_environ, SLAPD_EXECUTABLE
from MoinMoin._tests.ldap_testdata import *
from MoinMoin._tests import nuke_user, wikiconfig
from MoinMoin.auth import handle_login
# first check if we have python 2.4, python-ldap and slapd:
msg = check_environ()
if msg:
py.test.skip(msg)
del msg
import ldap
class TestLDAPServer(LDAPTstBase):
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
def testLDAP(self):
""" Just try accessing the LDAP server and see if usera and userb are in LDAP. """
server_uri = self.ldap_env.slapd.url
base_dn = self.ldap_env.basedn
lo = ldap.initialize(server_uri)
ldap.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) # ldap v2 is outdated
lo.simple_bind_s('', '')
lusers = lo.search_st(base_dn, ldap.SCOPE_SUBTREE, '(uid=*)')
uids = [ldap_dict['uid'][0] for dn, ldap_dict in lusers]
assert 'usera' in uids
assert 'userb' in uids
class TestMoinLDAPLogin(LDAPTstBase):
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
class Config(wikiconfig.Config):
from MoinMoin.auth.ldap_login import LDAPAuth
#ToDo get these vars from the test environment
server_uri = 'ldap://127.0.0.1:3890'
base_dn = 'ou=testing,dc=example,dc=org'
ldap_auth1 = LDAPAuth(server_uri=server_uri, base_dn=base_dn, autocreate=True)
auth = [ldap_auth1, ]
def testMoinLDAPLogin(self):
""" Just try accessing the LDAP server and see if usera and userb are in LDAP. """
# tests that must not authenticate:
u = handle_login(self.request, None, username='', password='')
assert u is None
u = handle_login(self.request, None, username='usera', password='')
assert u is None
u = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u is None
u = handle_login(self.request, None, username='userawrong', password='<PASSWORD>')
assert u is None
# tests that must authenticate:
u1 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u1 is not None
assert u1.valid
u2 = handle_login(self.request, None, username='userb', password='<PASSWORD>')
assert u2 is not None
assert u2.valid
# check if usera and userb have different ids:
assert u1.id != u2.id
class TestBugDefaultPasswd(LDAPTstBase):
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
class Config(wikiconfig.Config):
from MoinMoin.auth.ldap_login import LDAPAuth
from MoinMoin.auth import MoinAuth
#ToDo get these vars from the test environment
server_uri = 'ldap://127.0.0.1:3890'
base_dn = 'ou=testing,dc=example,dc=org'
ldap_auth = LDAPAuth(server_uri=server_uri, base_dn=base_dn, autocreate=True)
moin_auth = MoinAuth()
auth = [ldap_auth, moin_auth]
def teardown_class(self):
""" Stop slapd, remove LDAP server environment """
self.ldap_env.stop_slapd()
self.ldap_env.destroy_env()
def testBugDefaultPasswd(self):
""" Login via LDAP (this creates user profile and up to 1.7.0rc1 it put
a default password there), then try logging in via moin login using
that default password or an empty password.
"""
nuke_user(self.request, u'usera')
# do a LDAPAuth login (as a side effect, this autocreates the user profile):
u1 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u1 is not None
assert u1.valid
# now we kill the LDAP server:
#self.ldap_env.slapd.stop()
# now try a MoinAuth login:
# try the default password that worked in 1.7 up to rc1:
u2 = handle_login(self.request, None, username='usera', password='{<PASSWORD>')
assert u2 is None
# try using no password:
u2 = handle_login(self.request, None, username='usera', password='')
assert u2 is None
# try using wrong password:
u2 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
assert u2 is None
class TestTwoLdapServers:
basedn = BASEDN
rootdn = ROOTDN
rootpw = ROOTPW
slapd_config = SLAPD_CONFIG
ldif_content = LDIF_CONTENT
def setup_class(self):
""" Create LDAP servers environment, start slapds """
self.ldap_envs = []
for instance in range(2):
ldap_env = LdapEnvironment(self.basedn, self.rootdn, self.rootpw, instance=instance)
ldap_env.create_env(slapd_config=self.slapd_config)
started = ldap_env.start_slapd()
if not started:
py.test.skip("Failed to start %s process, please see your syslog / log files"
" (and check if stopping apparmor helps, in case you use it)." % SLAPD_EXECUTABLE)
ldap_env.load_directory(ldif_content=self.ldif_content)
self.ldap_envs.append(ldap_env)
def teardown_class(self):
""" Stop slapd, remove LDAP server environment """
for ldap_env in self.ldap_envs:
ldap_env.stop_slapd()
ldap_env.destroy_env()
def testLDAP(self):
""" Just try accessing the LDAP servers and see if usera and userb are in LDAP. """
for ldap_env in self.ldap_envs:
server_uri = ldap_env.slapd.url
base_dn = ldap_env.basedn
lo = ldap.initialize(server_uri)
ldap.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) # ldap v2 is outdated
lo.simple_bind_s('', '')
lusers = lo.search_st(base_dn, ldap.SCOPE_SUBTREE, '(uid=*)')
uids = [ldap_dict['uid'][0] for dn, ldap_dict in lusers]
assert 'usera' in uids
assert 'userb' in uids
class TestLdapFailover:
    """Exercise MoinMoin's LDAP failover: two slapd instances, kill the
    primary, and check that authentication still succeeds via the second."""

    basedn = BASEDN
    rootdn = ROOTDN
    rootpw = ROOTPW
    slapd_config = SLAPD_CONFIG
    ldif_content = LDIF_CONTENT

    def setup_class(self):
        """ Create LDAP servers environment, start slapds """
        self.ldap_envs = []
        for instance in range(2):
            env = LdapEnvironment(self.basedn, self.rootdn, self.rootpw, instance=instance)
            env.create_env(slapd_config=self.slapd_config)
            if not env.start_slapd():
                py.test.skip("Failed to start %s process, please see your syslog / log files"
                             " (and check if stopping apparmor helps, in case you use it)." % SLAPD_EXECUTABLE)
            env.load_directory(ldif_content=self.ldif_content)
            self.ldap_envs.append(env)

    class Config(wikiconfig.Config):
        from MoinMoin.auth.ldap_login import LDAPAuth
        #ToDo get these vars from the test environment
        server_uri = 'ldap://127.0.0.1:3891'
        base_dn = 'ou=testing,dc=example,dc=org'
        ldap_auth1 = LDAPAuth(server_uri=server_uri, base_dn=base_dn,
                              name="ldap1", autocreate=True,
                              timeout=1)
        # short timeout, faster testing
        server_uri = 'ldap://127.0.0.1:3892'
        ldap_auth2 = LDAPAuth(server_uri=server_uri, base_dn=base_dn,
                              name="ldap2", autocreate=True,
                              timeout=1)
        auth = [ldap_auth1, ldap_auth2]

    def teardown_class(self):
        """ Stop slapd, remove LDAP server environment """
        for env in self.ldap_envs:
            try:
                env.stop_slapd()
            except:
                pass # one will fail, because it is already stopped
            env.destroy_env()

    def testMoinLDAPFailOver(self):
        """ Try if it does a failover to a secondary LDAP, if the primary fails. """
        # authenticate user (with primary slapd):
        u1 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
        assert u1 is not None
        assert u1.valid
        # now we kill our primary LDAP server:
        self.ldap_envs[0].slapd.stop()
        # try if we can still authenticate (with the second slapd):
        u2 = handle_login(self.request, None, username='usera', password='<PASSWORD>')
        assert u2 is not None
        assert u2.valid
main.py | alanriddle/fs-proj4-conference-central | 0 | 6618839 | #!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = '<EMAIL> (<NAME>)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.ext import ndb
from google.appengine.api import memcache
from conference import ConferenceApi
from conference import MEMCACHE_FEATURED_SPEAKER_KEY
from models import Session
import logging
class SetAnnouncementHandler(webapp2.RequestHandler):
    """Cron endpoint that refreshes the cached announcement."""

    def get(self):
        """Set Announcement in Memcache."""
        # Delegate the caching work to the API class, then answer with
        # 204 No Content since there is nothing to return to the cron caller.
        ConferenceApi._cacheAnnouncement()
        self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    """Task-queue handler that emails the organizer after a conference
    has been created."""

    def post(self):
        """Send email confirming Conference creation."""
        # Sender must belong to this App Engine application.
        sender = '<EMAIL>' % (
            app_identity.get_application_id())
        recipient = self.request.get('email')
        subject = 'You created a new Conference!'
        body = ('Hi, you have created a following '
                'conference:\r\n\r\n%s' % self.request.get('conferenceInfo'))
        mail.send_mail(sender, recipient, subject, body)
class MakeFeaturedSpeakerMessage(webapp2.RequestHandler):
    """Task-queue handler that (re)builds the featured-speaker memcache
    entry for a conference whenever a session is added."""

    def post(self):
        """Make Featured Message for memcache."""
        conf_key = ndb.Key(urlsafe=self.request.get('websafeConferenceKey'))
        session_key = ndb.Key(urlsafe=self.request.get('websafeSessionKey'))
        speaker = session_key.get().speaker
        # Names of all sessions in this conference given by the same speaker.
        names = [s.name
                 for s in Session.query(ancestor=conf_key).fetch()
                 if s.speaker == speaker]
        if len(names) > 1:
            # build message for memcache
            lines = ["Featured Speaker",
                     speaker,
                     "Conference: " + conf_key.get().name]
            for name in names:
                lines.append("Session: " + name)
            memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, "\n".join(lines))
        else:
            # Fewer than two sessions: the speaker is not featured.
            memcache.delete(MEMCACHE_FEATURED_SPEAKER_KEY)
# WSGI application wiring cron and task-queue URLs to their handlers.
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/make_featured_speaker_message', MakeFeaturedSpeakerMessage),
], debug=True)
| #!/usr/bin/env python
"""
main.py -- Udacity conference server-side Python App Engine
HTTP controller handlers for memcache & task queue access
$Id$
created by wesc on 2014 may 24
"""
__author__ = '<EMAIL> (<NAME>)'
import webapp2
from google.appengine.api import app_identity
from google.appengine.api import mail
from google.appengine.ext import ndb
from google.appengine.api import memcache
from conference import ConferenceApi
from conference import MEMCACHE_FEATURED_SPEAKER_KEY
from models import Session
import logging
class SetAnnouncementHandler(webapp2.RequestHandler):
    """Cron endpoint that refreshes the cached announcement."""

    def get(self):
        """Set Announcement in Memcache."""
        # Delegate the caching work to the API class, then answer with
        # 204 No Content since there is nothing to return to the cron caller.
        ConferenceApi._cacheAnnouncement()
        self.response.set_status(204)
class SendConfirmationEmailHandler(webapp2.RequestHandler):
    """Task-queue handler that emails the organizer after a conference
    has been created."""

    def post(self):
        """Send email confirming Conference creation."""
        # Sender must belong to this App Engine application.
        sender = '<EMAIL>' % (
            app_identity.get_application_id())
        recipient = self.request.get('email')
        subject = 'You created a new Conference!'
        body = ('Hi, you have created a following '
                'conference:\r\n\r\n%s' % self.request.get('conferenceInfo'))
        mail.send_mail(sender, recipient, subject, body)
class MakeFeaturedSpeakerMessage(webapp2.RequestHandler):
    """Task-queue handler that (re)builds the featured-speaker memcache
    entry for a conference whenever a session is added."""

    def post(self):
        """Make Featured Message for memcache."""
        conf_key = ndb.Key(urlsafe=self.request.get('websafeConferenceKey'))
        session_key = ndb.Key(urlsafe=self.request.get('websafeSessionKey'))
        speaker = session_key.get().speaker
        # Names of all sessions in this conference given by the same speaker.
        names = [s.name
                 for s in Session.query(ancestor=conf_key).fetch()
                 if s.speaker == speaker]
        if len(names) > 1:
            # build message for memcache
            lines = ["Featured Speaker",
                     speaker,
                     "Conference: " + conf_key.get().name]
            for name in names:
                lines.append("Session: " + name)
            memcache.set(MEMCACHE_FEATURED_SPEAKER_KEY, "\n".join(lines))
        else:
            # Fewer than two sessions: the speaker is not featured.
            memcache.delete(MEMCACHE_FEATURED_SPEAKER_KEY)
# WSGI application wiring cron and task-queue URLs to their handlers.
app = webapp2.WSGIApplication([
('/crons/set_announcement', SetAnnouncementHandler),
('/tasks/send_confirmation_email', SendConfirmationEmailHandler),
('/tasks/make_featured_speaker_message', MakeFeaturedSpeakerMessage),
], debug=True)
| en | 0.651119 | #!/usr/bin/env python main.py -- Udacity conference server-side Python App Engine HTTP controller handlers for memcache & task queue access $Id$ created by wesc on 2014 may 24 Set Announcement in Memcache. Send email confirming Conference creation. # from # to # subj # body Make Featured Message for memcache. # build message for memcache | 2.371228 | 2 |
uer/targets/target.py | krevas/ET-BERT | 0 | 6618840 | import torch.nn as nn
class Target(nn.Module):
    """Container that aggregates one or more pre-training target heads.

    Each registered target contributes a loss; forward() returns a single
    loss result when one target is registered, or a dict keyed by target
    name when several are.
    """

    def __init__(self):
        # BUG FIX: nn.Module.__init__ was never called, leaving _modules,
        # _parameters and the hook dicts uninitialized, which breaks
        # __call__, state_dict() and the "_modules" merge in update().
        super(Target, self).__init__()
        self.target_list = []       # forward callables of registered targets
        self.target_name_list = []  # parallel list of target names
        self.loss_info = {}         # loss result(s) of the last forward pass

    def update(self, target, target_name):
        """Register *target* (an nn.Module) under *target_name*.

        The target's submodules are merged into this module so their
        parameters are visible to optimizers, .to() and state_dict().
        """
        self.target_list.append(target.forward)
        self.target_name_list.append(target_name)
        if "_modules" in self.__dict__:
            self.__dict__["_modules"].update(target.__dict__["_modules"])
        else:
            # Fallback kept for safety; with super().__init__() above,
            # "_modules" always exists and this branch is not taken.
            self.__dict__.update(target.__dict__)

    def forward(self, memory_bank, tgt, seg):
        """Compute the loss(es) of all registered targets.

        With multiple targets, *tgt* is expected to be a dict keyed by
        target name and a dict of per-target losses is returned; with a
        single target, its loss result is returned directly.
        """
        self.loss_info = {}
        for i, target in enumerate(self.target_list):
            if len(self.target_list) > 1:
                self.loss_info[self.target_name_list[i]] = target(memory_bank, tgt[self.target_name_list[i]], seg)
            else:
                self.loss_info = target(memory_bank, tgt, seg)
        return self.loss_info
| import torch.nn as nn
class Target(nn.Module):
    """Container that aggregates one or more pre-training target heads.

    Each registered target contributes a loss; forward() returns a single
    loss result when one target is registered, or a dict keyed by target
    name when several are.
    """

    def __init__(self):
        # BUG FIX: nn.Module.__init__ was never called, leaving _modules,
        # _parameters and the hook dicts uninitialized, which breaks
        # __call__, state_dict() and the "_modules" merge in update().
        super(Target, self).__init__()
        self.target_list = []       # forward callables of registered targets
        self.target_name_list = []  # parallel list of target names
        self.loss_info = {}         # loss result(s) of the last forward pass

    def update(self, target, target_name):
        """Register *target* (an nn.Module) under *target_name*.

        The target's submodules are merged into this module so their
        parameters are visible to optimizers, .to() and state_dict().
        """
        self.target_list.append(target.forward)
        self.target_name_list.append(target_name)
        if "_modules" in self.__dict__:
            self.__dict__["_modules"].update(target.__dict__["_modules"])
        else:
            # Fallback kept for safety; with super().__init__() above,
            # "_modules" always exists and this branch is not taken.
            self.__dict__.update(target.__dict__)

    def forward(self, memory_bank, tgt, seg):
        """Compute the loss(es) of all registered targets.

        With multiple targets, *tgt* is expected to be a dict keyed by
        target name and a dict of per-target losses is returned; with a
        single target, its loss result is returned directly.
        """
        self.loss_info = {}
        for i, target in enumerate(self.target_list):
            if len(self.target_list) > 1:
                self.loss_info[self.target_name_list[i]] = target(memory_bank, tgt[self.target_name_list[i]], seg)
            else:
                self.loss_info = target(memory_bank, tgt, seg)
        return self.loss_info
| none | 1 | 2.657609 | 3 | |
iris_sdk/models/maps/dlda_tn_group.py | scottbarstow/iris-python | 0 | 6618841 | #!/usr/bin/env python
from iris_sdk.models.maps.base_map import BaseMap
class DldaTnGroupMap(BaseMap):
    """Field map for a DLDA telephone-number group payload.

    Attribute names mirror the API element names; values are populated
    by the BaseMap machinery. Declaration order is preserved as it may
    be significant to BaseMap serialization.
    """
    telephone_numbers = None  # numbers belonging to this group
    account_type = None       # account classification
    listing_type = None       # type of directory listing
    list_address = None       # whether the address is listed
    listing_name = None       # name shown in the listing
    address = None            # subscriber address record
from iris_sdk.models.maps.base_map import BaseMap
class DldaTnGroupMap(BaseMap):
    """Field map for a DLDA telephone-number group payload.

    Attribute names mirror the API element names; values are populated
    by the BaseMap machinery. Declaration order is preserved as it may
    be significant to BaseMap serialization.
    """
    telephone_numbers = None  # numbers belonging to this group
    account_type = None       # account classification
    listing_type = None       # type of directory listing
    list_address = None       # whether the address is listed
    listing_name = None       # name shown in the listing
    address = None            # subscriber address record
SPDX_licenses.py | sgeary01/inventory_report_example | 0 | 6618842 | '''
Copyright 2020 Flexera Software LLC
See LICENSE.TXT for full license text
SPDX-License-Identifier: MIT
Author : sgeary
Created On : Fri May 01 2020
File : licenses.py
'''
# Map human-readable license names (as reported by the scanner) to SPDX
# short identifiers. Licenses the scanner cannot determine are reported
# as "Undeclared" per SPDX convention. The remaining entries below follow
# the same name -> SPDX-id pattern.
LICENSEMAPPINGS = {}
LICENSEMAPPINGS["I don't know"] = "Undeclared"
LICENSEMAPPINGS["389 Directory Server Exception"]="389-exception"
LICENSEMAPPINGS["3dfx Glide License"]="Glide"
LICENSEMAPPINGS["3DFX GLIDE Source Code General Public License"]="Glide"
LICENSEMAPPINGS["Abstyles License"]="Abstyles"
LICENSEMAPPINGS["Academic Free License v1.1"]="AFL-1.1"
LICENSEMAPPINGS["Academic Free License v1.2"]="AFL-1.2"
LICENSEMAPPINGS["Academic Free License v2.0"]="AFL-2.0"
LICENSEMAPPINGS["Academic Free License v2.1"]="AFL-2.1"
LICENSEMAPPINGS["Academic Free License v3.0"]="AFL-3.0"
LICENSEMAPPINGS["Academy of Motion Picture Arts and Sciences BSD"]="AMPAS"
LICENSEMAPPINGS["Adaptive Public License 1.0"]="APL-1.0"
LICENSEMAPPINGS["Adobe Glyph List License"]="Adobe-Glyph"
LICENSEMAPPINGS["Adobe Postscript AFM License"]="APAFML"
LICENSEMAPPINGS["Adobe Systems Incorporated Source Code License Agreement"]="Adobe-2006"
LICENSEMAPPINGS["Affero General Public License v1.0"]="AGPL-1.0"
LICENSEMAPPINGS["Afmparse License"]="Afmparse"
LICENSEMAPPINGS["Aladdin Free Public License v8"]="Aladdin"
LICENSEMAPPINGS["Allegro Giftware License"]="Giftware"
LICENSEMAPPINGS["Amazon Digital Services License"]="ADSL"
LICENSEMAPPINGS["AMD's plpa_map.c License"]="AMDPLPA"
LICENSEMAPPINGS["ANTLR Software Rights Notice"]="ANTLR-PD"
LICENSEMAPPINGS["Apache License 1.0"]="Apache-1.0"
LICENSEMAPPINGS["Apache License 1.1"]="Apache-1.1"
LICENSEMAPPINGS["Apache License 2.0"]="Apache-2.0"
LICENSEMAPPINGS["Apple MIT License"]="AML"
LICENSEMAPPINGS["Apple Public Source License 1.0"]="APSL-1.0"
LICENSEMAPPINGS["Apple Public Source License 1.1"]="APSL-1.1"
LICENSEMAPPINGS["Apple Public Source License 1.2"]="APSL-1.2"
LICENSEMAPPINGS["Apple Public Source License 2.0"]="APSL-2.0"
LICENSEMAPPINGS["Artistic License 1.0"]="Artistic-1.0"
LICENSEMAPPINGS["Artistic License 1.0 w/clause 8"]="Artistic-1.0-cl8"
LICENSEMAPPINGS["Artistic License 2.0"]="Artistic-2.0"
LICENSEMAPPINGS["Attribution Assurance License"]="AAL"
LICENSEMAPPINGS["Autoconf exception 2.0"]="Autoconf-exception-2.0"
LICENSEMAPPINGS["Autoconf exception 3.0"]="Autoconf-exception-3.0"
LICENSEMAPPINGS["Bahyph License"]="Bahyph"
LICENSEMAPPINGS["Barr License"]="Barr"
LICENSEMAPPINGS["Bison exception 2.2"]="Bison-exception-2.2"
LICENSEMAPPINGS["BitTorrent Open Source License v1.0"]="BitTorrent-1.0"
LICENSEMAPPINGS["BitTorrent Open Source License v1.1"]="BitTorrent-1.1"
LICENSEMAPPINGS["Boost Software License 1.0"]="BSL-1.0"
LICENSEMAPPINGS["Bootloader Distribution Exception"]="Bootloader-exception"
LICENSEMAPPINGS["Borceux license"]="Borceux"
LICENSEMAPPINGS["BSD 1-Clause License"]="BSD-1-Clause"
LICENSEMAPPINGS["BSD 2-clause \"Simplified\" or \"FreeBSD\" License"]="BSD-2-Clause"
LICENSEMAPPINGS["BSD 2-clause FreeBSD License"]="BSD-2-Clause-FreeBSD"
LICENSEMAPPINGS["BSD 2-clause NetBSD License"]="BSD-2-Clause-NetBSD"
LICENSEMAPPINGS["BSD 3-clause \"New\" or \"Revised\" License"]="BSD-3-Clause"
LICENSEMAPPINGS["BSD 3-clause Clear License"]="BSD-3-Clause-Clear"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License"]="BSD-3-Clause-No-Nuclear-License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License 2014"]="BSD-3-Clause-No-Nuclear-License-2014"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear Warranty"]="BSD-3-Clause-No-Nuclear-Warranty"
LICENSEMAPPINGS["BSD 4-clause \"Original\" or \"Old\" License"]="BSD-4-Clause"
LICENSEMAPPINGS["BSD Protection License"]="BSD-Protection"
LICENSEMAPPINGS["BSD Source Code Attribution"]="BSD-Source-Code"
LICENSEMAPPINGS["BSD with attribution"]="BSD-3-Clause-Attribution"
LICENSEMAPPINGS["BSD Zero Clause License"]="0BSD"
LICENSEMAPPINGS["BSD-2-Clause Plus Patent License"]="BSD-2-Clause-Patent"
LICENSEMAPPINGS["BSD-4-Clause (University of California-Specific)"]="BSD-4-Clause-UC"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.5"]="bzip2-1.0.5"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.6"]="bzip2-1.0.6"
LICENSEMAPPINGS["Caldera License"]="Caldera"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.0"]="CECILL-1.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.1"]="CECILL-1.1"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.0"]="CECILL-2.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.1"]="CECILL-2.1"
LICENSEMAPPINGS["CeCILL-B Free Software License Agreement v1.0"]="CECILL-B"
LICENSEMAPPINGS["CeCILL-C Free Software License Agreement v1.0"]="CECILL-C"
LICENSEMAPPINGS["Clarified Artistic License"]="ClArtistic"
LICENSEMAPPINGS["Classpath exception 2.0"]="Classpath-exception-2.0"
LICENSEMAPPINGS["CLISP exception 2.0"]="CLISP-exception-2.0"
LICENSEMAPPINGS["CMU License"]="MIT-CMU"
LICENSEMAPPINGS["CNRI Python License"]="CNRI-Python"
LICENSEMAPPINGS["CNRI Python Open Source GPL Compatible License Agreement"]="CNRI-Python-GPL-Compatible"
LICENSEMAPPINGS["Common Development and Distribution License 1.0"]="CDDL-1.0"
LICENSEMAPPINGS["Common Development and Distribution License 1.1"]="CDDL-1.1"
LICENSEMAPPINGS["Common Public Attribution License 1.0"]="CPAL-1.0"
LICENSEMAPPINGS["Common Public License"]="CPL-1.0"
LICENSEMAPPINGS["Community Data License Agreement Permissive 1.0"]="CDLA-Permissive-1.0"
LICENSEMAPPINGS["Community Data License Agreement Sharing 1.0"]="CDLA-Sharing-1.0"
LICENSEMAPPINGS["Computer Associates Trusted Open Source License 1.1"]="CATOSL-1.1"
LICENSEMAPPINGS["Condor Public License v1.1"]="Condor-1.1"
LICENSEMAPPINGS["Creative Commons Attribution 1.0"]="CC-BY-1.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.0"]="CC-BY-2.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.5"]="CC-BY-2.5"
LICENSEMAPPINGS["Creative Commons Attribution 3.0"]="CC-BY-3.0"
LICENSEMAPPINGS["Creative Commons Attribution 4.0"]="CC-BY-4.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 1.0"]="CC-BY-ND-1.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.0"]="CC-BY-ND-2.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.5"]="CC-BY-ND-2.5"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 3.0"]="CC-BY-ND-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 1.0"]="CC-BY-NC-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.0"]="CC-BY-NC-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.5"]="CC-BY-NC-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 3.0"]="CC-BY-NC-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 1.0"]="CC-BY-NC-ND-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.0"]="CC-BY-NC-ND-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.5"]="CC-BY-NC-ND-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 3.0"]="CC-BY-NC-ND-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 1.0"]="CC-BY-NC-SA-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.0"]="CC-BY-NC-SA-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.5"]="CC-BY-NC-SA-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 3.0"]="CC-BY-NC-SA-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 1.0"]="CC-BY-SA-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.0"]="CC-BY-SA-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.5"]="CC-BY-SA-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 3.0"]="CC-BY-SA-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 4.0"]="CC-BY-SA-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NoDerivatives 4.0"]="CC-BY-ND-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial 4.0"]="CC-BY-NC-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-NoDerivatives 4.0"]="CC-BY-NC-ND-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-ShareAlike 4.0"]="CC-BY-NC-SA-4.0"
LICENSEMAPPINGS["Creative Commons CC0 1.0 Universal"]="CC0-1.0"
LICENSEMAPPINGS["Crossword License"]="Crossword"
LICENSEMAPPINGS["CrystalStacker License"]="CrystalStacker"
LICENSEMAPPINGS["CUA Office Public License v1.0"]="CUA-OPL-1.0"
LICENSEMAPPINGS["Cube License"]="Cube"
LICENSEMAPPINGS["Deutsche Freie Software Lizenz"]="D-FSL-1.0"
LICENSEMAPPINGS["diffmark license"]="diffmark"
LICENSEMAPPINGS["DigiRule FOSS License Exception"]="DigiRule-FOSS-exception"
LICENSEMAPPINGS["Do What The Fuck You Want To Public License"]="WTFPL"
LICENSEMAPPINGS["DOC Software License"]="DOC"
LICENSEMAPPINGS["Dotseqn License"]="Dotseqn"
LICENSEMAPPINGS["DSDP License"]="DSDP"
LICENSEMAPPINGS["dvipdfm License"]="dvipdfm"
LICENSEMAPPINGS["Eclipse Public License 1.0"]="EPL-1.0"
LICENSEMAPPINGS["Eclipse Public License 2.0"]="EPL-2.0"
LICENSEMAPPINGS["eCos exception 2.0"]="eCos-exception-2.0"
LICENSEMAPPINGS["eCos license version 2.0"]="eCos-2.0"
LICENSEMAPPINGS["Educational Community License v1.0"]="ECL-1.0"
LICENSEMAPPINGS["Educational Community License v2.0"]="ECL-2.0"
LICENSEMAPPINGS["eGenix.com Public License 1.1.0"]="eGenix"
LICENSEMAPPINGS["Eiffel Forum License v1.0"]="EFL-1.0"
LICENSEMAPPINGS["Eiffel Forum License v2.0"]="EFL-2.0"
LICENSEMAPPINGS["Enlightenment License (e16)"]="MIT-advertising"
LICENSEMAPPINGS["enna License"]="MIT-enna"
LICENSEMAPPINGS["Entessa Public License"]="Entessa"
LICENSEMAPPINGS["Erlang Public License v1.1"]="ErlPL-1.1"
LICENSEMAPPINGS["EU DataGrid Software License"]="EUDatagrid"
LICENSEMAPPINGS["European Union Public License 1.0"]="EUPL-1.0"
LICENSEMAPPINGS["European Union Public License 1.1"]="EUPL-1.1"
LICENSEMAPPINGS["European Union Public License 1.2"]="EUPL-1.2"
LICENSEMAPPINGS["Eurosym License v2"]="Eurosym"
LICENSEMAPPINGS["Fair License"]="Fair"
LICENSEMAPPINGS["FastCGI"]="OML"
LICENSEMAPPINGS["Fawkes Runtime Exception"]="Fawkes-Runtime-exception"
LICENSEMAPPINGS["feh License"]="MIT-feh"
LICENSEMAPPINGS["FLTK exception"]="FLTK-exception"
LICENSEMAPPINGS["Font exception 2.0"]="Font-exception-2.0"
LICENSEMAPPINGS["Frameworx Open License 1.0"]="Frameworx-1.0"
LICENSEMAPPINGS["FreeImage Public License v1.0"]="FreeImage"
LICENSEMAPPINGS["FreeRTOS Exception 2.0"]="freertos-exception-2.0"
LICENSEMAPPINGS["FreeType License"]="FTL"
LICENSEMAPPINGS["FSF All Permissive License"]="FSFAP"
LICENSEMAPPINGS["FSF Unlimited License"]="FSFUL"
LICENSEMAPPINGS["FSF Unlimited License (with License Retention)"]="FSFULLR"
LICENSEMAPPINGS["GCC Runtime Library exception 2.0"]="GCC-exception-2.0"
LICENSEMAPPINGS["GCC Runtime Library exception 3.1"]="GCC-exception-3.1"
LICENSEMAPPINGS["GL2PS License, Version 2"]="GL2PS"
LICENSEMAPPINGS["Glulxe License"]="Glulxe"
LICENSEMAPPINGS["GNU Affero General Public License v3.0"]="AGPL-3.0"
LICENSEMAPPINGS["GNU Free Documentation License v1.1"]="GFDL-1.1"
LICENSEMAPPINGS["GNU Free Documentation License v1.2"]="GFDL-1.2"
LICENSEMAPPINGS["GNU Free Documentation License v1.3"]="GFDL-1.3"
LICENSEMAPPINGS["GNU General Public License v1.0"]="GPL-1.0"
LICENSEMAPPINGS["GNU General Public License v1.0 or later"]="GPL-1.0+"
LICENSEMAPPINGS["GNU General Public License v2.0"]="GPL-2.0"
LICENSEMAPPINGS["GNU General Public License v2.0 or later"]="GPL-2.0+"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Autoconf exception"]="GPL-2.0-with-autoconf-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Bison exception"]="GPL-2.0-with-bison-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Font exception"]="GPL-2.0-with-font-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/GCC Runtime Library exception"]="GPL-2.0-with-GCC-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 with Classpath Exception"]="GPL-2.0-with-classpath-exception"
LICENSEMAPPINGS["GNU General Public License v3.0"]="GPL-3.0"
LICENSEMAPPINGS["GNU General Public License v3.0 or later"]="GPL-3.0+"
LICENSEMAPPINGS["GNU General Public License v3.0 w/Autoconf exception"]="GPL-3.0-with-autoconf-exception"
LICENSEMAPPINGS["GNU General Public License v3.0 w/GCC Runtime Library exception"]="GPL-3.0-with-GCC-exception"
LICENSEMAPPINGS["GNU JavaMail exception"]="gnu-javamail-exception"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1"]="LGPL-2.1"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1 or later"]="LGPL-2.1+"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0"]="LGPL-3.0"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0 or later"]="LGPL-3.0+"
LICENSEMAPPINGS["GNU Library General Public License v2.0"]="LGPL-2.0"
LICENSEMAPPINGS["GNU Library General Public License v2.0 or later"]="LGPL-2.0+"
LICENSEMAPPINGS["gnuplot License"]="gnuplot"
LICENSEMAPPINGS["gSOAP Public License v1.3b"]="gSOAP-1.3b"
LICENSEMAPPINGS["Haskell Language Report License"]="HaskellReport"
LICENSEMAPPINGS["Historic Permission Notice and Disclaimer"]="HPND"
LICENSEMAPPINGS["i2p GPL+Java Exception"]="i2p-gpl-java-exception"
LICENSEMAPPINGS["IBM PowerPC Initialization and Boot Software"]="IBM-pibs"
LICENSEMAPPINGS["IBM Public License v1.0"]="IPL-1.0"
LICENSEMAPPINGS["ICU License"]="ICU"
LICENSEMAPPINGS["ImageMagick (Apache 2.0) License"]="ImageMagick"
LICENSEMAPPINGS["iMatix Standard Function Library Agreement"]="iMatix"
LICENSEMAPPINGS["Imlib2 License"]="Imlib2"
LICENSEMAPPINGS["Independent JPEG Group License"]="IJG"
LICENSEMAPPINGS["Info-ZIP License"]="Info-ZIP"
LICENSEMAPPINGS["Intel ACPI Software License Agreement"]="Intel-ACPI"
LICENSEMAPPINGS["Intel Open Source License"]="Intel"
LICENSEMAPPINGS["Interbase Public License v1.0"]="Interbase-1.0"
LICENSEMAPPINGS["IPA Font License"]="IPA"
LICENSEMAPPINGS["ISC License (ISC)"]="ISC"
LICENSEMAPPINGS["JasPer License Version 2.0"]="JasPer-2.0"
LICENSEMAPPINGS["Jython License"]="CNRI-Jython"
LICENSEMAPPINGS["LaTeX Project Public License v1.0"]="LPPL-1.0"
LICENSEMAPPINGS["LaTeX Project Public License v1.1"]="LPPL-1.1"
LICENSEMAPPINGS["LaTeX Project Public License v1.2"]="LPPL-1.2"
LICENSEMAPPINGS["LaTeX Project Public License v1.3a"]="LPPL-1.3a"
LICENSEMAPPINGS["LaTeX Project Public License v1.3c"]="LPPL-1.3c"
LICENSEMAPPINGS["Latex2e License"]="Latex2e"
LICENSEMAPPINGS["Lawrence Berkeley National Labs BSD variant license"]="BSD-3-Clause-LBNL"
LICENSEMAPPINGS["Leptonica License"]="Leptonica"
LICENSEMAPPINGS["Lesser General Public License For Linguistic Resources"]="LGPLLR"
LICENSEMAPPINGS["libpng License"]="Libpng"
LICENSEMAPPINGS["libtiff License"]="libtiff"
LICENSEMAPPINGS["Libtool Exception"]="Libtool-exception"
LICENSEMAPPINGS["Licence Art Libre 1.2"]="LAL-1.2"
LICENSEMAPPINGS["Licence Art Libre 1.3"]="LAL-1.3"
LICENSEMAPPINGS["Licence Libre du Québec – Permissive version 1.1"]="LiLiQ-P-1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité forte version 1.1"]="LiLiQ-Rplus-1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité version 1.1"]="LiLiQ-R-1.1"
LICENSEMAPPINGS["Linux Kernel Variant of OpenIB.org license"]="Linux-OpenIB"
LICENSEMAPPINGS["Linux Syscall Note"]="Linux-syscall-note"
LICENSEMAPPINGS["LLVM Exception"]="LLVM-exception"
LICENSEMAPPINGS["Lucent Public License v1.0"]="LPL-1.0"
LICENSEMAPPINGS["Lucent Public License v1.02 (Plan9)"]="LPL-1.02"
LICENSEMAPPINGS["LZMA exception"]="LZMA-exception"
LICENSEMAPPINGS["Macros and Inline Functions Exception"]="mif-exception"
LICENSEMAPPINGS["MakeIndex License"]="MakeIndex"
LICENSEMAPPINGS["Matrix Template Library License"]="MTLL"
LICENSEMAPPINGS["Microsoft Public License (Ms-PL)"]="MS-PL"
LICENSEMAPPINGS["Microsoft Reciprocal License (Ms-RL)"]="MS-RL"
LICENSEMAPPINGS["MirOS Licence"]="MirOS"
LICENSEMAPPINGS["MIT +no-false-attribs license"]="MITNFA"
LICENSEMAPPINGS["MIT License (Expat)"]="MIT"
LICENSEMAPPINGS["MIT-Style License"]="MIT"
LICENSEMAPPINGS["MIT No Attribution"]="MIT-0"
LICENSEMAPPINGS["Motosoto License"]="Motosoto"
LICENSEMAPPINGS["Mozilla Public License 1.0"]="MPL-1.0"
LICENSEMAPPINGS["Mozilla Public License 1.1"]="MPL-1.1"
LICENSEMAPPINGS["Mozilla Public License 2.0"]="MPL-2.0"
LICENSEMAPPINGS["Mozilla Public License 2.0 (no copyleft exception)"]="MPL-2.0-no-copyleft-exception"
LICENSEMAPPINGS["MPICH2 License"]="mpich2"
LICENSEMAPPINGS["Multics License"]="Multics"
LICENSEMAPPINGS["Mup License"]="Mup"
LICENSEMAPPINGS["NASA Open Source Agreement 1.3"]="NASA-1.3"
LICENSEMAPPINGS["Naumen Public License"]="Naumen"
LICENSEMAPPINGS["Net Boolean Public License v1"]="NBPL-1.0"
LICENSEMAPPINGS["netCDF License"]="NetCDF"
LICENSEMAPPINGS["Nethack General Public License"]="NGPL"
LICENSEMAPPINGS["Netizen Open Source License v1.0"]="NOSL"
LICENSEMAPPINGS["Netscape Public License 1.0"]="NPL-1.0"
LICENSEMAPPINGS["Netscape Public License 1.1"]="NPL-1.1"
LICENSEMAPPINGS["Net-SNMP License"]="Net-SNMP"
LICENSEMAPPINGS["Newsletr License"]="Newsletr"
LICENSEMAPPINGS["No Limit Public License"]="NLPL"
LICENSEMAPPINGS["Nokia Open Source License"]="Nokia"
LICENSEMAPPINGS["Nokia Qt LGPL exception 1.1"]="Nokia-Qt-exception-1.1"
LICENSEMAPPINGS["Non-Profit Open Software License 3.0"]="NPOSL-3.0"
LICENSEMAPPINGS["Norwegian Licence for Open Government Data"]="NLOD-1.0"
LICENSEMAPPINGS["Noweb License"]="Noweb"
LICENSEMAPPINGS["NTP License"]="NTP"
LICENSEMAPPINGS["Nunit License"]="Nunit"
LICENSEMAPPINGS["OCLC Research Public License 2.0"]="OCLC-2.0"
LICENSEMAPPINGS["ODC Open Database License v1.0"]="ODbL-1.0"
LICENSEMAPPINGS["ODC Public Domain Dedication & License 1.0"]="PDDL-1.0"
LICENSEMAPPINGS["Open CASCADE Exception 1.0"]="OCCT-exception-1.0"
LICENSEMAPPINGS["Open CASCADE Technology Public License"]="OCCT-PL"
LICENSEMAPPINGS["Open Group Test Suite License"]="OGTSL"
LICENSEMAPPINGS["Open LDAP Public License 2.2.2"]="OLDAP-2.2.2"
LICENSEMAPPINGS["Open LDAP Public License v1.1"]="OLDAP-1.1"
LICENSEMAPPINGS["Open LDAP Public License v1.3"]="OLDAP-1.3"
LICENSEMAPPINGS["Open LDAP Public License v1.4"]="OLDAP-1.4"
LICENSEMAPPINGS["Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)"]="OLDAP-2.0"
LICENSEMAPPINGS["Open LDAP Public License v2.1"]="OLDAP-2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.2"]="OLDAP-2.2"
LICENSEMAPPINGS["Open LDAP Public License v2.2.1"]="OLDAP-2.2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.5"]="OLDAP-2.5"
LICENSEMAPPINGS["Open LDAP Public License v2.6"]="OLDAP-2.6"
LICENSEMAPPINGS["Open Public License v1.0"]="OPL-1.0"
LICENSEMAPPINGS["Open Software License 1.0"]="OSL-1.0"
LICENSEMAPPINGS["Open Software License 1.1"]="OSL-1.1"
LICENSEMAPPINGS["Open Software License 2.0"]="OSL-2.0"
LICENSEMAPPINGS["Open Software License 2.1"]="OSL-2.1"
LICENSEMAPPINGS["Open Software License 3.0"]="OSL-3.0"
LICENSEMAPPINGS["OpenJDK Assembly exception 1.0"]="OpenJDK-assembly-exception-1.0"
LICENSEMAPPINGS["OpenLDAP Public License v1.2"]="OLDAP-1.2"
LICENSEMAPPINGS["OpenLDAP Public License v2.0.1"]="OLDAP-2.0.1"
LICENSEMAPPINGS["OpenLDAP Public License v2.3"]="OLDAP-2.3"
LICENSEMAPPINGS["OpenLDAP Public License v2.4"]="OLDAP-2.4"
LICENSEMAPPINGS["OpenLDAP Public License v2.7"]="OLDAP-2.7"
LICENSEMAPPINGS["OpenLDAP Public License v2.8"]="OLDAP-2.8"
LICENSEMAPPINGS["OpenSSL License"]="OpenSSL"
LICENSEMAPPINGS["OpenVPN OpenSSL Exception"]="openvpn-openssl-exception"
LICENSEMAPPINGS["OSET Public License version 2.1"]="OSET-PL-2.1"
LICENSEMAPPINGS["PERL Artistic License"]="Artistic-1.0-Perl"
LICENSEMAPPINGS["PHP License v3.0"]="PHP-3.0"
LICENSEMAPPINGS["PHP License v3.01"]="PHP-3.01"
LICENSEMAPPINGS["Plexus Classworlds License"]="Plexus"
LICENSEMAPPINGS["psfrag License"]="psfrag"
LICENSEMAPPINGS["psutils License"]="psutils"
LICENSEMAPPINGS["Python License 2.0"]="Python-2.0"
LICENSEMAPPINGS["Q Public License 1.0"]="QPL-1.0"
LICENSEMAPPINGS["Qhull License"]="Qhull"
LICENSEMAPPINGS["Qwt exception 1.0"]="Qwt-exception-1.0"
LICENSEMAPPINGS["Rdisc License"]="Rdisc"
LICENSEMAPPINGS["RealNetworks Public Source License v1.0"]="RPSL-1.0"
LICENSEMAPPINGS["Reciprocal Public License"]="RPL-1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.1"]="RPL-1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.5"]="RPL-1.5"
LICENSEMAPPINGS["Red Hat eCos Public License v1.1"]="RHeCos-1.1"
LICENSEMAPPINGS["Ricoh Source Code Public License"]="RSCPL"
LICENSEMAPPINGS["RSA Message-Digest License"]="RSA-MD"
LICENSEMAPPINGS["Ruby License"]="Ruby"
LICENSEMAPPINGS["Sax Public Domain Notice"]="SAX-PD"
LICENSEMAPPINGS["Saxpath License"]="Saxpath"
LICENSEMAPPINGS["SCEA Shared Source License"]="SCEA"
LICENSEMAPPINGS["Scheme Widget Library (SWL) Software License Agreement"]="SWL"
LICENSEMAPPINGS["Secure Messaging Protocol Public License"]="SMPPL"
LICENSEMAPPINGS["Sendmail License"]="Sendmail"
LICENSEMAPPINGS["SGI Free Software License B v1.0"]="SGI-B-1.0"
LICENSEMAPPINGS["SGI Free Software License B v1.1"]="SGI-B-1.1"
LICENSEMAPPINGS["SGI Free Software License B v2.0"]="SGI-B-2.0"
LICENSEMAPPINGS["SIL Open Font License 1.0"]="OFL-1.0"
LICENSEMAPPINGS["SIL Open Font License 1.1"]="OFL-1.1"
LICENSEMAPPINGS["Simple Public License 2.0"]="SimPL-2.0"
LICENSEMAPPINGS["Sleepycat License"]="Sleepycat"
LICENSEMAPPINGS["SNIA Public License 1.1"]="SNIA"
LICENSEMAPPINGS["Spencer License 86"]="Spencer-86"
LICENSEMAPPINGS["Spencer License 94"]="Spencer-94"
LICENSEMAPPINGS["Spencer License 99"]="Spencer-99"
LICENSEMAPPINGS["Standard ML of New Jersey License"]="SMLNJ"
LICENSEMAPPINGS["SugarCRM Public License v1.1.3"]="SugarCRM-1.1.3"
LICENSEMAPPINGS["Sun Industry Standards Source License (SISSL) v1.1"]="SISSL"
LICENSEMAPPINGS["Sun Industry Standards Source License v1.2"]="SISSL-1.2"
LICENSEMAPPINGS["Sun Public License v1.0"]="SPL-1.0"
LICENSEMAPPINGS["Sybase Open Watcom Public License 1.0"]="Watcom-1.0"
LICENSEMAPPINGS["Tcl License"]="TCL"
LICENSEMAPPINGS["TCP Wrappers License"]="TCP-wrappers"
LICENSEMAPPINGS["The Beerware License"]="Beerware"
LICENSEMAPPINGS["The Code Project Open License (CPOL) 1.02"]="CPOL-1.02"
LICENSEMAPPINGS["The Curl License"]="curl"
LICENSEMAPPINGS["The JSON License"]="JSON"
LICENSEMAPPINGS["The PostgreSQL License"]="PostgreSQL"
LICENSEMAPPINGS["The Unlicense"]="Unlicense"
LICENSEMAPPINGS["TMate License"]="TMate"
LICENSEMAPPINGS["TORQUE v2.5+ Software License v1.1"]="TORQUE-1.1"
LICENSEMAPPINGS["Trusster Open Source License"]="TOSL"
LICENSEMAPPINGS["U-Boot exception 2.0"]="u-boot-exception-2.0"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2015)"]="Unicode-DFS-2015"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2016)"]="Unicode-DFS-2016"
LICENSEMAPPINGS["Unicode Terms of Use"]="Unicode-TOU"
LICENSEMAPPINGS["Universal Permissive License v1.0"]="UPL-1.0"
LICENSEMAPPINGS["University of Illinois/NCSA Open Source License"]="NCSA"
LICENSEMAPPINGS["US Naval Research Laboratory (NRL) v1.1"]="NRL"
LICENSEMAPPINGS["Vim License"]="Vim"
LICENSEMAPPINGS["VOSTROM Public License for Open Source"]="VOSTROM"
LICENSEMAPPINGS["Vovida Software License v1.0"]="VSL-1.0"
LICENSEMAPPINGS["W3C Software Notice and Document License (2015-05-13)"]="W3C-20150513"
LICENSEMAPPINGS["W3C Software Notice and License (1998-07-20)"]="W3C-19980720"
LICENSEMAPPINGS["W3C Software Notice and License (2002-12-31)"]="W3C"
LICENSEMAPPINGS["Wsuipa License"]="Wsuipa"
LICENSEMAPPINGS["WxWindows Library Exception 3.1"]="WxWindows-exception-3.1"
LICENSEMAPPINGS["wxWindows Library Licence, Version 3.1"]="wxWindows"
LICENSEMAPPINGS["X.Net License"]="Xnet"
LICENSEMAPPINGS["X11 License"]="X11"
LICENSEMAPPINGS["Xerox License"]="Xerox"
LICENSEMAPPINGS["XFree86 License 1.1"]="XFree86-1.1"
LICENSEMAPPINGS["xinetd License"]="xinetd"
LICENSEMAPPINGS["XPP License"]="xpp"
LICENSEMAPPINGS["XSkat"]="XSkat"
LICENSEMAPPINGS["Yahoo! Public License v1.0"]="YPL-1.0"
LICENSEMAPPINGS["Yahoo! Public License v1.1"]="YPL-1.1"
LICENSEMAPPINGS["Zed License"]="Zed"
LICENSEMAPPINGS["Zend License v2.0"]="Zend-2.0"
LICENSEMAPPINGS["Zimbra Public License v1.4"]="Zimbra-1.4"
LICENSEMAPPINGS["Zimbra Publice License v1.3"]="Zimbra-1.3"
LICENSEMAPPINGS["zlib License"]="Zlib"
LICENSEMAPPINGS["zlib/libpng License with Acknowledgement"]="zlib-acknowledgement"
LICENSEMAPPINGS["Zope Public License 1.1"]="ZPL-1.1"
LICENSEMAPPINGS["Zope Public License 2.0"]="ZPL-2.0"
LICENSEMAPPINGS["Zope Public License 2.1"]="ZPL-2.1"
'''
# Map license names as reported by the scan tool / UI (keys) to the SPDX
# *full* license names (values) used for reporting.  Keys must match the
# upstream tool's spelling exactly — including apparent typos such as
# "Zimbra Publice License v1.3" below — so do not "fix" key text here.
# NOTE(review): assumes LICENSEMAPPINGS was initialized earlier in this
# module (these lines only extend it) — confirm against the file header.
LICENSEMAPPINGS["389 Directory Server Exception"] = "389 Directory Server Exception"
LICENSEMAPPINGS["3dfx Glide License"] = "3dfx Glide License"
LICENSEMAPPINGS["3DFX GLIDE Source Code General Public License"] = "3dfx Glide License"
LICENSEMAPPINGS["Abstyles License"] = "Abstyles License"
LICENSEMAPPINGS["Academic Free License v1.1"] = "Academic Free License v1.1"
LICENSEMAPPINGS["Academic Free License v1.2"] = "Academic Free License v1.2"
LICENSEMAPPINGS["Academic Free License v2.0"] = "Academic Free License v2.0"
LICENSEMAPPINGS["Academic Free License v2.1"] = "Academic Free License v2.1"
LICENSEMAPPINGS["Academic Free License v3.0"] = "Academic Free License v3.0"
LICENSEMAPPINGS["Academy of Motion Picture Arts and Sciences BSD"] = "Academy of Motion Picture Arts and Sciences BSD"
LICENSEMAPPINGS["Adaptive Public License 1.0"] = "Adaptive Public License 1.0"
LICENSEMAPPINGS["Adobe Glyph List License"] = "Adobe Glyph List License"
LICENSEMAPPINGS["Adobe Postscript AFM License"] = "Adobe Postscript AFM License"
LICENSEMAPPINGS["Adobe Systems Incorporated Source Code License Agreement"] = "Adobe Systems Incorporated Source Code License Agreement"
LICENSEMAPPINGS["Affero General Public License v1.0"] = "Affero General Public License v1.0"
LICENSEMAPPINGS["Afmparse License"] = "Afmparse License"
LICENSEMAPPINGS["Aladdin Free Public License v8"] = "Aladdin Free Public License"
LICENSEMAPPINGS["Allegro Giftware License"] = "Giftware License"
LICENSEMAPPINGS["Amazon Digital Services License"] = "Amazon Digital Services License"
LICENSEMAPPINGS["AMD's plpa_map.c License"] = "AMD's plpa_map.c License"
LICENSEMAPPINGS["ANTLR Software Rights Notice"] = "ANTLR Software Rights Notice"
LICENSEMAPPINGS["Apache License 1.0"] = "Apache License 1.0"
LICENSEMAPPINGS["Apache License 1.1"] = "Apache License 1.1"
LICENSEMAPPINGS["Apache License 2.0"] = "Apache License 2.0"
LICENSEMAPPINGS["Apple MIT License"] = "Apple MIT License"
LICENSEMAPPINGS["Apple Public Source License 1.0"] = "Apple Public Source License 1.0"
LICENSEMAPPINGS["Apple Public Source License 1.1"] = "Apple Public Source License 1.1"
LICENSEMAPPINGS["Apple Public Source License 1.2"] = "Apple Public Source License 1.2"
LICENSEMAPPINGS["Apple Public Source License 2.0"] = "Apple Public Source License 2.0"
LICENSEMAPPINGS["Artistic License 1.0"] = "Artistic License 1.0"
LICENSEMAPPINGS["Artistic License 1.0 w/clause 8"] = "Artistic License 1.0 w/clause 8"
LICENSEMAPPINGS["Artistic License 2.0"] = "Artistic License 2.0"
LICENSEMAPPINGS["Attribution Assurance License"] = "Attribution Assurance License"
LICENSEMAPPINGS["Autoconf exception 2.0"] = "Autoconf exception 2.0"
LICENSEMAPPINGS["Autoconf exception 3.0"] = "Autoconf exception 3.0"
LICENSEMAPPINGS["Bahyph License"] = "Bahyph License"
LICENSEMAPPINGS["Barr License"] = "Barr License"
LICENSEMAPPINGS["Bison exception 2.2"] = "Bison exception 2.2"
LICENSEMAPPINGS["BitTorrent Open Source License v1.0"] = "BitTorrent Open Source License v1.0"
LICENSEMAPPINGS["BitTorrent Open Source License v1.1"] = "BitTorrent Open Source License v1.1"
LICENSEMAPPINGS["Boost Software License 1.0"] = "Boost Software License 1.0"
LICENSEMAPPINGS["Bootloader Distribution Exception"] = "Bootloader Distribution Exception"
LICENSEMAPPINGS["Borceux license"] = "Borceux license"
LICENSEMAPPINGS["BSD 1-Clause License"] = "BSD 1-Clause License"
LICENSEMAPPINGS["BSD 2-clause \"Simplified\" or \"FreeBSD\" License"] = "BSD 2-clause \"Simplified\" License"
LICENSEMAPPINGS["BSD 2-clause FreeBSD License"] = "BSD 2-clause FreeBSD License"
LICENSEMAPPINGS["BSD 2-clause NetBSD License"] = "BSD 2-clause NetBSD License"
LICENSEMAPPINGS["BSD 3-clause \"New\" or \"Revised\" License"] = "BSD 3-clause \"New\" or \"Revised\" License"
LICENSEMAPPINGS["BSD 3-clause Clear License"] = "BSD 3-clause Clear License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License"] = "BSD 3-Clause No Nuclear License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License 2014"] = "BSD 3-Clause No Nuclear License 2014"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear Warranty"] = "BSD 3-Clause No Nuclear Warranty"
LICENSEMAPPINGS["BSD 4-clause \"Original\" or \"Old\" License"] = "BSD 4-clause \"Original\" or \"Old\" License"
LICENSEMAPPINGS["BSD Protection License"] = "BSD Protection License"
LICENSEMAPPINGS["BSD Source Code Attribution"] = "BSD Source Code Attribution"
LICENSEMAPPINGS["BSD with attribution"] = "BSD with attribution"
LICENSEMAPPINGS["BSD Zero Clause License"] = "BSD Zero Clause License"
LICENSEMAPPINGS["BSD-2-Clause Plus Patent License"] = "BSD-2-Clause Plus Patent License"
LICENSEMAPPINGS["BSD-4-Clause (University of California-Specific)"] = "BSD-4-Clause (University of California-Specific)"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.5"] = "bzip2 and libbzip2 License v1.0.5"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.6"] = "bzip2 and libbzip2 License v1.0.6"
LICENSEMAPPINGS["Caldera License"] = "Caldera License"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.0"] = "CeCILL Free Software License Agreement v1.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.1"] = "CeCILL Free Software License Agreement v1.1"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.0"] = "CeCILL Free Software License Agreement v2.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.1"] = "CeCILL Free Software License Agreement v2.1"
LICENSEMAPPINGS["CeCILL-B Free Software License Agreement v1.0"] = "CeCILL-B Free Software License Agreement"
LICENSEMAPPINGS["CeCILL-C Free Software License Agreement v1.0"] = "CeCILL-C Free Software License Agreement"
LICENSEMAPPINGS["Clarified Artistic License"] = "Clarified Artistic License"
LICENSEMAPPINGS["Classpath exception 2.0"] = "Classpath exception 2.0"
LICENSEMAPPINGS["CLISP exception 2.0"] = "CLISP exception 2.0"
LICENSEMAPPINGS["CMU License"] = "CMU License"
LICENSEMAPPINGS["CNRI Python License"] = "CNRI Python License"
LICENSEMAPPINGS["CNRI Python Open Source GPL Compatible License Agreement"] = "CNRI Python Open Source GPL Compatible License Agreement"
LICENSEMAPPINGS["Common Development and Distribution License 1.0"] = "Common Development and Distribution License 1.0"
LICENSEMAPPINGS["Common Development and Distribution License 1.1"] = "Common Development and Distribution License 1.1"
LICENSEMAPPINGS["Common Public Attribution License 1.0"] = "Common Public Attribution License 1.0"
LICENSEMAPPINGS["Common Public License"] = "Common Public License 1.0"
LICENSEMAPPINGS["Community Data License Agreement Permissive 1.0"] = "Community Data License Agreement Permissive 1.0"
LICENSEMAPPINGS["Community Data License Agreement Sharing 1.0"] = "Community Data License Agreement Sharing 1.0"
LICENSEMAPPINGS["Computer Associates Trusted Open Source License 1.1"] = "Computer Associates Trusted Open Source License 1.1"
LICENSEMAPPINGS["Condor Public License v1.1"] = "Condor Public License v1.1"
LICENSEMAPPINGS["Creative Commons Attribution 1.0"] = "Creative Commons Attribution 1.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.0"] = "Creative Commons Attribution 2.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.5"] = "Creative Commons Attribution 2.5"
LICENSEMAPPINGS["Creative Commons Attribution 3.0"] = "Creative Commons Attribution 3.0"
LICENSEMAPPINGS["Creative Commons Attribution 4.0"] = "Creative Commons Attribution 4.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 1.0"] = "Creative Commons Attribution No Derivatives 1.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.0"] = "Creative Commons Attribution No Derivatives 2.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.5"] = "Creative Commons Attribution No Derivatives 2.5"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 3.0"] = "Creative Commons Attribution No Derivatives 3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 1.0"] = "Creative Commons Attribution Non Commercial 1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.0"] = "Creative Commons Attribution Non Commercial 2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.5"] = "Creative Commons Attribution Non Commercial 2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 3.0"] = "Creative Commons Attribution Non Commercial 3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 1.0"] = "Creative Commons Attribution Non Commercial No Derivatives 1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.0"] = "Creative Commons Attribution Non Commercial No Derivatives 2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.5"] = "Creative Commons Attribution Non Commercial No Derivatives 2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 3.0"] = "Creative Commons Attribution Non Commercial No Derivatives 3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 1.0"] = "Creative Commons Attribution Non Commercial Share Alike 1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.0"] = "Creative Commons Attribution Non Commercial Share Alike 2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.5"] = "Creative Commons Attribution Non Commercial Share Alike 2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 3.0"] = "Creative Commons Attribution Non Commercial Share Alike 3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 1.0"] = "Creative Commons Attribution Share Alike 1.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.0"] = "Creative Commons Attribution Share Alike 2.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.5"] = "Creative Commons Attribution Share Alike 2.5"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 3.0"] = "Creative Commons Attribution Share Alike 3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 4.0"] = "Creative Commons Attribution Share Alike 4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NoDerivatives 4.0"] = "Creative Commons Attribution No Derivatives 4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial 4.0"] = "Creative Commons Attribution Non Commercial 4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-NoDerivatives 4.0"] = "Creative Commons Attribution Non Commercial No Derivatives 4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-ShareAlike 4.0"] = "Creative Commons Attribution Non Commercial Share Alike 4.0"
LICENSEMAPPINGS["Creative Commons CC0 1.0 Universal"] = "Creative Commons Zero v1.0 Universal"
LICENSEMAPPINGS["Crossword License"] = "Crossword License"
LICENSEMAPPINGS["CrystalStacker License"] = "CrystalStacker License"
LICENSEMAPPINGS["CUA Office Public License v1.0"] = "CUA Office Public License v1.0"
LICENSEMAPPINGS["Cube License"] = "Cube License"
LICENSEMAPPINGS["Deutsche Freie Software Lizenz"] = "Deutsche Freie Software Lizenz"
LICENSEMAPPINGS["diffmark license"] = "diffmark license"
LICENSEMAPPINGS["DigiRule FOSS License Exception"] = "DigiRule FOSS License Exception"
LICENSEMAPPINGS["Do What The Fuck You Want To Public License"] = "Do What The F*ck You Want To Public License"
LICENSEMAPPINGS["DOC Software License"] = "DOC License"
LICENSEMAPPINGS["Dotseqn License"] = "Dotseqn License"
LICENSEMAPPINGS["DSDP License"] = "DSDP License"
LICENSEMAPPINGS["dvipdfm License"] = "dvipdfm License"
LICENSEMAPPINGS["Eclipse Public License 1.0"] = "Eclipse Public License 1.0"
LICENSEMAPPINGS["Eclipse Public License 2.0"] = "Eclipse Public License 2.0"
LICENSEMAPPINGS["eCos exception 2.0"] = "eCos exception 2.0"
LICENSEMAPPINGS["eCos license version 2.0"] = "eCos license version 2.0"
LICENSEMAPPINGS["Educational Community License v1.0"] = "Educational Community License v1.0"
LICENSEMAPPINGS["Educational Community License v2.0"] = "Educational Community License v2.0"
LICENSEMAPPINGS["eGenix.com Public License 1.1.0"] = "eGenix.com Public License 1.1.0"
LICENSEMAPPINGS["Eiffel Forum License v1.0"] = "Eiffel Forum License v1.0"
LICENSEMAPPINGS["Eiffel Forum License v2.0"] = "Eiffel Forum License v2.0"
LICENSEMAPPINGS["Enlightenment License (e16)"] = "Enlightenment License (e16)"
LICENSEMAPPINGS["enna License"] = "enna License"
LICENSEMAPPINGS["Entessa Public License"] = "Entessa Public License v1.0"
LICENSEMAPPINGS["Erlang Public License v1.1"] = "Erlang Public License v1.1"
LICENSEMAPPINGS["EU DataGrid Software License"] = "EU DataGrid Software License"
LICENSEMAPPINGS["European Union Public License 1.0"] = "European Union Public License 1.0"
LICENSEMAPPINGS["European Union Public License 1.1"] = "European Union Public License 1.1"
LICENSEMAPPINGS["European Union Public License 1.2"] = "European Union Public License 1.2"
LICENSEMAPPINGS["Eurosym License v2"] = "Eurosym License"
LICENSEMAPPINGS["Fair License"] = "Fair License"
LICENSEMAPPINGS["FastCGI"] = "Open Market License"
LICENSEMAPPINGS["Fawkes Runtime Exception"] = "Fawkes Runtime Exception"
LICENSEMAPPINGS["feh License"] = "feh License"
LICENSEMAPPINGS["FLTK exception"] = "FLTK exception"
LICENSEMAPPINGS["Font exception 2.0"] = "Font exception 2.0"
LICENSEMAPPINGS["Frameworx Open License 1.0"] = "Frameworx Open License 1.0"
LICENSEMAPPINGS["FreeImage Public License v1.0"] = "FreeImage Public License v1.0"
LICENSEMAPPINGS["FreeRTOS Exception 2.0"] = "FreeRTOS Exception 2.0"
LICENSEMAPPINGS["FreeType License"] = "Freetype Project License"
LICENSEMAPPINGS["FSF All Permissive License"] = "FSF All Permissive License"
LICENSEMAPPINGS["FSF Unlimited License"] = "FSF Unlimited License"
LICENSEMAPPINGS["FSF Unlimited License (with License Retention)"] = "FSF Unlimited License (with License Retention)"
LICENSEMAPPINGS["GCC Runtime Library exception 2.0"] = "GCC Runtime Library exception 2.0"
LICENSEMAPPINGS["GCC Runtime Library exception 3.1"] = "GCC Runtime Library exception 3.1"
LICENSEMAPPINGS["GL2PS License, Version 2"] = "GL2PS License"
LICENSEMAPPINGS["Glulxe License"] = "Glulxe License"
LICENSEMAPPINGS["GNU Affero General Public License v3.0"] = "GNU Affero General Public License v3.0"
LICENSEMAPPINGS["GNU Free Documentation License v1.1"] = "GNU Free Documentation License v1.1"
LICENSEMAPPINGS["GNU Free Documentation License v1.2"] = "GNU Free Documentation License v1.2"
LICENSEMAPPINGS["GNU Free Documentation License v1.3"] = "GNU Free Documentation License v1.3"
LICENSEMAPPINGS["GNU General Public License v1.0"] = "GNU General Public License v1.0 only"
LICENSEMAPPINGS["GNU General Public License v1.0 or later"] = "GNU General Public License v1.0 or later"
LICENSEMAPPINGS["GNU General Public License v2.0"] = "GNU General Public License v2.0 only"
LICENSEMAPPINGS["GNU General Public License v2.0 or later"] = "GNU General Public License v2.0 or later"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Autoconf exception"] = "GNU General Public License v2.0 w/Autoconf exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Bison exception"] = "GNU General Public License v2.0 w/Bison exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Font exception"] = "GNU General Public License v2.0 w/Font exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/GCC Runtime Library exception"] = "GNU General Public License v2.0 w/GCC Runtime Library exception"
LICENSEMAPPINGS["GNU General Public License v2.0 with Classpath Exception"] = "GNU General Public License v2.0 w/Classpath exception"
LICENSEMAPPINGS["GNU General Public License v3.0"] = "GNU General Public License v3.0 only"
LICENSEMAPPINGS["GNU General Public License v3.0 or later"] = "GNU General Public License v3.0 or later"
LICENSEMAPPINGS["GNU General Public License v3.0 w/Autoconf exception"] = "GNU General Public License v3.0 w/Autoconf exception"
LICENSEMAPPINGS["GNU General Public License v3.0 w/GCC Runtime Library exception"] = "GNU General Public License v3.0 w/GCC Runtime Library exception"
LICENSEMAPPINGS["GNU JavaMail exception"] = "GNU JavaMail exception"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1"] = "GNU Lesser General Public License v2.1 only"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1 or later"] = "GNU Lesser General Public License v2.1 or later"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0"] = "GNU Lesser General Public License v3.0 only"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0 or later"] = "GNU Lesser General Public License v3.0 or later"
LICENSEMAPPINGS["GNU Library General Public License v2.0"] = "GNU Library General Public License v2 only"
LICENSEMAPPINGS["GNU Library General Public License v2.0 or later"] = "GNU Library General Public License v2 or later"
LICENSEMAPPINGS["gnuplot License"] = "gnuplot License"
LICENSEMAPPINGS["gSOAP Public License v1.3b"] = "gSOAP Public License v1.3b"
LICENSEMAPPINGS["Haskell Language Report License"] = "Haskell Language Report License"
LICENSEMAPPINGS["Historic Permission Notice and Disclaimer"] = "Historic Permission Notice and Disclaimer"
LICENSEMAPPINGS["i2p GPL+Java Exception"] = "i2p GPL+Java Exception"
LICENSEMAPPINGS["IBM PowerPC Initialization and Boot Software"] = "IBM PowerPC Initialization and Boot Software"
LICENSEMAPPINGS["IBM Public License v1.0"] = "IBM Public License v1.0"
LICENSEMAPPINGS["ICU License"] = "ICU License"
LICENSEMAPPINGS["ImageMagick (Apache 2.0) License"] = "ImageMagick License"
LICENSEMAPPINGS["iMatix Standard Function Library Agreement"] = "iMatix Standard Function Library Agreement"
LICENSEMAPPINGS["Imlib2 License"] = "Imlib2 License"
LICENSEMAPPINGS["Independent JPEG Group License"] = "Independent JPEG Group License"
LICENSEMAPPINGS["Info-ZIP License"] = "Info-ZIP License"
LICENSEMAPPINGS["Intel ACPI Software License Agreement"] = "Intel ACPI Software License Agreement"
LICENSEMAPPINGS["Intel Open Source License"] = "Intel Open Source License"
LICENSEMAPPINGS["Interbase Public License v1.0"] = "Interbase Public License v1.0"
LICENSEMAPPINGS["IPA Font License"] = "IPA Font License"
LICENSEMAPPINGS["ISC License (ISC)"] = "ISC License"
LICENSEMAPPINGS["JasPer License Version 2.0"] = "JasPer License"
LICENSEMAPPINGS["Jython License"] = "CNRI Jython License"
LICENSEMAPPINGS["LaTeX Project Public License v1.0"] = "LaTeX Project Public License v1.0"
LICENSEMAPPINGS["LaTeX Project Public License v1.1"] = "LaTeX Project Public License v1.1"
LICENSEMAPPINGS["LaTeX Project Public License v1.2"] = "LaTeX Project Public License v1.2"
LICENSEMAPPINGS["LaTeX Project Public License v1.3a"] = "LaTeX Project Public License v1.3a"
LICENSEMAPPINGS["LaTeX Project Public License v1.3c"] = "LaTeX Project Public License v1.3c"
LICENSEMAPPINGS["Latex2e License"] = "Latex2e License"
LICENSEMAPPINGS["Lawrence Berkeley National Labs BSD variant license"] = "Lawrence Berkeley National Labs BSD variant license"
LICENSEMAPPINGS["Leptonica License"] = "Leptonica License"
LICENSEMAPPINGS["Lesser General Public License For Linguistic Resources"] = "Lesser General Public License For Linguistic Resources"
LICENSEMAPPINGS["libpng License"] = "libpng License"
LICENSEMAPPINGS["libtiff License"] = "libtiff License"
LICENSEMAPPINGS["Libtool Exception"] = "Libtool Exception"
LICENSEMAPPINGS["Licence Art Libre 1.2"] = "Licence Art Libre 1.2"
LICENSEMAPPINGS["Licence Art Libre 1.3"] = "Licence Art Libre 1.3"
LICENSEMAPPINGS["Licence Libre du Québec – Permissive version 1.1"] = "Licence Libre du Québec – Permissive version 1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité forte version 1.1"] = "Licence Libre du Québec – Réciprocité forte version 1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité version 1.1"] = "Licence Libre du Québec – Réciprocité version 1.1"
LICENSEMAPPINGS["Linux Kernel Variant of OpenIB.org license"] = "Linux Kernel Variant of OpenIB.org license"
LICENSEMAPPINGS["Linux Syscall Note"] = "Linux Syscall Note"
LICENSEMAPPINGS["LLVM Exception"] = "LLVM Exception"
LICENSEMAPPINGS["Lucent Public License v1.0"] = "Lucent Public License Version 1.0"
LICENSEMAPPINGS["Lucent Public License v1.02 (Plan9)"] = "Lucent Public License v1.02"
LICENSEMAPPINGS["LZMA exception"] = "LZMA exception"
LICENSEMAPPINGS["Macros and Inline Functions Exception"] = "Macros and Inline Functions Exception"
LICENSEMAPPINGS["MakeIndex License"] = "MakeIndex License"
LICENSEMAPPINGS["Matrix Template Library License"] = "Matrix Template Library License"
LICENSEMAPPINGS["Microsoft Public License (Ms-PL)"] = "Microsoft Public License"
LICENSEMAPPINGS["Microsoft Reciprocal License (Ms-RL)"] = "Microsoft Reciprocal License"
LICENSEMAPPINGS["MirOS Licence"] = "MirOS Licence"
LICENSEMAPPINGS["MIT +no-false-attribs license"] = "MIT +no-false-attribs license"
LICENSEMAPPINGS["MIT License (Expat)"] = "MIT license"
LICENSEMAPPINGS["MIT No Attribution"] = "MIT No Attribution"
LICENSEMAPPINGS["Motosoto License"] = "Motosoto License"
LICENSEMAPPINGS["Mozilla Public License 1.0"] = "Mozilla Public License 1.0"
LICENSEMAPPINGS["Mozilla Public License 1.1"] = "Mozilla Public License 1.1"
LICENSEMAPPINGS["Mozilla Public License 2.0"] = "Mozilla Public License 2.0"
LICENSEMAPPINGS["Mozilla Public License 2.0 (no copyleft exception)"] = "Mozilla Public License 2.0 (no copyleft exception)"
LICENSEMAPPINGS["MPICH2 License"] = "mpich2 License"
LICENSEMAPPINGS["Multics License"] = "Multics License"
LICENSEMAPPINGS["Mup License"] = "Mup License"
LICENSEMAPPINGS["NASA Open Source Agreement 1.3"] = "NASA Open Source Agreement 1.3"
LICENSEMAPPINGS["Naumen Public License"] = "Naumen Public License"
LICENSEMAPPINGS["Net Boolean Public License v1"] = "Net Boolean Public License v1"
LICENSEMAPPINGS["netCDF License"] = "NetCDF license"
LICENSEMAPPINGS["Nethack General Public License"] = "Nethack General Public License"
LICENSEMAPPINGS["Netizen Open Source License v1.0"] = "Netizen Open Source License"
LICENSEMAPPINGS["Netscape Public License 1.0"] = "Netscape Public License v1.0"
LICENSEMAPPINGS["Netscape Public License 1.1"] = "Netscape Public License v1.1"
LICENSEMAPPINGS["Net-SNMP License"] = "Net-SNMP License"
LICENSEMAPPINGS["Newsletr License"] = "Newsletr License"
LICENSEMAPPINGS["No Limit Public License"] = "No Limit Public License"
LICENSEMAPPINGS["Nokia Open Source License"] = "Nokia Open Source License"
LICENSEMAPPINGS["Nokia Qt LGPL exception 1.1"] = "Nokia Qt LGPL exception 1.1"
LICENSEMAPPINGS["Non-Profit Open Software License 3.0"] = "Non-Profit Open Software License 3.0"
LICENSEMAPPINGS["Norwegian Licence for Open Government Data"] = "Norwegian Licence for Open Government Data"
LICENSEMAPPINGS["Noweb License"] = "Noweb License"
LICENSEMAPPINGS["NTP License"] = "NTP License"
LICENSEMAPPINGS["Nunit License"] = "Nunit License"
LICENSEMAPPINGS["OCLC Research Public License 2.0"] = "OCLC Research Public License 2.0"
LICENSEMAPPINGS["ODC Open Database License v1.0"] = "ODC Open Database License v1.0"
LICENSEMAPPINGS["ODC Public Domain Dedication & License 1.0"] = "ODC Public Domain Dedication & License 1.0"
LICENSEMAPPINGS["Open CASCADE Exception 1.0"] = "Open CASCADE Exception 1.0"
LICENSEMAPPINGS["Open CASCADE Technology Public License"] = "Open CASCADE Technology Public License"
LICENSEMAPPINGS["Open Group Test Suite License"] = "Open Group Test Suite License"
LICENSEMAPPINGS["Open LDAP Public License 2.2.2"] = "Open LDAP Public License 2.2.2"
LICENSEMAPPINGS["Open LDAP Public License v1.1"] = "Open LDAP Public License v1.1"
LICENSEMAPPINGS["Open LDAP Public License v1.3"] = "Open LDAP Public License v1.3"
LICENSEMAPPINGS["Open LDAP Public License v1.4"] = "Open LDAP Public License v1.4"
LICENSEMAPPINGS["Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)"] = "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)"
LICENSEMAPPINGS["Open LDAP Public License v2.1"] = "Open LDAP Public License v2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.2"] = "Open LDAP Public License v2.2"
LICENSEMAPPINGS["Open LDAP Public License v2.2.1"] = "Open LDAP Public License v2.2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.5"] = "Open LDAP Public License v2.5"
LICENSEMAPPINGS["Open LDAP Public License v2.6"] = "Open LDAP Public License v2.6"
LICENSEMAPPINGS["Open Public License v1.0"] = "Open Public License v1.0"
LICENSEMAPPINGS["Open Software License 1.0"] = "Open Software License 1.0"
LICENSEMAPPINGS["Open Software License 1.1"] = "Open Software License 1.1"
LICENSEMAPPINGS["Open Software License 2.0"] = "Open Software License 2.0"
LICENSEMAPPINGS["Open Software License 2.1"] = "Open Software License 2.1"
LICENSEMAPPINGS["Open Software License 3.0"] = "Open Software License 3.0"
LICENSEMAPPINGS["OpenJDK Assembly exception 1.0"] = "OpenJDK Assembly exception 1.0"
LICENSEMAPPINGS["OpenLDAP Public License v1.2"] = "Open LDAP Public License v1.2"
LICENSEMAPPINGS["OpenLDAP Public License v2.0.1"] = "Open LDAP Public License v2.0.1"
LICENSEMAPPINGS["OpenLDAP Public License v2.3"] = "Open LDAP Public License v2.3"
LICENSEMAPPINGS["OpenLDAP Public License v2.4"] = "Open LDAP Public License v2.4"
LICENSEMAPPINGS["OpenLDAP Public License v2.7"] = "Open LDAP Public License v2.7"
LICENSEMAPPINGS["OpenLDAP Public License v2.8"] = "Open LDAP Public License v2.8"
LICENSEMAPPINGS["OpenSSL License"] = "OpenSSL License"
LICENSEMAPPINGS["OpenVPN OpenSSL Exception"] = "OpenVPN OpenSSL Exception"
LICENSEMAPPINGS["OSET Public License version 2.1"] = "OSET Public License version 2.1"
LICENSEMAPPINGS["PERL Artistic License"] = "Artistic License 1.0 (Perl)"
LICENSEMAPPINGS["PHP License v3.0"] = "PHP License v3.0"
LICENSEMAPPINGS["PHP License v3.01"] = "PHP License v3.01"
LICENSEMAPPINGS["Plexus Classworlds License"] = "Plexus Classworlds License"
LICENSEMAPPINGS["psfrag License"] = "psfrag License"
LICENSEMAPPINGS["psutils License"] = "psutils License"
LICENSEMAPPINGS["Python License 2.0"] = "Python License 2.0"
LICENSEMAPPINGS["Q Public License 1.0"] = "Q Public License 1.0"
LICENSEMAPPINGS["Qhull License"] = "Qhull License"
LICENSEMAPPINGS["Qwt exception 1.0"] = "Qwt exception 1.0"
LICENSEMAPPINGS["Rdisc License"] = "Rdisc License"
LICENSEMAPPINGS["RealNetworks Public Source License v1.0"] = "RealNetworks Public Source License v1.0"
# NOTE(review): the versionless key below deliberately resolves to the 1.1
# name — confirm that 1.1 is the intended default for unversioned reports.
LICENSEMAPPINGS["Reciprocal Public License"] = "Reciprocal Public License 1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.1"] = "Reciprocal Public License 1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.5"] = "Reciprocal Public License 1.5"
LICENSEMAPPINGS["Red Hat eCos Public License v1.1"] = "Red Hat eCos Public License v1.1"
LICENSEMAPPINGS["Ricoh Source Code Public License"] = "Ricoh Source Code Public License"
LICENSEMAPPINGS["RSA Message-Digest License"] = "RSA Message-Digest License"
LICENSEMAPPINGS["Ruby License"] = "Ruby License"
LICENSEMAPPINGS["Sax Public Domain Notice"] = "Sax Public Domain Notice"
LICENSEMAPPINGS["Saxpath License"] = "Saxpath License"
LICENSEMAPPINGS["SCEA Shared Source License"] = "SCEA Shared Source License"
LICENSEMAPPINGS["Scheme Widget Library (SWL) Software License Agreement"] = "Scheme Widget Library (SWL) Software License Agreement"
LICENSEMAPPINGS["Secure Messaging Protocol Public License"] = "Secure Messaging Protocol Public License"
LICENSEMAPPINGS["Sendmail License"] = "Sendmail License"
LICENSEMAPPINGS["SGI Free Software License B v1.0"] = "SGI Free Software License B v1.0"
LICENSEMAPPINGS["SGI Free Software License B v1.1"] = "SGI Free Software License B v1.1"
LICENSEMAPPINGS["SGI Free Software License B v2.0"] = "SGI Free Software License B v2.0"
LICENSEMAPPINGS["SIL Open Font License 1.0"] = "SIL Open Font License 1.0"
LICENSEMAPPINGS["SIL Open Font License 1.1"] = "SIL Open Font License 1.1"
LICENSEMAPPINGS["Simple Public License 2.0"] = "Simple Public License 2.0"
LICENSEMAPPINGS["Sleepycat License"] = "Sleepycat License"
LICENSEMAPPINGS["SNIA Public License 1.1"] = "SNIA Public License 1.1"
LICENSEMAPPINGS["Spencer License 86"] = "Spencer License 86"
LICENSEMAPPINGS["Spencer License 94"] = "Spencer License 94"
LICENSEMAPPINGS["Spencer License 99"] = "Spencer License 99"
LICENSEMAPPINGS["Standard ML of New Jersey License"] = "Standard ML of New Jersey License"
LICENSEMAPPINGS["SugarCRM Public License v1.1.3"] = "SugarCRM Public License v1.1.3"
LICENSEMAPPINGS["Sun Industry Standards Source License (SISSL) v1.1"] = "Sun Industry Standards Source License v1.1"
LICENSEMAPPINGS["Sun Industry Standards Source License v1.2"] = "Sun Industry Standards Source License v1.2"
LICENSEMAPPINGS["Sun Public License v1.0"] = "Sun Public License v1.0"
LICENSEMAPPINGS["Sybase Open Watcom Public License 1.0"] = "Sybase Open Watcom Public License 1.0"
LICENSEMAPPINGS["Tcl License"] = "TCL/TK License"
LICENSEMAPPINGS["TCP Wrappers License"] = "TCP Wrappers License"
LICENSEMAPPINGS["The Beerware License"] = "Beerware License"
LICENSEMAPPINGS["The Code Project Open License (CPOL) 1.02"] = "Code Project Open License 1.02"
LICENSEMAPPINGS["The Curl License"] = "curl License"
LICENSEMAPPINGS["The JSON License"] = "JSON License"
LICENSEMAPPINGS["The PostgreSQL License"] = "PostgreSQL License"
LICENSEMAPPINGS["The Unlicense"] = "The Unlicense"
LICENSEMAPPINGS["TMate License"] = "TMate Open Source License"
LICENSEMAPPINGS["TORQUE v2.5+ Software License v1.1"] = "TORQUE v2.5+ Software License v1.1"
LICENSEMAPPINGS["Trusster Open Source License"] = "Trusster Open Source License"
LICENSEMAPPINGS["U-Boot exception 2.0"] = "U-Boot exception 2.0"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2015)"] = "Unicode License Agreement - Data Files and Software (2015)"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2016)"] = "Unicode License Agreement - Data Files and Software (2016)"
LICENSEMAPPINGS["Unicode Terms of Use"] = "Unicode Terms of Use"
LICENSEMAPPINGS["Universal Permissive License v1.0"] = "Universal Permissive License v1.0"
LICENSEMAPPINGS["University of Illinois/NCSA Open Source License"] = "University of Illinois/NCSA Open Source License"
LICENSEMAPPINGS["US Naval Research Laboratory (NRL) v1.1"] = "NRL License"
LICENSEMAPPINGS["Vim License"] = "Vim License"
LICENSEMAPPINGS["VOSTROM Public License for Open Source"] = "VOSTROM Public License for Open Source"
LICENSEMAPPINGS["Vovida Software License v1.0"] = "Vovida Software License v1.0"
LICENSEMAPPINGS["W3C Software Notice and Document License (2015-05-13)"] = "W3C Software Notice and Document License (2015-05-13)"
LICENSEMAPPINGS["W3C Software Notice and License (1998-07-20)"] = "W3C Software Notice and License (1998-07-20)"
LICENSEMAPPINGS["W3C Software Notice and License (2002-12-31)"] = "W3C Software Notice and License (2002-12-31)"
LICENSEMAPPINGS["Wsuipa License"] = "Wsuipa License"
LICENSEMAPPINGS["WxWindows Library Exception 3.1"] = "WxWindows Library Exception 3.1"
LICENSEMAPPINGS["wxWindows Library Licence, Version 3.1"] = "wxWindows Library License"
LICENSEMAPPINGS["X.Net License"] = "X.Net License"
LICENSEMAPPINGS["X11 License"] = "X11 License"
LICENSEMAPPINGS["Xerox License"] = "Xerox License"
LICENSEMAPPINGS["XFree86 License 1.1"] = "XFree86 License 1.1"
LICENSEMAPPINGS["xinetd License"] = "xinetd License"
LICENSEMAPPINGS["XPP License"] = "XPP License"
LICENSEMAPPINGS["XSkat"] = "XSkat License"
LICENSEMAPPINGS["Yahoo! Public License v1.0"] = "Yahoo! Public License v1.0"
LICENSEMAPPINGS["Yahoo! Public License v1.1"] = "Yahoo! Public License v1.1"
LICENSEMAPPINGS["Zed License"] = "Zed License"
LICENSEMAPPINGS["Zend License v2.0"] = "Zend License v2.0"
LICENSEMAPPINGS["Zimbra Public License v1.4"] = "Zimbra Public License v1.4"
# "Publice" is the upstream tool's spelling for this key; keep it as-is so
# lookups against the tool's output continue to match.
LICENSEMAPPINGS["Zimbra Publice License v1.3"] = "Zimbra Public License v1.3"
LICENSEMAPPINGS["zlib License"] = "zlib License"
LICENSEMAPPINGS["zlib/libpng License with Acknowledgement"] = "zlib/libpng License with Acknowledgement"
LICENSEMAPPINGS["Zope Public License 1.1"] = "Zope Public License 1.1"
LICENSEMAPPINGS["Zope Public License 2.0"] = "Zope Public License 2.0"
LICENSEMAPPINGS["Zope Public License 2.1"] = "Zope Public License 2.1"
'''
| '''
Copyright 2020 Flexera Software LLC
See LICENSE.TXT for full license text
SPDX-License-Identifier: MIT
Author : sgeary
Created On : Fri May 01 2020
File : licenses.py
'''
# Lookup table mapping human-readable license names (as they appear in scan
# results) to SPDX short identifiers (https://spdx.org/licenses/).
# Keys must match the reporting tool's license names byte-for-byte — do not
# "fix" spelling or spacing in a key without confirming the upstream name.
# NOTE(review): several values are SPDX IDs that have since been deprecated
# (e.g. "AGPL-1.0", "GPL-2.0+", "LGPL-3.0"); confirm downstream consumers
# expect these legacy forms before modernizing them.
LICENSEMAPPINGS = {}
# Sentinel: an undetermined license is reported as "Undeclared".
LICENSEMAPPINGS["I don't know"] = "Undeclared"
LICENSEMAPPINGS["389 Directory Server Exception"]="389-exception"
LICENSEMAPPINGS["3dfx Glide License"]="Glide"
LICENSEMAPPINGS["3DFX GLIDE Source Code General Public License"]="Glide"
LICENSEMAPPINGS["Abstyles License"]="Abstyles"
LICENSEMAPPINGS["Academic Free License v1.1"]="AFL-1.1"
LICENSEMAPPINGS["Academic Free License v1.2"]="AFL-1.2"
LICENSEMAPPINGS["Academic Free License v2.0"]="AFL-2.0"
LICENSEMAPPINGS["Academic Free License v2.1"]="AFL-2.1"
LICENSEMAPPINGS["Academic Free License v3.0"]="AFL-3.0"
LICENSEMAPPINGS["Academy of Motion Picture Arts and Sciences BSD"]="AMPAS"
LICENSEMAPPINGS["Adaptive Public License 1.0"]="APL-1.0"
LICENSEMAPPINGS["Adobe Glyph List License"]="Adobe-Glyph"
LICENSEMAPPINGS["Adobe Postscript AFM License"]="APAFML"
LICENSEMAPPINGS["Adobe Systems Incorporated Source Code License Agreement"]="Adobe-2006"
LICENSEMAPPINGS["Affero General Public License v1.0"]="AGPL-1.0"
LICENSEMAPPINGS["Afmparse License"]="Afmparse"
LICENSEMAPPINGS["Aladdin Free Public License v8"]="Aladdin"
LICENSEMAPPINGS["Allegro Giftware License"]="Giftware"
LICENSEMAPPINGS["Amazon Digital Services License"]="ADSL"
LICENSEMAPPINGS["AMD's plpa_map.c License"]="AMDPLPA"
LICENSEMAPPINGS["ANTLR Software Rights Notice"]="ANTLR-PD"
LICENSEMAPPINGS["Apache License 1.0"]="Apache-1.0"
LICENSEMAPPINGS["Apache License 1.1"]="Apache-1.1"
LICENSEMAPPINGS["Apache License 2.0"]="Apache-2.0"
LICENSEMAPPINGS["Apple MIT License"]="AML"
LICENSEMAPPINGS["Apple Public Source License 1.0"]="APSL-1.0"
LICENSEMAPPINGS["Apple Public Source License 1.1"]="APSL-1.1"
LICENSEMAPPINGS["Apple Public Source License 1.2"]="APSL-1.2"
LICENSEMAPPINGS["Apple Public Source License 2.0"]="APSL-2.0"
LICENSEMAPPINGS["Artistic License 1.0"]="Artistic-1.0"
LICENSEMAPPINGS["Artistic License 1.0 w/clause 8"]="Artistic-1.0-cl8"
LICENSEMAPPINGS["Artistic License 2.0"]="Artistic-2.0"
LICENSEMAPPINGS["Attribution Assurance License"]="AAL"
LICENSEMAPPINGS["Autoconf exception 2.0"]="Autoconf-exception-2.0"
LICENSEMAPPINGS["Autoconf exception 3.0"]="Autoconf-exception-3.0"
LICENSEMAPPINGS["Bahyph License"]="Bahyph"
LICENSEMAPPINGS["Barr License"]="Barr"
LICENSEMAPPINGS["Bison exception 2.2"]="Bison-exception-2.2"
LICENSEMAPPINGS["BitTorrent Open Source License v1.0"]="BitTorrent-1.0"
LICENSEMAPPINGS["BitTorrent Open Source License v1.1"]="BitTorrent-1.1"
LICENSEMAPPINGS["Boost Software License 1.0"]="BSL-1.0"
LICENSEMAPPINGS["Bootloader Distribution Exception"]="Bootloader-exception"
LICENSEMAPPINGS["Borceux license"]="Borceux"
LICENSEMAPPINGS["BSD 1-Clause License"]="BSD-1-Clause"
LICENSEMAPPINGS["BSD 2-clause \"Simplified\" or \"FreeBSD\" License"]="BSD-2-Clause"
LICENSEMAPPINGS["BSD 2-clause FreeBSD License"]="BSD-2-Clause-FreeBSD"
LICENSEMAPPINGS["BSD 2-clause NetBSD License"]="BSD-2-Clause-NetBSD"
LICENSEMAPPINGS["BSD 3-clause \"New\" or \"Revised\" License"]="BSD-3-Clause"
LICENSEMAPPINGS["BSD 3-clause Clear License"]="BSD-3-Clause-Clear"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License"]="BSD-3-Clause-No-Nuclear-License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License 2014"]="BSD-3-Clause-No-Nuclear-License-2014"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear Warranty"]="BSD-3-Clause-No-Nuclear-Warranty"
LICENSEMAPPINGS["BSD 4-clause \"Original\" or \"Old\" License"]="BSD-4-Clause"
LICENSEMAPPINGS["BSD Protection License"]="BSD-Protection"
LICENSEMAPPINGS["BSD Source Code Attribution"]="BSD-Source-Code"
LICENSEMAPPINGS["BSD with attribution"]="BSD-3-Clause-Attribution"
LICENSEMAPPINGS["BSD Zero Clause License"]="0BSD"
LICENSEMAPPINGS["BSD-2-Clause Plus Patent License"]="BSD-2-Clause-Patent"
LICENSEMAPPINGS["BSD-4-Clause (University of California-Specific)"]="BSD-4-Clause-UC"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.5"]="bzip2-1.0.5"
LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.6"]="bzip2-1.0.6"
LICENSEMAPPINGS["Caldera License"]="Caldera"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.0"]="CECILL-1.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.1"]="CECILL-1.1"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.0"]="CECILL-2.0"
LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.1"]="CECILL-2.1"
LICENSEMAPPINGS["CeCILL-B Free Software License Agreement v1.0"]="CECILL-B"
LICENSEMAPPINGS["CeCILL-C Free Software License Agreement v1.0"]="CECILL-C"
LICENSEMAPPINGS["Clarified Artistic License"]="ClArtistic"
LICENSEMAPPINGS["Classpath exception 2.0"]="Classpath-exception-2.0"
LICENSEMAPPINGS["CLISP exception 2.0"]="CLISP-exception-2.0"
LICENSEMAPPINGS["CMU License"]="MIT-CMU"
LICENSEMAPPINGS["CNRI Python License"]="CNRI-Python"
LICENSEMAPPINGS["CNRI Python Open Source GPL Compatible License Agreement"]="CNRI-Python-GPL-Compatible"
LICENSEMAPPINGS["Common Development and Distribution License 1.0"]="CDDL-1.0"
LICENSEMAPPINGS["Common Development and Distribution License 1.1"]="CDDL-1.1"
LICENSEMAPPINGS["Common Public Attribution License 1.0"]="CPAL-1.0"
LICENSEMAPPINGS["Common Public License"]="CPL-1.0"
LICENSEMAPPINGS["Community Data License Agreement Permissive 1.0"]="CDLA-Permissive-1.0"
LICENSEMAPPINGS["Community Data License Agreement Sharing 1.0"]="CDLA-Sharing-1.0"
LICENSEMAPPINGS["Computer Associates Trusted Open Source License 1.1"]="CATOSL-1.1"
LICENSEMAPPINGS["Condor Public License v1.1"]="Condor-1.1"
LICENSEMAPPINGS["Creative Commons Attribution 1.0"]="CC-BY-1.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.0"]="CC-BY-2.0"
LICENSEMAPPINGS["Creative Commons Attribution 2.5"]="CC-BY-2.5"
LICENSEMAPPINGS["Creative Commons Attribution 3.0"]="CC-BY-3.0"
LICENSEMAPPINGS["Creative Commons Attribution 4.0"]="CC-BY-4.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 1.0"]="CC-BY-ND-1.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.0"]="CC-BY-ND-2.0"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.5"]="CC-BY-ND-2.5"
LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 3.0"]="CC-BY-ND-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 1.0"]="CC-BY-NC-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.0"]="CC-BY-NC-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.5"]="CC-BY-NC-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 3.0"]="CC-BY-NC-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 1.0"]="CC-BY-NC-ND-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.0"]="CC-BY-NC-ND-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.5"]="CC-BY-NC-ND-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 3.0"]="CC-BY-NC-ND-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 1.0"]="CC-BY-NC-SA-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.0"]="CC-BY-NC-SA-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.5"]="CC-BY-NC-SA-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 3.0"]="CC-BY-NC-SA-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 1.0"]="CC-BY-SA-1.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.0"]="CC-BY-SA-2.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.5"]="CC-BY-SA-2.5"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 3.0"]="CC-BY-SA-3.0"
LICENSEMAPPINGS["Creative Commons Attribution Share Alike 4.0"]="CC-BY-SA-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NoDerivatives 4.0"]="CC-BY-ND-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial 4.0"]="CC-BY-NC-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-NoDerivatives 4.0"]="CC-BY-NC-ND-4.0"
LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-ShareAlike 4.0"]="CC-BY-NC-SA-4.0"
LICENSEMAPPINGS["Creative Commons CC0 1.0 Universal"]="CC0-1.0"
LICENSEMAPPINGS["Crossword License"]="Crossword"
LICENSEMAPPINGS["CrystalStacker License"]="CrystalStacker"
LICENSEMAPPINGS["CUA Office Public License v1.0"]="CUA-OPL-1.0"
LICENSEMAPPINGS["Cube License"]="Cube"
LICENSEMAPPINGS["Deutsche Freie Software Lizenz"]="D-FSL-1.0"
LICENSEMAPPINGS["diffmark license"]="diffmark"
LICENSEMAPPINGS["DigiRule FOSS License Exception"]="DigiRule-FOSS-exception"
LICENSEMAPPINGS["Do What The Fuck You Want To Public License"]="WTFPL"
LICENSEMAPPINGS["DOC Software License"]="DOC"
LICENSEMAPPINGS["Dotseqn License"]="Dotseqn"
LICENSEMAPPINGS["DSDP License"]="DSDP"
LICENSEMAPPINGS["dvipdfm License"]="dvipdfm"
LICENSEMAPPINGS["Eclipse Public License 1.0"]="EPL-1.0"
LICENSEMAPPINGS["Eclipse Public License 2.0"]="EPL-2.0"
LICENSEMAPPINGS["eCos exception 2.0"]="eCos-exception-2.0"
LICENSEMAPPINGS["eCos license version 2.0"]="eCos-2.0"
LICENSEMAPPINGS["Educational Community License v1.0"]="ECL-1.0"
LICENSEMAPPINGS["Educational Community License v2.0"]="ECL-2.0"
LICENSEMAPPINGS["eGenix.com Public License 1.1.0"]="eGenix"
LICENSEMAPPINGS["Eiffel Forum License v1.0"]="EFL-1.0"
LICENSEMAPPINGS["Eiffel Forum License v2.0"]="EFL-2.0"
LICENSEMAPPINGS["Enlightenment License (e16)"]="MIT-advertising"
LICENSEMAPPINGS["enna License"]="MIT-enna"
LICENSEMAPPINGS["Entessa Public License"]="Entessa"
LICENSEMAPPINGS["Erlang Public License v1.1"]="ErlPL-1.1"
LICENSEMAPPINGS["EU DataGrid Software License"]="EUDatagrid"
LICENSEMAPPINGS["European Union Public License 1.0"]="EUPL-1.0"
LICENSEMAPPINGS["European Union Public License 1.1"]="EUPL-1.1"
LICENSEMAPPINGS["European Union Public License 1.2"]="EUPL-1.2"
LICENSEMAPPINGS["Eurosym License v2"]="Eurosym"
LICENSEMAPPINGS["Fair License"]="Fair"
LICENSEMAPPINGS["FastCGI"]="OML"
LICENSEMAPPINGS["Fawkes Runtime Exception"]="Fawkes-Runtime-exception"
LICENSEMAPPINGS["feh License"]="MIT-feh"
LICENSEMAPPINGS["FLTK exception"]="FLTK-exception"
LICENSEMAPPINGS["Font exception 2.0"]="Font-exception-2.0"
LICENSEMAPPINGS["Frameworx Open License 1.0"]="Frameworx-1.0"
LICENSEMAPPINGS["FreeImage Public License v1.0"]="FreeImage"
LICENSEMAPPINGS["FreeRTOS Exception 2.0"]="freertos-exception-2.0"
LICENSEMAPPINGS["FreeType License"]="FTL"
LICENSEMAPPINGS["FSF All Permissive License"]="FSFAP"
LICENSEMAPPINGS["FSF Unlimited License"]="FSFUL"
LICENSEMAPPINGS["FSF Unlimited License (with License Retention)"]="FSFULLR"
LICENSEMAPPINGS["GCC Runtime Library exception 2.0"]="GCC-exception-2.0"
LICENSEMAPPINGS["GCC Runtime Library exception 3.1"]="GCC-exception-3.1"
LICENSEMAPPINGS["GL2PS License, Version 2"]="GL2PS"
LICENSEMAPPINGS["Glulxe License"]="Glulxe"
# GNU family: "+" suffixed IDs ("GPL-2.0+", etc.) are the legacy SPDX
# "or later" notation — presumably kept for compatibility with the
# consuming report format; verify before switching to "-or-later" IDs.
LICENSEMAPPINGS["GNU Affero General Public License v3.0"]="AGPL-3.0"
LICENSEMAPPINGS["GNU Free Documentation License v1.1"]="GFDL-1.1"
LICENSEMAPPINGS["GNU Free Documentation License v1.2"]="GFDL-1.2"
LICENSEMAPPINGS["GNU Free Documentation License v1.3"]="GFDL-1.3"
LICENSEMAPPINGS["GNU General Public License v1.0"]="GPL-1.0"
LICENSEMAPPINGS["GNU General Public License v1.0 or later"]="GPL-1.0+"
LICENSEMAPPINGS["GNU General Public License v2.0"]="GPL-2.0"
LICENSEMAPPINGS["GNU General Public License v2.0 or later"]="GPL-2.0+"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Autoconf exception"]="GPL-2.0-with-autoconf-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Bison exception"]="GPL-2.0-with-bison-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/Font exception"]="GPL-2.0-with-font-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 w/GCC Runtime Library exception"]="GPL-2.0-with-GCC-exception"
LICENSEMAPPINGS["GNU General Public License v2.0 with Classpath Exception"]="GPL-2.0-with-classpath-exception"
LICENSEMAPPINGS["GNU General Public License v3.0"]="GPL-3.0"
LICENSEMAPPINGS["GNU General Public License v3.0 or later"]="GPL-3.0+"
LICENSEMAPPINGS["GNU General Public License v3.0 w/Autoconf exception"]="GPL-3.0-with-autoconf-exception"
LICENSEMAPPINGS["GNU General Public License v3.0 w/GCC Runtime Library exception"]="GPL-3.0-with-GCC-exception"
LICENSEMAPPINGS["GNU JavaMail exception"]="gnu-javamail-exception"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1"]="LGPL-2.1"
LICENSEMAPPINGS["GNU Lesser General Public License v2.1 or later"]="LGPL-2.1+"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0"]="LGPL-3.0"
LICENSEMAPPINGS["GNU Lesser General Public License v3.0 or later"]="LGPL-3.0+"
LICENSEMAPPINGS["GNU Library General Public License v2.0"]="LGPL-2.0"
LICENSEMAPPINGS["GNU Library General Public License v2.0 or later"]="LGPL-2.0+"
LICENSEMAPPINGS["gnuplot License"]="gnuplot"
LICENSEMAPPINGS["gSOAP Public License v1.3b"]="gSOAP-1.3b"
LICENSEMAPPINGS["Haskell Language Report License"]="HaskellReport"
LICENSEMAPPINGS["Historic Permission Notice and Disclaimer"]="HPND"
LICENSEMAPPINGS["i2p GPL+Java Exception"]="i2p-gpl-java-exception"
LICENSEMAPPINGS["IBM PowerPC Initialization and Boot Software"]="IBM-pibs"
LICENSEMAPPINGS["IBM Public License v1.0"]="IPL-1.0"
LICENSEMAPPINGS["ICU License"]="ICU"
LICENSEMAPPINGS["ImageMagick (Apache 2.0) License"]="ImageMagick"
LICENSEMAPPINGS["iMatix Standard Function Library Agreement"]="iMatix"
LICENSEMAPPINGS["Imlib2 License"]="Imlib2"
LICENSEMAPPINGS["Independent JPEG Group License"]="IJG"
LICENSEMAPPINGS["Info-ZIP License"]="Info-ZIP"
LICENSEMAPPINGS["Intel ACPI Software License Agreement"]="Intel-ACPI"
LICENSEMAPPINGS["Intel Open Source License"]="Intel"
LICENSEMAPPINGS["Interbase Public License v1.0"]="Interbase-1.0"
LICENSEMAPPINGS["IPA Font License"]="IPA"
LICENSEMAPPINGS["ISC License (ISC)"]="ISC"
LICENSEMAPPINGS["JasPer License Version 2.0"]="JasPer-2.0"
LICENSEMAPPINGS["Jython License"]="CNRI-Jython"
LICENSEMAPPINGS["LaTeX Project Public License v1.0"]="LPPL-1.0"
LICENSEMAPPINGS["LaTeX Project Public License v1.1"]="LPPL-1.1"
LICENSEMAPPINGS["LaTeX Project Public License v1.2"]="LPPL-1.2"
LICENSEMAPPINGS["LaTeX Project Public License v1.3a"]="LPPL-1.3a"
LICENSEMAPPINGS["LaTeX Project Public License v1.3c"]="LPPL-1.3c"
LICENSEMAPPINGS["Latex2e License"]="Latex2e"
LICENSEMAPPINGS["Lawrence Berkeley National Labs BSD variant license"]="BSD-3-Clause-LBNL"
LICENSEMAPPINGS["Leptonica License"]="Leptonica"
LICENSEMAPPINGS["Lesser General Public License For Linguistic Resources"]="LGPLLR"
LICENSEMAPPINGS["libpng License"]="Libpng"
LICENSEMAPPINGS["libtiff License"]="libtiff"
LICENSEMAPPINGS["Libtool Exception"]="Libtool-exception"
LICENSEMAPPINGS["Licence Art Libre 1.2"]="LAL-1.2"
LICENSEMAPPINGS["Licence Art Libre 1.3"]="LAL-1.3"
LICENSEMAPPINGS["Licence Libre du Québec – Permissive version 1.1"]="LiLiQ-P-1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité forte version 1.1"]="LiLiQ-Rplus-1.1"
LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité version 1.1"]="LiLiQ-R-1.1"
LICENSEMAPPINGS["Linux Kernel Variant of OpenIB.org license"]="Linux-OpenIB"
LICENSEMAPPINGS["Linux Syscall Note"]="Linux-syscall-note"
LICENSEMAPPINGS["LLVM Exception"]="LLVM-exception"
LICENSEMAPPINGS["Lucent Public License v1.0"]="LPL-1.0"
LICENSEMAPPINGS["Lucent Public License v1.02 (Plan9)"]="LPL-1.02"
LICENSEMAPPINGS["LZMA exception"]="LZMA-exception"
LICENSEMAPPINGS["Macros and Inline Functions Exception"]="mif-exception"
LICENSEMAPPINGS["MakeIndex License"]="MakeIndex"
LICENSEMAPPINGS["Matrix Template Library License"]="MTLL"
LICENSEMAPPINGS["Microsoft Public License (Ms-PL)"]="MS-PL"
LICENSEMAPPINGS["Microsoft Reciprocal License (Ms-RL)"]="MS-RL"
LICENSEMAPPINGS["MirOS Licence"]="MirOS"
LICENSEMAPPINGS["MIT +no-false-attribs license"]="MITNFA"
# Both MIT spellings intentionally collapse to the single SPDX "MIT" ID.
LICENSEMAPPINGS["MIT License (Expat)"]="MIT"
LICENSEMAPPINGS["MIT-Style License"]="MIT"
LICENSEMAPPINGS["MIT No Attribution"]="MIT-0"
LICENSEMAPPINGS["Motosoto License"]="Motosoto"
LICENSEMAPPINGS["Mozilla Public License 1.0"]="MPL-1.0"
LICENSEMAPPINGS["Mozilla Public License 1.1"]="MPL-1.1"
LICENSEMAPPINGS["Mozilla Public License 2.0"]="MPL-2.0"
LICENSEMAPPINGS["Mozilla Public License 2.0 (no copyleft exception)"]="MPL-2.0-no-copyleft-exception"
LICENSEMAPPINGS["MPICH2 License"]="mpich2"
LICENSEMAPPINGS["Multics License"]="Multics"
LICENSEMAPPINGS["Mup License"]="Mup"
LICENSEMAPPINGS["NASA Open Source Agreement 1.3"]="NASA-1.3"
LICENSEMAPPINGS["Naumen Public License"]="Naumen"
LICENSEMAPPINGS["Net Boolean Public License v1"]="NBPL-1.0"
LICENSEMAPPINGS["netCDF License"]="NetCDF"
LICENSEMAPPINGS["Nethack General Public License"]="NGPL"
LICENSEMAPPINGS["Netizen Open Source License v1.0"]="NOSL"
LICENSEMAPPINGS["Netscape Public License 1.0"]="NPL-1.0"
LICENSEMAPPINGS["Netscape Public License 1.1"]="NPL-1.1"
LICENSEMAPPINGS["Net-SNMP License"]="Net-SNMP"
LICENSEMAPPINGS["Newsletr License"]="Newsletr"
LICENSEMAPPINGS["No Limit Public License"]="NLPL"
LICENSEMAPPINGS["Nokia Open Source License"]="Nokia"
LICENSEMAPPINGS["Nokia Qt LGPL exception 1.1"]="Nokia-Qt-exception-1.1"
LICENSEMAPPINGS["Non-Profit Open Software License 3.0"]="NPOSL-3.0"
LICENSEMAPPINGS["Norwegian Licence for Open Government Data"]="NLOD-1.0"
LICENSEMAPPINGS["Noweb License"]="Noweb"
LICENSEMAPPINGS["NTP License"]="NTP"
LICENSEMAPPINGS["Nunit License"]="Nunit"
LICENSEMAPPINGS["OCLC Research Public License 2.0"]="OCLC-2.0"
LICENSEMAPPINGS["ODC Open Database License v1.0"]="ODbL-1.0"
LICENSEMAPPINGS["ODC Public Domain Dedication & License 1.0"]="PDDL-1.0"
LICENSEMAPPINGS["Open CASCADE Exception 1.0"]="OCCT-exception-1.0"
LICENSEMAPPINGS["Open CASCADE Technology Public License"]="OCCT-PL"
LICENSEMAPPINGS["Open Group Test Suite License"]="OGTSL"
LICENSEMAPPINGS["Open LDAP Public License 2.2.2"]="OLDAP-2.2.2"
LICENSEMAPPINGS["Open LDAP Public License v1.1"]="OLDAP-1.1"
LICENSEMAPPINGS["Open LDAP Public License v1.3"]="OLDAP-1.3"
LICENSEMAPPINGS["Open LDAP Public License v1.4"]="OLDAP-1.4"
LICENSEMAPPINGS["Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)"]="OLDAP-2.0"
LICENSEMAPPINGS["Open LDAP Public License v2.1"]="OLDAP-2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.2"]="OLDAP-2.2"
LICENSEMAPPINGS["Open LDAP Public License v2.2.1"]="OLDAP-2.2.1"
LICENSEMAPPINGS["Open LDAP Public License v2.5"]="OLDAP-2.5"
LICENSEMAPPINGS["Open LDAP Public License v2.6"]="OLDAP-2.6"
LICENSEMAPPINGS["Open Public License v1.0"]="OPL-1.0"
LICENSEMAPPINGS["Open Software License 1.0"]="OSL-1.0"
LICENSEMAPPINGS["Open Software License 1.1"]="OSL-1.1"
LICENSEMAPPINGS["Open Software License 2.0"]="OSL-2.0"
LICENSEMAPPINGS["Open Software License 2.1"]="OSL-2.1"
LICENSEMAPPINGS["Open Software License 3.0"]="OSL-3.0"
LICENSEMAPPINGS["OpenJDK Assembly exception 1.0"]="OpenJDK-assembly-exception-1.0"
# NOTE(review): "Open LDAP ..." vs "OpenLDAP ..." key spellings both occur
# upstream — both variants are kept deliberately.
LICENSEMAPPINGS["OpenLDAP Public License v1.2"]="OLDAP-1.2"
LICENSEMAPPINGS["OpenLDAP Public License v2.0.1"]="OLDAP-2.0.1"
LICENSEMAPPINGS["OpenLDAP Public License v2.3"]="OLDAP-2.3"
LICENSEMAPPINGS["OpenLDAP Public License v2.4"]="OLDAP-2.4"
LICENSEMAPPINGS["OpenLDAP Public License v2.7"]="OLDAP-2.7"
LICENSEMAPPINGS["OpenLDAP Public License v2.8"]="OLDAP-2.8"
LICENSEMAPPINGS["OpenSSL License"]="OpenSSL"
LICENSEMAPPINGS["OpenVPN OpenSSL Exception"]="openvpn-openssl-exception"
LICENSEMAPPINGS["OSET Public License version 2.1"]="OSET-PL-2.1"
LICENSEMAPPINGS["PERL Artistic License"]="Artistic-1.0-Perl"
LICENSEMAPPINGS["PHP License v3.0"]="PHP-3.0"
LICENSEMAPPINGS["PHP License v3.01"]="PHP-3.01"
LICENSEMAPPINGS["Plexus Classworlds License"]="Plexus"
LICENSEMAPPINGS["psfrag License"]="psfrag"
LICENSEMAPPINGS["psutils License"]="psutils"
LICENSEMAPPINGS["Python License 2.0"]="Python-2.0"
LICENSEMAPPINGS["Q Public License 1.0"]="QPL-1.0"
LICENSEMAPPINGS["Qhull License"]="Qhull"
LICENSEMAPPINGS["Qwt exception 1.0"]="Qwt-exception-1.0"
LICENSEMAPPINGS["Rdisc License"]="Rdisc"
LICENSEMAPPINGS["RealNetworks Public Source License v1.0"]="RPSL-1.0"
LICENSEMAPPINGS["Reciprocal Public License"]="RPL-1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.1"]="RPL-1.1"
LICENSEMAPPINGS["Reciprocal Public License 1.5"]="RPL-1.5"
LICENSEMAPPINGS["Red Hat eCos Public License v1.1"]="RHeCos-1.1"
LICENSEMAPPINGS["Ricoh Source Code Public License"]="RSCPL"
LICENSEMAPPINGS["RSA Message-Digest License"]="RSA-MD"
LICENSEMAPPINGS["Ruby License"]="Ruby"
LICENSEMAPPINGS["Sax Public Domain Notice"]="SAX-PD"
LICENSEMAPPINGS["Saxpath License"]="Saxpath"
LICENSEMAPPINGS["SCEA Shared Source License"]="SCEA"
LICENSEMAPPINGS["Scheme Widget Library (SWL) Software License Agreement"]="SWL"
LICENSEMAPPINGS["Secure Messaging Protocol Public License"]="SMPPL"
LICENSEMAPPINGS["Sendmail License"]="Sendmail"
LICENSEMAPPINGS["SGI Free Software License B v1.0"]="SGI-B-1.0"
LICENSEMAPPINGS["SGI Free Software License B v1.1"]="SGI-B-1.1"
LICENSEMAPPINGS["SGI Free Software License B v2.0"]="SGI-B-2.0"
LICENSEMAPPINGS["SIL Open Font License 1.0"]="OFL-1.0"
LICENSEMAPPINGS["SIL Open Font License 1.1"]="OFL-1.1"
LICENSEMAPPINGS["Simple Public License 2.0"]="SimPL-2.0"
LICENSEMAPPINGS["Sleepycat License"]="Sleepycat"
LICENSEMAPPINGS["SNIA Public License 1.1"]="SNIA"
LICENSEMAPPINGS["Spencer License 86"]="Spencer-86"
LICENSEMAPPINGS["Spencer License 94"]="Spencer-94"
LICENSEMAPPINGS["Spencer License 99"]="Spencer-99"
LICENSEMAPPINGS["Standard ML of New Jersey License"]="SMLNJ"
LICENSEMAPPINGS["SugarCRM Public License v1.1.3"]="SugarCRM-1.1.3"
LICENSEMAPPINGS["Sun Industry Standards Source License (SISSL) v1.1"]="SISSL"
LICENSEMAPPINGS["Sun Industry Standards Source License v1.2"]="SISSL-1.2"
LICENSEMAPPINGS["Sun Public License v1.0"]="SPL-1.0"
LICENSEMAPPINGS["Sybase Open Watcom Public License 1.0"]="Watcom-1.0"
LICENSEMAPPINGS["Tcl License"]="TCL"
LICENSEMAPPINGS["TCP Wrappers License"]="TCP-wrappers"
LICENSEMAPPINGS["The Beerware License"]="Beerware"
LICENSEMAPPINGS["The Code Project Open License (CPOL) 1.02"]="CPOL-1.02"
LICENSEMAPPINGS["The Curl License"]="curl"
LICENSEMAPPINGS["The JSON License"]="JSON"
LICENSEMAPPINGS["The PostgreSQL License"]="PostgreSQL"
LICENSEMAPPINGS["The Unlicense"]="Unlicense"
LICENSEMAPPINGS["TMate License"]="TMate"
LICENSEMAPPINGS["TORQUE v2.5+ Software License v1.1"]="TORQUE-1.1"
LICENSEMAPPINGS["Trusster Open Source License"]="TOSL"
LICENSEMAPPINGS["U-Boot exception 2.0"]="u-boot-exception-2.0"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2015)"]="Unicode-DFS-2015"
LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2016)"]="Unicode-DFS-2016"
LICENSEMAPPINGS["Unicode Terms of Use"]="Unicode-TOU"
LICENSEMAPPINGS["Universal Permissive License v1.0"]="UPL-1.0"
LICENSEMAPPINGS["University of Illinois/NCSA Open Source License"]="NCSA"
LICENSEMAPPINGS["US Naval Research Laboratory (NRL) v1.1"]="NRL"
LICENSEMAPPINGS["Vim License"]="Vim"
LICENSEMAPPINGS["VOSTROM Public License for Open Source"]="VOSTROM"
LICENSEMAPPINGS["Vovida Software License v1.0"]="VSL-1.0"
LICENSEMAPPINGS["W3C Software Notice and Document License (2015-05-13)"]="W3C-20150513"
LICENSEMAPPINGS["W3C Software Notice and License (1998-07-20)"]="W3C-19980720"
LICENSEMAPPINGS["W3C Software Notice and License (2002-12-31)"]="W3C"
LICENSEMAPPINGS["Wsuipa License"]="Wsuipa"
LICENSEMAPPINGS["WxWindows Library Exception 3.1"]="WxWindows-exception-3.1"
LICENSEMAPPINGS["wxWindows Library Licence, Version 3.1"]="wxWindows"
LICENSEMAPPINGS["X.Net License"]="Xnet"
LICENSEMAPPINGS["X11 License"]="X11"
LICENSEMAPPINGS["Xerox License"]="Xerox"
LICENSEMAPPINGS["XFree86 License 1.1"]="XFree86-1.1"
LICENSEMAPPINGS["xinetd License"]="xinetd"
LICENSEMAPPINGS["XPP License"]="xpp"
LICENSEMAPPINGS["XSkat"]="XSkat"
LICENSEMAPPINGS["Yahoo! Public License v1.0"]="YPL-1.0"
LICENSEMAPPINGS["Yahoo! Public License v1.1"]="YPL-1.1"
LICENSEMAPPINGS["Zed License"]="Zed"
LICENSEMAPPINGS["Zend License v2.0"]="Zend-2.0"
LICENSEMAPPINGS["Zimbra Public License v1.4"]="Zimbra-1.4"
# "Publice" below is not a typo to fix here: the key mirrors the upstream
# tool's reported name, so correcting it would break the lookup.
LICENSEMAPPINGS["Zimbra Publice License v1.3"]="Zimbra-1.3"
LICENSEMAPPINGS["zlib License"]="Zlib"
LICENSEMAPPINGS["zlib/libpng License with Acknowledgement"]="zlib-acknowledgement"
LICENSEMAPPINGS["Zope Public License 1.1"]="ZPL-1.1"
LICENSEMAPPINGS["Zope Public License 2.0"]="ZPL-2.0"
LICENSEMAPPINGS["Zope Public License 2.1"]="ZPL-2.1"
'''
LICENSEMAPPINGS["389 Directory Server Exception"] = "389 Directory Server Exception"
LICENSEMAPPINGS["3dfx Glide License"] = "3dfx Glide License"
LICENSEMAPPINGS["3DFX GLIDE Source Code General Public License"] = "3dfx Glide License"
LICENSEMAPPINGS["Abstyles License"] = "Abstyles License"
LICENSEMAPPINGS["Academic Free License v1.1"] = "Academic Free License v1.1"
LICENSEMAPPINGS["Academic Free License v1.2"] = "Academic Free License v1.2"
LICENSEMAPPINGS["Academic Free License v2.0"] = "Academic Free License v2.0"
LICENSEMAPPINGS["Academic Free License v2.1"] = "Academic Free License v2.1"
LICENSEMAPPINGS["Academic Free License v3.0"] = "Academic Free License v3.0"
LICENSEMAPPINGS["Academy of Motion Picture Arts and Sciences BSD"] = "Academy of Motion Picture Arts and Sciences BSD"
LICENSEMAPPINGS["Adaptive Public License 1.0"] = "Adaptive Public License 1.0"
LICENSEMAPPINGS["Adobe Glyph List License"] = "Adobe Glyph List License"
LICENSEMAPPINGS["Adobe Postscript AFM License"] = "Adobe Postscript AFM License"
LICENSEMAPPINGS["Adobe Systems Incorporated Source Code License Agreement"] = "Adobe Systems Incorporated Source Code License Agreement"
LICENSEMAPPINGS["Affero General Public License v1.0"] = "Affero General Public License v1.0"
LICENSEMAPPINGS["Afmparse License"] = "Afmparse License"
LICENSEMAPPINGS["Aladdin Free Public License v8"] = "Aladdin Free Public License"
LICENSEMAPPINGS["Allegro Giftware License"] = "Giftware License"
LICENSEMAPPINGS["Amazon Digital Services License"] = "Amazon Digital Services License"
LICENSEMAPPINGS["AMD's plpa_map.c License"] = "AMD's plpa_map.c License"
LICENSEMAPPINGS["ANTLR Software Rights Notice"] = "ANTLR Software Rights Notice"
LICENSEMAPPINGS["Apache License 1.0"] = "Apache License 1.0"
LICENSEMAPPINGS["Apache License 1.1"] = "Apache License 1.1"
LICENSEMAPPINGS["Apache License 2.0"] = "Apache License 2.0"
LICENSEMAPPINGS["Apple MIT License"] = "Apple MIT License"
LICENSEMAPPINGS["Apple Public Source License 1.0"] = "Apple Public Source License 1.0"
LICENSEMAPPINGS["Apple Public Source License 1.1"] = "Apple Public Source License 1.1"
LICENSEMAPPINGS["Apple Public Source License 1.2"] = "Apple Public Source License 1.2"
LICENSEMAPPINGS["Apple Public Source License 2.0"] = "Apple Public Source License 2.0"
LICENSEMAPPINGS["Artistic License 1.0"] = "Artistic License 1.0"
LICENSEMAPPINGS["Artistic License 1.0 w/clause 8"] = "Artistic License 1.0 w/clause 8"
LICENSEMAPPINGS["Artistic License 2.0"] = "Artistic License 2.0"
LICENSEMAPPINGS["Attribution Assurance License"] = "Attribution Assurance License"
LICENSEMAPPINGS["Autoconf exception 2.0"] = "Autoconf exception 2.0"
LICENSEMAPPINGS["Autoconf exception 3.0"] = "Autoconf exception 3.0"
LICENSEMAPPINGS["Bahyph License"] = "Bahyph License"
LICENSEMAPPINGS["Barr License"] = "Barr License"
LICENSEMAPPINGS["Bison exception 2.2"] = "Bison exception 2.2"
LICENSEMAPPINGS["BitTorrent Open Source License v1.0"] = "BitTorrent Open Source License v1.0"
LICENSEMAPPINGS["BitTorrent Open Source License v1.1"] = "BitTorrent Open Source License v1.1"
LICENSEMAPPINGS["Boost Software License 1.0"] = "Boost Software License 1.0"
LICENSEMAPPINGS["Bootloader Distribution Exception"] = "Bootloader Distribution Exception"
LICENSEMAPPINGS["Borceux license"] = "Borceux license"
LICENSEMAPPINGS["BSD 1-Clause License"] = "BSD 1-Clause License"
LICENSEMAPPINGS["BSD 2-clause \"Simplified\" or \"FreeBSD\" License"] = "BSD 2-clause \"Simplified\" License"
LICENSEMAPPINGS["BSD 2-clause FreeBSD License"] = "BSD 2-clause FreeBSD License"
LICENSEMAPPINGS["BSD 2-clause NetBSD License"] = "BSD 2-clause NetBSD License"
LICENSEMAPPINGS["BSD 3-clause \"New\" or \"Revised\" License"] = "BSD 3-clause \"New\" or \"Revised\" License"
LICENSEMAPPINGS["BSD 3-clause Clear License"] = "BSD 3-clause Clear License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License"] = "BSD 3-Clause No Nuclear License"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear License 2014"] = "BSD 3-Clause No Nuclear License 2014"
LICENSEMAPPINGS["BSD 3-Clause No Nuclear Warranty"] = "BSD 3-Clause No Nuclear Warranty"
LICENSEMAPPINGS["BSD 4-clause \"Original\" or \"Old\" License"] = "BSD 4-clause \"Original\" or \"Old\" License"
LICENSEMAPPINGS["BSD Protection License"] = "BSD Protection License"
LICENSEMAPPINGS["BSD Source Code Attribution"] = "BSD Source Code Attribution"
LICENSEMAPPINGS["BSD with attribution"] = "BSD with attribution"
LICENSEMAPPINGS["BSD Zero Clause License"] = "BSD Zero Clause License"
LICENSEMAPPINGS["BSD-2-Clause Plus Patent License"] = "BSD-2-Clause Plus Patent License"
# License-name normalization table: maps license names as reported by the
# scanning tool (keys) to their canonical display names (values).
# Populated with a single bulk dict.update() call rather than one item
# assignment per entry; the resulting contents and insertion order are
# identical to per-key assignment.
# NOTE(review): some key spellings (e.g. "Zimbra Publice License v1.3")
# appear to mirror the upstream tool's own naming, typos included --
# confirm against the data source before changing any key.
LICENSEMAPPINGS.update({
    "BSD-4-Clause (University of California-Specific)": "BSD-4-Clause (University of California-Specific)",
    "bzip2 and libbzip2 License v1.0.5": "bzip2 and libbzip2 License v1.0.5",
    "bzip2 and libbzip2 License v1.0.6": "bzip2 and libbzip2 License v1.0.6",
    "Caldera License": "Caldera License",
    "CeCILL Free Software License Agreement v1.0": "CeCILL Free Software License Agreement v1.0",
    "CeCILL Free Software License Agreement v1.1": "CeCILL Free Software License Agreement v1.1",
    "CeCILL Free Software License Agreement v2.0": "CeCILL Free Software License Agreement v2.0",
    "CeCILL Free Software License Agreement v2.1": "CeCILL Free Software License Agreement v2.1",
    "CeCILL-B Free Software License Agreement v1.0": "CeCILL-B Free Software License Agreement",
    "CeCILL-C Free Software License Agreement v1.0": "CeCILL-C Free Software License Agreement",
    "Clarified Artistic License": "Clarified Artistic License",
    "Classpath exception 2.0": "Classpath exception 2.0",
    "CLISP exception 2.0": "CLISP exception 2.0",
    "CMU License": "CMU License",
    "CNRI Python License": "CNRI Python License",
    "CNRI Python Open Source GPL Compatible License Agreement": "CNRI Python Open Source GPL Compatible License Agreement",
    "Common Development and Distribution License 1.0": "Common Development and Distribution License 1.0",
    "Common Development and Distribution License 1.1": "Common Development and Distribution License 1.1",
    "Common Public Attribution License 1.0": "Common Public Attribution License 1.0",
    "Common Public License": "Common Public License 1.0",
    "Community Data License Agreement Permissive 1.0": "Community Data License Agreement Permissive 1.0",
    "Community Data License Agreement Sharing 1.0": "Community Data License Agreement Sharing 1.0",
    "Computer Associates Trusted Open Source License 1.1": "Computer Associates Trusted Open Source License 1.1",
    "Condor Public License v1.1": "Condor Public License v1.1",
    "Creative Commons Attribution 1.0": "Creative Commons Attribution 1.0",
    "Creative Commons Attribution 2.0": "Creative Commons Attribution 2.0",
    "Creative Commons Attribution 2.5": "Creative Commons Attribution 2.5",
    "Creative Commons Attribution 3.0": "Creative Commons Attribution 3.0",
    "Creative Commons Attribution 4.0": "Creative Commons Attribution 4.0",
    "Creative Commons Attribution No Derivatives 1.0": "Creative Commons Attribution No Derivatives 1.0",
    "Creative Commons Attribution No Derivatives 2.0": "Creative Commons Attribution No Derivatives 2.0",
    "Creative Commons Attribution No Derivatives 2.5": "Creative Commons Attribution No Derivatives 2.5",
    "Creative Commons Attribution No Derivatives 3.0": "Creative Commons Attribution No Derivatives 3.0",
    "Creative Commons Attribution Non Commercial 1.0": "Creative Commons Attribution Non Commercial 1.0",
    "Creative Commons Attribution Non Commercial 2.0": "Creative Commons Attribution Non Commercial 2.0",
    "Creative Commons Attribution Non Commercial 2.5": "Creative Commons Attribution Non Commercial 2.5",
    "Creative Commons Attribution Non Commercial 3.0": "Creative Commons Attribution Non Commercial 3.0",
    "Creative Commons Attribution Non Commercial No Derivatives 1.0": "Creative Commons Attribution Non Commercial No Derivatives 1.0",
    "Creative Commons Attribution Non Commercial No Derivatives 2.0": "Creative Commons Attribution Non Commercial No Derivatives 2.0",
    "Creative Commons Attribution Non Commercial No Derivatives 2.5": "Creative Commons Attribution Non Commercial No Derivatives 2.5",
    "Creative Commons Attribution Non Commercial No Derivatives 3.0": "Creative Commons Attribution Non Commercial No Derivatives 3.0",
    "Creative Commons Attribution Non Commercial Share Alike 1.0": "Creative Commons Attribution Non Commercial Share Alike 1.0",
    "Creative Commons Attribution Non Commercial Share Alike 2.0": "Creative Commons Attribution Non Commercial Share Alike 2.0",
    "Creative Commons Attribution Non Commercial Share Alike 2.5": "Creative Commons Attribution Non Commercial Share Alike 2.5",
    "Creative Commons Attribution Non Commercial Share Alike 3.0": "Creative Commons Attribution Non Commercial Share Alike 3.0",
    "Creative Commons Attribution Share Alike 1.0": "Creative Commons Attribution Share Alike 1.0",
    "Creative Commons Attribution Share Alike 2.0": "Creative Commons Attribution Share Alike 2.0",
    "Creative Commons Attribution Share Alike 2.5": "Creative Commons Attribution Share Alike 2.5",
    "Creative Commons Attribution Share Alike 3.0": "Creative Commons Attribution Share Alike 3.0",
    "Creative Commons Attribution Share Alike 4.0": "Creative Commons Attribution Share Alike 4.0",
    "Creative Commons Attribution-NoDerivatives 4.0": "Creative Commons Attribution No Derivatives 4.0",
    "Creative Commons Attribution-NonCommercial 4.0": "Creative Commons Attribution Non Commercial 4.0",
    "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0": "Creative Commons Attribution Non Commercial No Derivatives 4.0",
    "Creative Commons Attribution-NonCommercial-ShareAlike 4.0": "Creative Commons Attribution Non Commercial Share Alike 4.0",
    "Creative Commons CC0 1.0 Universal": "Creative Commons Zero v1.0 Universal",
    "Crossword License": "Crossword License",
    "CrystalStacker License": "CrystalStacker License",
    "CUA Office Public License v1.0": "CUA Office Public License v1.0",
    "Cube License": "Cube License",
    "Deutsche Freie Software Lizenz": "Deutsche Freie Software Lizenz",
    "diffmark license": "diffmark license",
    "DigiRule FOSS License Exception": "DigiRule FOSS License Exception",
    "Do What The Fuck You Want To Public License": "Do What The F*ck You Want To Public License",
    "DOC Software License": "DOC License",
    "Dotseqn License": "Dotseqn License",
    "DSDP License": "DSDP License",
    "dvipdfm License": "dvipdfm License",
    "Eclipse Public License 1.0": "Eclipse Public License 1.0",
    "Eclipse Public License 2.0": "Eclipse Public License 2.0",
    "eCos exception 2.0": "eCos exception 2.0",
    "eCos license version 2.0": "eCos license version 2.0",
    "Educational Community License v1.0": "Educational Community License v1.0",
    "Educational Community License v2.0": "Educational Community License v2.0",
    "eGenix.com Public License 1.1.0": "eGenix.com Public License 1.1.0",
    "Eiffel Forum License v1.0": "Eiffel Forum License v1.0",
    "Eiffel Forum License v2.0": "Eiffel Forum License v2.0",
    "Enlightenment License (e16)": "Enlightenment License (e16)",
    "enna License": "enna License",
    "Entessa Public License": "Entessa Public License v1.0",
    "Erlang Public License v1.1": "Erlang Public License v1.1",
    "EU DataGrid Software License": "EU DataGrid Software License",
    "European Union Public License 1.0": "European Union Public License 1.0",
    "European Union Public License 1.1": "European Union Public License 1.1",
    "European Union Public License 1.2": "European Union Public License 1.2",
    "Eurosym License v2": "Eurosym License",
    "Fair License": "Fair License",
    "FastCGI": "Open Market License",
    "Fawkes Runtime Exception": "Fawkes Runtime Exception",
    "feh License": "feh License",
    "FLTK exception": "FLTK exception",
    "Font exception 2.0": "Font exception 2.0",
    "Frameworx Open License 1.0": "Frameworx Open License 1.0",
    "FreeImage Public License v1.0": "FreeImage Public License v1.0",
    "FreeRTOS Exception 2.0": "FreeRTOS Exception 2.0",
    "FreeType License": "Freetype Project License",
    "FSF All Permissive License": "FSF All Permissive License",
    "FSF Unlimited License": "FSF Unlimited License",
    "FSF Unlimited License (with License Retention)": "FSF Unlimited License (with License Retention)",
    "GCC Runtime Library exception 2.0": "GCC Runtime Library exception 2.0",
    "GCC Runtime Library exception 3.1": "GCC Runtime Library exception 3.1",
    "GL2PS License, Version 2": "GL2PS License",
    "Glulxe License": "Glulxe License",
    "GNU Affero General Public License v3.0": "GNU Affero General Public License v3.0",
    "GNU Free Documentation License v1.1": "GNU Free Documentation License v1.1",
    "GNU Free Documentation License v1.2": "GNU Free Documentation License v1.2",
    "GNU Free Documentation License v1.3": "GNU Free Documentation License v1.3",
    "GNU General Public License v1.0": "GNU General Public License v1.0 only",
    "GNU General Public License v1.0 or later": "GNU General Public License v1.0 or later",
    "GNU General Public License v2.0": "GNU General Public License v2.0 only",
    "GNU General Public License v2.0 or later": "GNU General Public License v2.0 or later",
    "GNU General Public License v2.0 w/Autoconf exception": "GNU General Public License v2.0 w/Autoconf exception",
    "GNU General Public License v2.0 w/Bison exception": "GNU General Public License v2.0 w/Bison exception",
    "GNU General Public License v2.0 w/Font exception": "GNU General Public License v2.0 w/Font exception",
    "GNU General Public License v2.0 w/GCC Runtime Library exception": "GNU General Public License v2.0 w/GCC Runtime Library exception",
    "GNU General Public License v2.0 with Classpath Exception": "GNU General Public License v2.0 w/Classpath exception",
    "GNU General Public License v3.0": "GNU General Public License v3.0 only",
    "GNU General Public License v3.0 or later": "GNU General Public License v3.0 or later",
    "GNU General Public License v3.0 w/Autoconf exception": "GNU General Public License v3.0 w/Autoconf exception",
    "GNU General Public License v3.0 w/GCC Runtime Library exception": "GNU General Public License v3.0 w/GCC Runtime Library exception",
    "GNU JavaMail exception": "GNU JavaMail exception",
    "GNU Lesser General Public License v2.1": "GNU Lesser General Public License v2.1 only",
    "GNU Lesser General Public License v2.1 or later": "GNU Lesser General Public License v2.1 or later",
    "GNU Lesser General Public License v3.0": "GNU Lesser General Public License v3.0 only",
    "GNU Lesser General Public License v3.0 or later": "GNU Lesser General Public License v3.0 or later",
    "GNU Library General Public License v2.0": "GNU Library General Public License v2 only",
    "GNU Library General Public License v2.0 or later": "GNU Library General Public License v2 or later",
    "gnuplot License": "gnuplot License",
    "gSOAP Public License v1.3b": "gSOAP Public License v1.3b",
    "Haskell Language Report License": "Haskell Language Report License",
    "Historic Permission Notice and Disclaimer": "Historic Permission Notice and Disclaimer",
    "i2p GPL+Java Exception": "i2p GPL+Java Exception",
    "IBM PowerPC Initialization and Boot Software": "IBM PowerPC Initialization and Boot Software",
    "IBM Public License v1.0": "IBM Public License v1.0",
    "ICU License": "ICU License",
    "ImageMagick (Apache 2.0) License": "ImageMagick License",
    "iMatix Standard Function Library Agreement": "iMatix Standard Function Library Agreement",
    "Imlib2 License": "Imlib2 License",
    "Independent JPEG Group License": "Independent JPEG Group License",
    "Info-ZIP License": "Info-ZIP License",
    "Intel ACPI Software License Agreement": "Intel ACPI Software License Agreement",
    "Intel Open Source License": "Intel Open Source License",
    "Interbase Public License v1.0": "Interbase Public License v1.0",
    "IPA Font License": "IPA Font License",
    "ISC License (ISC)": "ISC License",
    "JasPer License Version 2.0": "JasPer License",
    "Jython License": "CNRI Jython License",
    "LaTeX Project Public License v1.0": "LaTeX Project Public License v1.0",
    "LaTeX Project Public License v1.1": "LaTeX Project Public License v1.1",
    "LaTeX Project Public License v1.2": "LaTeX Project Public License v1.2",
    "LaTeX Project Public License v1.3a": "LaTeX Project Public License v1.3a",
    "LaTeX Project Public License v1.3c": "LaTeX Project Public License v1.3c",
    "Latex2e License": "Latex2e License",
    "Lawrence Berkeley National Labs BSD variant license": "Lawrence Berkeley National Labs BSD variant license",
    "Leptonica License": "Leptonica License",
    "Lesser General Public License For Linguistic Resources": "Lesser General Public License For Linguistic Resources",
    "libpng License": "libpng License",
    "libtiff License": "libtiff License",
    "Libtool Exception": "Libtool Exception",
    "Licence Art Libre 1.2": "Licence Art Libre 1.2",
    "Licence Art Libre 1.3": "Licence Art Libre 1.3",
    "Licence Libre du Québec – Permissive version 1.1": "Licence Libre du Québec – Permissive version 1.1",
    "Licence Libre du Québec – Réciprocité forte version 1.1": "Licence Libre du Québec – Réciprocité forte version 1.1",
    "Licence Libre du Québec – Réciprocité version 1.1": "Licence Libre du Québec – Réciprocité version 1.1",
    "Linux Kernel Variant of OpenIB.org license": "Linux Kernel Variant of OpenIB.org license",
    "Linux Syscall Note": "Linux Syscall Note",
    "LLVM Exception": "LLVM Exception",
    "Lucent Public License v1.0": "Lucent Public License Version 1.0",
    "Lucent Public License v1.02 (Plan9)": "Lucent Public License v1.02",
    "LZMA exception": "LZMA exception",
    "Macros and Inline Functions Exception": "Macros and Inline Functions Exception",
    "MakeIndex License": "MakeIndex License",
    "Matrix Template Library License": "Matrix Template Library License",
    "Microsoft Public License (Ms-PL)": "Microsoft Public License",
    "Microsoft Reciprocal License (Ms-RL)": "Microsoft Reciprocal License",
    "MirOS Licence": "MirOS Licence",
    "MIT +no-false-attribs license": "MIT +no-false-attribs license",
    "MIT License (Expat)": "MIT license",
    "MIT No Attribution": "MIT No Attribution",
    "Motosoto License": "Motosoto License",
    "Mozilla Public License 1.0": "Mozilla Public License 1.0",
    "Mozilla Public License 1.1": "Mozilla Public License 1.1",
    "Mozilla Public License 2.0": "Mozilla Public License 2.0",
    "Mozilla Public License 2.0 (no copyleft exception)": "Mozilla Public License 2.0 (no copyleft exception)",
    "MPICH2 License": "mpich2 License",
    "Multics License": "Multics License",
    "Mup License": "Mup License",
    "NASA Open Source Agreement 1.3": "NASA Open Source Agreement 1.3",
    "Naumen Public License": "Naumen Public License",
    "Net Boolean Public License v1": "Net Boolean Public License v1",
    "netCDF License": "NetCDF license",
    "Nethack General Public License": "Nethack General Public License",
    "Netizen Open Source License v1.0": "Netizen Open Source License",
    "Netscape Public License 1.0": "Netscape Public License v1.0",
    "Netscape Public License 1.1": "Netscape Public License v1.1",
    "Net-SNMP License": "Net-SNMP License",
    "Newsletr License": "Newsletr License",
    "No Limit Public License": "No Limit Public License",
    "Nokia Open Source License": "Nokia Open Source License",
    "Nokia Qt LGPL exception 1.1": "Nokia Qt LGPL exception 1.1",
    "Non-Profit Open Software License 3.0": "Non-Profit Open Software License 3.0",
    "Norwegian Licence for Open Government Data": "Norwegian Licence for Open Government Data",
    "Noweb License": "Noweb License",
    "NTP License": "NTP License",
    "Nunit License": "Nunit License",
    "OCLC Research Public License 2.0": "OCLC Research Public License 2.0",
    "ODC Open Database License v1.0": "ODC Open Database License v1.0",
    "ODC Public Domain Dedication & License 1.0": "ODC Public Domain Dedication & License 1.0",
    "Open CASCADE Exception 1.0": "Open CASCADE Exception 1.0",
    "Open CASCADE Technology Public License": "Open CASCADE Technology Public License",
    "Open Group Test Suite License": "Open Group Test Suite License",
    "Open LDAP Public License 2.2.2": "Open LDAP Public License 2.2.2",
    "Open LDAP Public License v1.1": "Open LDAP Public License v1.1",
    "Open LDAP Public License v1.3": "Open LDAP Public License v1.3",
    "Open LDAP Public License v1.4": "Open LDAP Public License v1.4",
    "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)": "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)",
    "Open LDAP Public License v2.1": "Open LDAP Public License v2.1",
    "Open LDAP Public License v2.2": "Open LDAP Public License v2.2",
    "Open LDAP Public License v2.2.1": "Open LDAP Public License v2.2.1",
    "Open LDAP Public License v2.5": "Open LDAP Public License v2.5",
    "Open LDAP Public License v2.6": "Open LDAP Public License v2.6",
    "Open Public License v1.0": "Open Public License v1.0",
    "Open Software License 1.0": "Open Software License 1.0",
    "Open Software License 1.1": "Open Software License 1.1",
    "Open Software License 2.0": "Open Software License 2.0",
    "Open Software License 2.1": "Open Software License 2.1",
    "Open Software License 3.0": "Open Software License 3.0",
    "OpenJDK Assembly exception 1.0": "OpenJDK Assembly exception 1.0",
    "OpenLDAP Public License v1.2": "Open LDAP Public License v1.2",
    "OpenLDAP Public License v2.0.1": "Open LDAP Public License v2.0.1",
    "OpenLDAP Public License v2.3": "Open LDAP Public License v2.3",
    "OpenLDAP Public License v2.4": "Open LDAP Public License v2.4",
    "OpenLDAP Public License v2.7": "Open LDAP Public License v2.7",
    "OpenLDAP Public License v2.8": "Open LDAP Public License v2.8",
    "OpenSSL License": "OpenSSL License",
    "OpenVPN OpenSSL Exception": "OpenVPN OpenSSL Exception",
    "OSET Public License version 2.1": "OSET Public License version 2.1",
    "PERL Artistic License": "Artistic License 1.0 (Perl)",
    "PHP License v3.0": "PHP License v3.0",
    "PHP License v3.01": "PHP License v3.01",
    "Plexus Classworlds License": "Plexus Classworlds License",
    "psfrag License": "psfrag License",
    "psutils License": "psutils License",
    "Python License 2.0": "Python License 2.0",
    "Q Public License 1.0": "Q Public License 1.0",
    "Qhull License": "Qhull License",
    "Qwt exception 1.0": "Qwt exception 1.0",
    "Rdisc License": "Rdisc License",
    "RealNetworks Public Source License v1.0": "RealNetworks Public Source License v1.0",
    "Reciprocal Public License": "Reciprocal Public License 1.1",
    "Reciprocal Public License 1.1": "Reciprocal Public License 1.1",
    "Reciprocal Public License 1.5": "Reciprocal Public License 1.5",
    "Red Hat eCos Public License v1.1": "Red Hat eCos Public License v1.1",
    "Ricoh Source Code Public License": "Ricoh Source Code Public License",
    "RSA Message-Digest License": "RSA Message-Digest License",
    "Ruby License": "Ruby License",
    "Sax Public Domain Notice": "Sax Public Domain Notice",
    "Saxpath License": "Saxpath License",
    "SCEA Shared Source License": "SCEA Shared Source License",
    "Scheme Widget Library (SWL) Software License Agreement": "Scheme Widget Library (SWL) Software License Agreement",
    "Secure Messaging Protocol Public License": "Secure Messaging Protocol Public License",
    "Sendmail License": "Sendmail License",
    "SGI Free Software License B v1.0": "SGI Free Software License B v1.0",
    "SGI Free Software License B v1.1": "SGI Free Software License B v1.1",
    "SGI Free Software License B v2.0": "SGI Free Software License B v2.0",
    "SIL Open Font License 1.0": "SIL Open Font License 1.0",
    "SIL Open Font License 1.1": "SIL Open Font License 1.1",
    "Simple Public License 2.0": "Simple Public License 2.0",
    "Sleepycat License": "Sleepycat License",
    "SNIA Public License 1.1": "SNIA Public License 1.1",
    "Spencer License 86": "Spencer License 86",
    "Spencer License 94": "Spencer License 94",
    "Spencer License 99": "Spencer License 99",
    "Standard ML of New Jersey License": "Standard ML of New Jersey License",
    "SugarCRM Public License v1.1.3": "SugarCRM Public License v1.1.3",
    "Sun Industry Standards Source License (SISSL) v1.1": "Sun Industry Standards Source License v1.1",
    "Sun Industry Standards Source License v1.2": "Sun Industry Standards Source License v1.2",
    "Sun Public License v1.0": "Sun Public License v1.0",
    "Sybase Open Watcom Public License 1.0": "Sybase Open Watcom Public License 1.0",
    "Tcl License": "TCL/TK License",
    "TCP Wrappers License": "TCP Wrappers License",
    "The Beerware License": "Beerware License",
    "The Code Project Open License (CPOL) 1.02": "Code Project Open License 1.02",
    "The Curl License": "curl License",
    "The JSON License": "JSON License",
    "The PostgreSQL License": "PostgreSQL License",
    "The Unlicense": "The Unlicense",
    "TMate License": "TMate Open Source License",
    "TORQUE v2.5+ Software License v1.1": "TORQUE v2.5+ Software License v1.1",
    "Trusster Open Source License": "Trusster Open Source License",
    "U-Boot exception 2.0": "U-Boot exception 2.0",
    "Unicode License Agreement - Data Files and Software (2015)": "Unicode License Agreement - Data Files and Software (2015)",
    "Unicode License Agreement - Data Files and Software (2016)": "Unicode License Agreement - Data Files and Software (2016)",
    "Unicode Terms of Use": "Unicode Terms of Use",
    "Universal Permissive License v1.0": "Universal Permissive License v1.0",
    "University of Illinois/NCSA Open Source License": "University of Illinois/NCSA Open Source License",
    "US Naval Research Laboratory (NRL) v1.1": "NRL License",
    "Vim License": "Vim License",
    "VOSTROM Public License for Open Source": "VOSTROM Public License for Open Source",
    "Vovida Software License v1.0": "Vovida Software License v1.0",
    "W3C Software Notice and Document License (2015-05-13)": "W3C Software Notice and Document License (2015-05-13)",
    "W3C Software Notice and License (1998-07-20)": "W3C Software Notice and License (1998-07-20)",
    "W3C Software Notice and License (2002-12-31)": "W3C Software Notice and License (2002-12-31)",
    "Wsuipa License": "Wsuipa License",
    "WxWindows Library Exception 3.1": "WxWindows Library Exception 3.1",
    "wxWindows Library Licence, Version 3.1": "wxWindows Library License",
    "X.Net License": "X.Net License",
    "X11 License": "X11 License",
    "Xerox License": "Xerox License",
    "XFree86 License 1.1": "XFree86 License 1.1",
    "xinetd License": "xinetd License",
    "XPP License": "XPP License",
    "XSkat": "XSkat License",
    "Yahoo! Public License v1.0": "Yahoo! Public License v1.0",
    "Yahoo! Public License v1.1": "Yahoo! Public License v1.1",
    "Zed License": "Zed License",
    "Zend License v2.0": "Zend License v2.0",
    "Zimbra Public License v1.4": "Zimbra Public License v1.4",
    "Zimbra Publice License v1.3": "Zimbra Public License v1.3",
    "zlib License": "zlib License",
    "zlib/libpng License with Acknowledgement": "zlib/libpng License with Acknowledgement",
    "Zope Public License 1.1": "Zope Public License 1.1",
    "Zope Public License 2.0": "Zope Public License 2.0",
    "Zope Public License 2.1": "Zope Public License 2.1",
})
'''
| en | 0.573794 | Copyright 2020 Flexera Software LLC See LICENSE.TXT for full license text SPDX-License-Identifier: MIT Author : sgeary Created On : Fri May 01 2020 File : licenses.py LICENSEMAPPINGS["389 Directory Server Exception"] = "389 Directory Server Exception" LICENSEMAPPINGS["3dfx Glide License"] = "3dfx Glide License" LICENSEMAPPINGS["3DFX GLIDE Source Code General Public License"] = "3dfx Glide License" LICENSEMAPPINGS["Abstyles License"] = "Abstyles License" LICENSEMAPPINGS["Academic Free License v1.1"] = "Academic Free License v1.1" LICENSEMAPPINGS["Academic Free License v1.2"] = "Academic Free License v1.2" LICENSEMAPPINGS["Academic Free License v2.0"] = "Academic Free License v2.0" LICENSEMAPPINGS["Academic Free License v2.1"] = "Academic Free License v2.1" LICENSEMAPPINGS["Academic Free License v3.0"] = "Academic Free License v3.0" LICENSEMAPPINGS["Academy of Motion Picture Arts and Sciences BSD"] = "Academy of Motion Picture Arts and Sciences BSD" LICENSEMAPPINGS["Adaptive Public License 1.0"] = "Adaptive Public License 1.0" LICENSEMAPPINGS["Adobe Glyph List License"] = "Adobe Glyph List License" LICENSEMAPPINGS["Adobe Postscript AFM License"] = "Adobe Postscript AFM License" LICENSEMAPPINGS["Adobe Systems Incorporated Source Code License Agreement"] = "Adobe Systems Incorporated Source Code License Agreement" LICENSEMAPPINGS["Affero General Public License v1.0"] = "Affero General Public License v1.0" LICENSEMAPPINGS["Afmparse License"] = "Afmparse License" LICENSEMAPPINGS["Aladdin Free Public License v8"] = "Aladdin Free Public License" LICENSEMAPPINGS["Allegro Giftware License"] = "Giftware License" LICENSEMAPPINGS["Amazon Digital Services License"] = "Amazon Digital Services License" LICENSEMAPPINGS["AMD's plpa_map.c License"] = "AMD's plpa_map.c License" LICENSEMAPPINGS["ANTLR Software Rights Notice"] = "ANTLR Software Rights Notice" LICENSEMAPPINGS["Apache License 1.0"] = "Apache License 1.0" LICENSEMAPPINGS["Apache License 1.1"] = "Apache 
License 1.1" LICENSEMAPPINGS["Apache License 2.0"] = "Apache License 2.0" LICENSEMAPPINGS["Apple MIT License"] = "Apple MIT License" LICENSEMAPPINGS["Apple Public Source License 1.0"] = "Apple Public Source License 1.0" LICENSEMAPPINGS["Apple Public Source License 1.1"] = "Apple Public Source License 1.1" LICENSEMAPPINGS["Apple Public Source License 1.2"] = "Apple Public Source License 1.2" LICENSEMAPPINGS["Apple Public Source License 2.0"] = "Apple Public Source License 2.0" LICENSEMAPPINGS["Artistic License 1.0"] = "Artistic License 1.0" LICENSEMAPPINGS["Artistic License 1.0 w/clause 8"] = "Artistic License 1.0 w/clause 8" LICENSEMAPPINGS["Artistic License 2.0"] = "Artistic License 2.0" LICENSEMAPPINGS["Attribution Assurance License"] = "Attribution Assurance License" LICENSEMAPPINGS["Autoconf exception 2.0"] = "Autoconf exception 2.0" LICENSEMAPPINGS["Autoconf exception 3.0"] = "Autoconf exception 3.0" LICENSEMAPPINGS["Bahyph License"] = "Bahyph License" LICENSEMAPPINGS["Barr License"] = "Barr License" LICENSEMAPPINGS["Bison exception 2.2"] = "Bison exception 2.2" LICENSEMAPPINGS["BitTorrent Open Source License v1.0"] = "BitTorrent Open Source License v1.0" LICENSEMAPPINGS["BitTorrent Open Source License v1.1"] = "BitTorrent Open Source License v1.1" LICENSEMAPPINGS["Boost Software License 1.0"] = "Boost Software License 1.0" LICENSEMAPPINGS["Bootloader Distribution Exception"] = "Bootloader Distribution Exception" LICENSEMAPPINGS["Borceux license"] = "Borceux license" LICENSEMAPPINGS["BSD 1-Clause License"] = "BSD 1-Clause License" LICENSEMAPPINGS["BSD 2-clause \"Simplified\" or \"FreeBSD\" License"] = "BSD 2-clause \"Simplified\" License" LICENSEMAPPINGS["BSD 2-clause FreeBSD License"] = "BSD 2-clause FreeBSD License" LICENSEMAPPINGS["BSD 2-clause NetBSD License"] = "BSD 2-clause NetBSD License" LICENSEMAPPINGS["BSD 3-clause \"New\" or \"Revised\" License"] = "BSD 3-clause \"New\" or \"Revised\" License" LICENSEMAPPINGS["BSD 3-clause Clear License"] = "BSD 
3-clause Clear License" LICENSEMAPPINGS["BSD 3-Clause No Nuclear License"] = "BSD 3-Clause No Nuclear License" LICENSEMAPPINGS["BSD 3-Clause No Nuclear License 2014"] = "BSD 3-Clause No Nuclear License 2014" LICENSEMAPPINGS["BSD 3-Clause No Nuclear Warranty"] = "BSD 3-Clause No Nuclear Warranty" LICENSEMAPPINGS["BSD 4-clause \"Original\" or \"Old\" License"] = "BSD 4-clause \"Original\" or \"Old\" License" LICENSEMAPPINGS["BSD Protection License"] = "BSD Protection License" LICENSEMAPPINGS["BSD Source Code Attribution"] = "BSD Source Code Attribution" LICENSEMAPPINGS["BSD with attribution"] = "BSD with attribution" LICENSEMAPPINGS["BSD Zero Clause License"] = "BSD Zero Clause License" LICENSEMAPPINGS["BSD-2-Clause Plus Patent License"] = "BSD-2-Clause Plus Patent License" LICENSEMAPPINGS["BSD-4-Clause (University of California-Specific)"] = "BSD-4-Clause (University of California-Specific)" LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.5"] = "bzip2 and libbzip2 License v1.0.5" LICENSEMAPPINGS["bzip2 and libbzip2 License v1.0.6"] = "bzip2 and libbzip2 License v1.0.6" LICENSEMAPPINGS["Caldera License"] = "Caldera License" LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.0"] = "CeCILL Free Software License Agreement v1.0" LICENSEMAPPINGS["CeCILL Free Software License Agreement v1.1"] = "CeCILL Free Software License Agreement v1.1" LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.0"] = "CeCILL Free Software License Agreement v2.0" LICENSEMAPPINGS["CeCILL Free Software License Agreement v2.1"] = "CeCILL Free Software License Agreement v2.1" LICENSEMAPPINGS["CeCILL-B Free Software License Agreement v1.0"] = "CeCILL-B Free Software License Agreement" LICENSEMAPPINGS["CeCILL-C Free Software License Agreement v1.0"] = "CeCILL-C Free Software License Agreement" LICENSEMAPPINGS["Clarified Artistic License"] = "Clarified Artistic License" LICENSEMAPPINGS["Classpath exception 2.0"] = "Classpath exception 2.0" LICENSEMAPPINGS["CLISP exception 2.0"] = 
"CLISP exception 2.0" LICENSEMAPPINGS["CMU License"] = "CMU License" LICENSEMAPPINGS["CNRI Python License"] = "CNRI Python License" LICENSEMAPPINGS["CNRI Python Open Source GPL Compatible License Agreement"] = "CNRI Python Open Source GPL Compatible License Agreement" LICENSEMAPPINGS["Common Development and Distribution License 1.0"] = "Common Development and Distribution License 1.0" LICENSEMAPPINGS["Common Development and Distribution License 1.1"] = "Common Development and Distribution License 1.1" LICENSEMAPPINGS["Common Public Attribution License 1.0"] = "Common Public Attribution License 1.0" LICENSEMAPPINGS["Common Public License"] = "Common Public License 1.0" LICENSEMAPPINGS["Community Data License Agreement Permissive 1.0"] = "Community Data License Agreement Permissive 1.0" LICENSEMAPPINGS["Community Data License Agreement Sharing 1.0"] = "Community Data License Agreement Sharing 1.0" LICENSEMAPPINGS["Computer Associates Trusted Open Source License 1.1"] = "Computer Associates Trusted Open Source License 1.1" LICENSEMAPPINGS["Condor Public License v1.1"] = "Condor Public License v1.1" LICENSEMAPPINGS["Creative Commons Attribution 1.0"] = "Creative Commons Attribution 1.0" LICENSEMAPPINGS["Creative Commons Attribution 2.0"] = "Creative Commons Attribution 2.0" LICENSEMAPPINGS["Creative Commons Attribution 2.5"] = "Creative Commons Attribution 2.5" LICENSEMAPPINGS["Creative Commons Attribution 3.0"] = "Creative Commons Attribution 3.0" LICENSEMAPPINGS["Creative Commons Attribution 4.0"] = "Creative Commons Attribution 4.0" LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 1.0"] = "Creative Commons Attribution No Derivatives 1.0" LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.0"] = "Creative Commons Attribution No Derivatives 2.0" LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 2.5"] = "Creative Commons Attribution No Derivatives 2.5" LICENSEMAPPINGS["Creative Commons Attribution No Derivatives 3.0"] = "Creative 
Commons Attribution No Derivatives 3.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 1.0"] = "Creative Commons Attribution Non Commercial 1.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.0"] = "Creative Commons Attribution Non Commercial 2.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 2.5"] = "Creative Commons Attribution Non Commercial 2.5" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial 3.0"] = "Creative Commons Attribution Non Commercial 3.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 1.0"] = "Creative Commons Attribution Non Commercial No Derivatives 1.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.0"] = "Creative Commons Attribution Non Commercial No Derivatives 2.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 2.5"] = "Creative Commons Attribution Non Commercial No Derivatives 2.5" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial No Derivatives 3.0"] = "Creative Commons Attribution Non Commercial No Derivatives 3.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 1.0"] = "Creative Commons Attribution Non Commercial Share Alike 1.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.0"] = "Creative Commons Attribution Non Commercial Share Alike 2.0" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 2.5"] = "Creative Commons Attribution Non Commercial Share Alike 2.5" LICENSEMAPPINGS["Creative Commons Attribution Non Commercial Share Alike 3.0"] = "Creative Commons Attribution Non Commercial Share Alike 3.0" LICENSEMAPPINGS["Creative Commons Attribution Share Alike 1.0"] = "Creative Commons Attribution Share Alike 1.0" LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.0"] = "Creative Commons Attribution Share Alike 2.0" LICENSEMAPPINGS["Creative Commons Attribution Share Alike 2.5"] = "Creative Commons 
Attribution Share Alike 2.5" LICENSEMAPPINGS["Creative Commons Attribution Share Alike 3.0"] = "Creative Commons Attribution Share Alike 3.0" LICENSEMAPPINGS["Creative Commons Attribution Share Alike 4.0"] = "Creative Commons Attribution Share Alike 4.0" LICENSEMAPPINGS["Creative Commons Attribution-NoDerivatives 4.0"] = "Creative Commons Attribution No Derivatives 4.0" LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial 4.0"] = "Creative Commons Attribution Non Commercial 4.0" LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-NoDerivatives 4.0"] = "Creative Commons Attribution Non Commercial No Derivatives 4.0" LICENSEMAPPINGS["Creative Commons Attribution-NonCommercial-ShareAlike 4.0"] = "Creative Commons Attribution Non Commercial Share Alike 4.0" LICENSEMAPPINGS["Creative Commons CC0 1.0 Universal"] = "Creative Commons Zero v1.0 Universal" LICENSEMAPPINGS["Crossword License"] = "Crossword License" LICENSEMAPPINGS["CrystalStacker License"] = "CrystalStacker License" LICENSEMAPPINGS["CUA Office Public License v1.0"] = "CUA Office Public License v1.0" LICENSEMAPPINGS["Cube License"] = "Cube License" LICENSEMAPPINGS["Deutsche Freie Software Lizenz"] = "Deutsche Freie Software Lizenz" LICENSEMAPPINGS["diffmark license"] = "diffmark license" LICENSEMAPPINGS["DigiRule FOSS License Exception"] = "DigiRule FOSS License Exception" LICENSEMAPPINGS["Do What The Fuck You Want To Public License"] = "Do What The F*ck You Want To Public License" LICENSEMAPPINGS["DOC Software License"] = "DOC License" LICENSEMAPPINGS["Dotseqn License"] = "Dotseqn License" LICENSEMAPPINGS["DSDP License"] = "DSDP License" LICENSEMAPPINGS["dvipdfm License"] = "dvipdfm License" LICENSEMAPPINGS["Eclipse Public License 1.0"] = "Eclipse Public License 1.0" LICENSEMAPPINGS["Eclipse Public License 2.0"] = "Eclipse Public License 2.0" LICENSEMAPPINGS["eCos exception 2.0"] = "eCos exception 2.0" LICENSEMAPPINGS["eCos license version 2.0"] = "eCos license version 2.0" 
LICENSEMAPPINGS["Educational Community License v1.0"] = "Educational Community License v1.0" LICENSEMAPPINGS["Educational Community License v2.0"] = "Educational Community License v2.0" LICENSEMAPPINGS["eGenix.com Public License 1.1.0"] = "eGenix.com Public License 1.1.0" LICENSEMAPPINGS["Eiffel Forum License v1.0"] = "Eiffel Forum License v1.0" LICENSEMAPPINGS["Eiffel Forum License v2.0"] = "Eiffel Forum License v2.0" LICENSEMAPPINGS["Enlightenment License (e16)"] = "Enlightenment License (e16)" LICENSEMAPPINGS["enna License"] = "enna License" LICENSEMAPPINGS["Entessa Public License"] = "Entessa Public License v1.0" LICENSEMAPPINGS["Erlang Public License v1.1"] = "Erlang Public License v1.1" LICENSEMAPPINGS["EU DataGrid Software License"] = "EU DataGrid Software License" LICENSEMAPPINGS["European Union Public License 1.0"] = "European Union Public License 1.0" LICENSEMAPPINGS["European Union Public License 1.1"] = "European Union Public License 1.1" LICENSEMAPPINGS["European Union Public License 1.2"] = "European Union Public License 1.2" LICENSEMAPPINGS["Eurosym License v2"] = "Eurosym License" LICENSEMAPPINGS["Fair License"] = "Fair License" LICENSEMAPPINGS["FastCGI"] = "Open Market License" LICENSEMAPPINGS["Fawkes Runtime Exception"] = "Fawkes Runtime Exception" LICENSEMAPPINGS["feh License"] = "feh License" LICENSEMAPPINGS["FLTK exception"] = "FLTK exception" LICENSEMAPPINGS["Font exception 2.0"] = "Font exception 2.0" LICENSEMAPPINGS["Frameworx Open License 1.0"] = "Frameworx Open License 1.0" LICENSEMAPPINGS["FreeImage Public License v1.0"] = "FreeImage Public License v1.0" LICENSEMAPPINGS["FreeRTOS Exception 2.0"] = "FreeRTOS Exception 2.0" LICENSEMAPPINGS["FreeType License"] = "Freetype Project License" LICENSEMAPPINGS["FSF All Permissive License"] = "FSF All Permissive License" LICENSEMAPPINGS["FSF Unlimited License"] = "FSF Unlimited License" LICENSEMAPPINGS["FSF Unlimited License (with License Retention)"] = "FSF Unlimited License (with License 
Retention)" LICENSEMAPPINGS["GCC Runtime Library exception 2.0"] = "GCC Runtime Library exception 2.0" LICENSEMAPPINGS["GCC Runtime Library exception 3.1"] = "GCC Runtime Library exception 3.1" LICENSEMAPPINGS["GL2PS License, Version 2"] = "GL2PS License" LICENSEMAPPINGS["Glulxe License"] = "Glulxe License" LICENSEMAPPINGS["GNU Affero General Public License v3.0"] = "GNU Affero General Public License v3.0" LICENSEMAPPINGS["GNU Free Documentation License v1.1"] = "GNU Free Documentation License v1.1" LICENSEMAPPINGS["GNU Free Documentation License v1.2"] = "GNU Free Documentation License v1.2" LICENSEMAPPINGS["GNU Free Documentation License v1.3"] = "GNU Free Documentation License v1.3" LICENSEMAPPINGS["GNU General Public License v1.0"] = "GNU General Public License v1.0 only" LICENSEMAPPINGS["GNU General Public License v1.0 or later"] = "GNU General Public License v1.0 or later" LICENSEMAPPINGS["GNU General Public License v2.0"] = "GNU General Public License v2.0 only" LICENSEMAPPINGS["GNU General Public License v2.0 or later"] = "GNU General Public License v2.0 or later" LICENSEMAPPINGS["GNU General Public License v2.0 w/Autoconf exception"] = "GNU General Public License v2.0 w/Autoconf exception" LICENSEMAPPINGS["GNU General Public License v2.0 w/Bison exception"] = "GNU General Public License v2.0 w/Bison exception" LICENSEMAPPINGS["GNU General Public License v2.0 w/Font exception"] = "GNU General Public License v2.0 w/Font exception" LICENSEMAPPINGS["GNU General Public License v2.0 w/GCC Runtime Library exception"] = "GNU General Public License v2.0 w/GCC Runtime Library exception" LICENSEMAPPINGS["GNU General Public License v2.0 with Classpath Exception"] = "GNU General Public License v2.0 w/Classpath exception" LICENSEMAPPINGS["GNU General Public License v3.0"] = "GNU General Public License v3.0 only" LICENSEMAPPINGS["GNU General Public License v3.0 or later"] = "GNU General Public License v3.0 or later" LICENSEMAPPINGS["GNU General Public License v3.0 
w/Autoconf exception"] = "GNU General Public License v3.0 w/Autoconf exception" LICENSEMAPPINGS["GNU General Public License v3.0 w/GCC Runtime Library exception"] = "GNU General Public License v3.0 w/GCC Runtime Library exception" LICENSEMAPPINGS["GNU JavaMail exception"] = "GNU JavaMail exception" LICENSEMAPPINGS["GNU Lesser General Public License v2.1"] = "GNU Lesser General Public License v2.1 only" LICENSEMAPPINGS["GNU Lesser General Public License v2.1 or later"] = "GNU Lesser General Public License v2.1 or later" LICENSEMAPPINGS["GNU Lesser General Public License v3.0"] = "GNU Lesser General Public License v3.0 only" LICENSEMAPPINGS["GNU Lesser General Public License v3.0 or later"] = "GNU Lesser General Public License v3.0 or later" LICENSEMAPPINGS["GNU Library General Public License v2.0"] = "GNU Library General Public License v2 only" LICENSEMAPPINGS["GNU Library General Public License v2.0 or later"] = "GNU Library General Public License v2 or later" LICENSEMAPPINGS["gnuplot License"] = "gnuplot License" LICENSEMAPPINGS["gSOAP Public License v1.3b"] = "gSOAP Public License v1.3b" LICENSEMAPPINGS["Haskell Language Report License"] = "Haskell Language Report License" LICENSEMAPPINGS["Historic Permission Notice and Disclaimer"] = "Historic Permission Notice and Disclaimer" LICENSEMAPPINGS["i2p GPL+Java Exception"] = "i2p GPL+Java Exception" LICENSEMAPPINGS["IBM PowerPC Initialization and Boot Software"] = "IBM PowerPC Initialization and Boot Software" LICENSEMAPPINGS["IBM Public License v1.0"] = "IBM Public License v1.0" LICENSEMAPPINGS["ICU License"] = "ICU License" LICENSEMAPPINGS["ImageMagick (Apache 2.0) License"] = "ImageMagick License" LICENSEMAPPINGS["iMatix Standard Function Library Agreement"] = "iMatix Standard Function Library Agreement" LICENSEMAPPINGS["Imlib2 License"] = "Imlib2 License" LICENSEMAPPINGS["Independent JPEG Group License"] = "Independent JPEG Group License" LICENSEMAPPINGS["Info-ZIP License"] = "Info-ZIP License" 
LICENSEMAPPINGS["Intel ACPI Software License Agreement"] = "Intel ACPI Software License Agreement" LICENSEMAPPINGS["Intel Open Source License"] = "Intel Open Source License" LICENSEMAPPINGS["Interbase Public License v1.0"] = "Interbase Public License v1.0" LICENSEMAPPINGS["IPA Font License"] = "IPA Font License" LICENSEMAPPINGS["ISC License (ISC)"] = "ISC License" LICENSEMAPPINGS["JasPer License Version 2.0"] = "JasPer License" LICENSEMAPPINGS["Jython License"] = "CNRI Jython License" LICENSEMAPPINGS["LaTeX Project Public License v1.0"] = "LaTeX Project Public License v1.0" LICENSEMAPPINGS["LaTeX Project Public License v1.1"] = "LaTeX Project Public License v1.1" LICENSEMAPPINGS["LaTeX Project Public License v1.2"] = "LaTeX Project Public License v1.2" LICENSEMAPPINGS["LaTeX Project Public License v1.3a"] = "LaTeX Project Public License v1.3a" LICENSEMAPPINGS["LaTeX Project Public License v1.3c"] = "LaTeX Project Public License v1.3c" LICENSEMAPPINGS["Latex2e License"] = "Latex2e License" LICENSEMAPPINGS["Lawrence Berkeley National Labs BSD variant license"] = "Lawrence Berkeley National Labs BSD variant license" LICENSEMAPPINGS["Leptonica License"] = "Leptonica License" LICENSEMAPPINGS["Lesser General Public License For Linguistic Resources"] = "Lesser General Public License For Linguistic Resources" LICENSEMAPPINGS["libpng License"] = "libpng License" LICENSEMAPPINGS["libtiff License"] = "libtiff License" LICENSEMAPPINGS["Libtool Exception"] = "Libtool Exception" LICENSEMAPPINGS["Licence Art Libre 1.2"] = "Licence Art Libre 1.2" LICENSEMAPPINGS["Licence Art Libre 1.3"] = "Licence Art Libre 1.3" LICENSEMAPPINGS["Licence Libre du Québec – Permissive version 1.1"] = "Licence Libre du Québec – Permissive version 1.1" LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité forte version 1.1"] = "Licence Libre du Québec – Réciprocité forte version 1.1" LICENSEMAPPINGS["Licence Libre du Québec – Réciprocité version 1.1"] = "Licence Libre du Québec – Réciprocité version 
1.1" LICENSEMAPPINGS["Linux Kernel Variant of OpenIB.org license"] = "Linux Kernel Variant of OpenIB.org license" LICENSEMAPPINGS["Linux Syscall Note"] = "Linux Syscall Note" LICENSEMAPPINGS["LLVM Exception"] = "LLVM Exception" LICENSEMAPPINGS["Lucent Public License v1.0"] = "Lucent Public License Version 1.0" LICENSEMAPPINGS["Lucent Public License v1.02 (Plan9)"] = "Lucent Public License v1.02" LICENSEMAPPINGS["LZMA exception"] = "LZMA exception" LICENSEMAPPINGS["Macros and Inline Functions Exception"] = "Macros and Inline Functions Exception" LICENSEMAPPINGS["MakeIndex License"] = "MakeIndex License" LICENSEMAPPINGS["Matrix Template Library License"] = "Matrix Template Library License" LICENSEMAPPINGS["Microsoft Public License (Ms-PL)"] = "Microsoft Public License" LICENSEMAPPINGS["Microsoft Reciprocal License (Ms-RL)"] = "Microsoft Reciprocal License" LICENSEMAPPINGS["MirOS Licence"] = "MirOS Licence" LICENSEMAPPINGS["MIT +no-false-attribs license"] = "MIT +no-false-attribs license" LICENSEMAPPINGS["MIT License (Expat)"] = "MIT license" LICENSEMAPPINGS["MIT No Attribution"] = "MIT No Attribution" LICENSEMAPPINGS["Motosoto License"] = "Motosoto License" LICENSEMAPPINGS["Mozilla Public License 1.0"] = "Mozilla Public License 1.0" LICENSEMAPPINGS["Mozilla Public License 1.1"] = "Mozilla Public License 1.1" LICENSEMAPPINGS["Mozilla Public License 2.0"] = "Mozilla Public License 2.0" LICENSEMAPPINGS["Mozilla Public License 2.0 (no copyleft exception)"] = "Mozilla Public License 2.0 (no copyleft exception)" LICENSEMAPPINGS["MPICH2 License"] = "mpich2 License" LICENSEMAPPINGS["Multics License"] = "Multics License" LICENSEMAPPINGS["Mup License"] = "Mup License" LICENSEMAPPINGS["NASA Open Source Agreement 1.3"] = "NASA Open Source Agreement 1.3" LICENSEMAPPINGS["Naumen Public License"] = "Naumen Public License" LICENSEMAPPINGS["Net Boolean Public License v1"] = "Net Boolean Public License v1" LICENSEMAPPINGS["netCDF License"] = "NetCDF license" LICENSEMAPPINGS["Nethack 
General Public License"] = "Nethack General Public License" LICENSEMAPPINGS["Netizen Open Source License v1.0"] = "Netizen Open Source License" LICENSEMAPPINGS["Netscape Public License 1.0"] = "Netscape Public License v1.0" LICENSEMAPPINGS["Netscape Public License 1.1"] = "Netscape Public License v1.1" LICENSEMAPPINGS["Net-SNMP License"] = "Net-SNMP License" LICENSEMAPPINGS["Newsletr License"] = "Newsletr License" LICENSEMAPPINGS["No Limit Public License"] = "No Limit Public License" LICENSEMAPPINGS["Nokia Open Source License"] = "Nokia Open Source License" LICENSEMAPPINGS["Nokia Qt LGPL exception 1.1"] = "Nokia Qt LGPL exception 1.1" LICENSEMAPPINGS["Non-Profit Open Software License 3.0"] = "Non-Profit Open Software License 3.0" LICENSEMAPPINGS["Norwegian Licence for Open Government Data"] = "Norwegian Licence for Open Government Data" LICENSEMAPPINGS["Noweb License"] = "Noweb License" LICENSEMAPPINGS["NTP License"] = "NTP License" LICENSEMAPPINGS["Nunit License"] = "Nunit License" LICENSEMAPPINGS["OCLC Research Public License 2.0"] = "OCLC Research Public License 2.0" LICENSEMAPPINGS["ODC Open Database License v1.0"] = "ODC Open Database License v1.0" LICENSEMAPPINGS["ODC Public Domain Dedication & License 1.0"] = "ODC Public Domain Dedication & License 1.0" LICENSEMAPPINGS["Open CASCADE Exception 1.0"] = "Open CASCADE Exception 1.0" LICENSEMAPPINGS["Open CASCADE Technology Public License"] = "Open CASCADE Technology Public License" LICENSEMAPPINGS["Open Group Test Suite License"] = "Open Group Test Suite License" LICENSEMAPPINGS["Open LDAP Public License 2.2.2"] = "Open LDAP Public License 2.2.2" LICENSEMAPPINGS["Open LDAP Public License v1.1"] = "Open LDAP Public License v1.1" LICENSEMAPPINGS["Open LDAP Public License v1.3"] = "Open LDAP Public License v1.3" LICENSEMAPPINGS["Open LDAP Public License v1.4"] = "Open LDAP Public License v1.4" LICENSEMAPPINGS["Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)"] = "Open LDAP Public License v2.0 (or possibly 
2.0A and 2.0B)" LICENSEMAPPINGS["Open LDAP Public License v2.1"] = "Open LDAP Public License v2.1" LICENSEMAPPINGS["Open LDAP Public License v2.2"] = "Open LDAP Public License v2.2" LICENSEMAPPINGS["Open LDAP Public License v2.2.1"] = "Open LDAP Public License v2.2.1" LICENSEMAPPINGS["Open LDAP Public License v2.5"] = "Open LDAP Public License v2.5" LICENSEMAPPINGS["Open LDAP Public License v2.6"] = "Open LDAP Public License v2.6" LICENSEMAPPINGS["Open Public License v1.0"] = "Open Public License v1.0" LICENSEMAPPINGS["Open Software License 1.0"] = "Open Software License 1.0" LICENSEMAPPINGS["Open Software License 1.1"] = "Open Software License 1.1" LICENSEMAPPINGS["Open Software License 2.0"] = "Open Software License 2.0" LICENSEMAPPINGS["Open Software License 2.1"] = "Open Software License 2.1" LICENSEMAPPINGS["Open Software License 3.0"] = "Open Software License 3.0" LICENSEMAPPINGS["OpenJDK Assembly exception 1.0"] = "OpenJDK Assembly exception 1.0" LICENSEMAPPINGS["OpenLDAP Public License v1.2"] = "Open LDAP Public License v1.2" LICENSEMAPPINGS["OpenLDAP Public License v2.0.1"] = "Open LDAP Public License v2.0.1" LICENSEMAPPINGS["OpenLDAP Public License v2.3"] = "Open LDAP Public License v2.3" LICENSEMAPPINGS["OpenLDAP Public License v2.4"] = "Open LDAP Public License v2.4" LICENSEMAPPINGS["OpenLDAP Public License v2.7"] = "Open LDAP Public License v2.7" LICENSEMAPPINGS["OpenLDAP Public License v2.8"] = "Open LDAP Public License v2.8" LICENSEMAPPINGS["OpenSSL License"] = "OpenSSL License" LICENSEMAPPINGS["OpenVPN OpenSSL Exception"] = "OpenVPN OpenSSL Exception" LICENSEMAPPINGS["OSET Public License version 2.1"] = "OSET Public License version 2.1" LICENSEMAPPINGS["PERL Artistic License"] = "Artistic License 1.0 (Perl)" LICENSEMAPPINGS["PHP License v3.0"] = "PHP License v3.0" LICENSEMAPPINGS["PHP License v3.01"] = "PHP License v3.01" LICENSEMAPPINGS["Plexus Classworlds License"] = "Plexus Classworlds License" LICENSEMAPPINGS["psfrag License"] = "psfrag License" 
LICENSEMAPPINGS["psutils License"] = "psutils License" LICENSEMAPPINGS["Python License 2.0"] = "Python License 2.0" LICENSEMAPPINGS["Q Public License 1.0"] = "Q Public License 1.0" LICENSEMAPPINGS["Qhull License"] = "Qhull License" LICENSEMAPPINGS["Qwt exception 1.0"] = "Qwt exception 1.0" LICENSEMAPPINGS["Rdisc License"] = "Rdisc License" LICENSEMAPPINGS["RealNetworks Public Source License v1.0"] = "RealNetworks Public Source License v1.0" LICENSEMAPPINGS["Reciprocal Public License"] = "Reciprocal Public License 1.1" LICENSEMAPPINGS["Reciprocal Public License 1.1"] = "Reciprocal Public License 1.1" LICENSEMAPPINGS["Reciprocal Public License 1.5"] = "Reciprocal Public License 1.5" LICENSEMAPPINGS["Red Hat eCos Public License v1.1"] = "Red Hat eCos Public License v1.1" LICENSEMAPPINGS["Ricoh Source Code Public License"] = "Ricoh Source Code Public License" LICENSEMAPPINGS["RSA Message-Digest License"] = "RSA Message-Digest License" LICENSEMAPPINGS["Ruby License"] = "Ruby License" LICENSEMAPPINGS["Sax Public Domain Notice"] = "Sax Public Domain Notice" LICENSEMAPPINGS["Saxpath License"] = "Saxpath License" LICENSEMAPPINGS["SCEA Shared Source License"] = "SCEA Shared Source License" LICENSEMAPPINGS["Scheme Widget Library (SWL) Software License Agreement"] = "Scheme Widget Library (SWL) Software License Agreement" LICENSEMAPPINGS["Secure Messaging Protocol Public License"] = "Secure Messaging Protocol Public License" LICENSEMAPPINGS["Sendmail License"] = "Sendmail License" LICENSEMAPPINGS["SGI Free Software License B v1.0"] = "SGI Free Software License B v1.0" LICENSEMAPPINGS["SGI Free Software License B v1.1"] = "SGI Free Software License B v1.1" LICENSEMAPPINGS["SGI Free Software License B v2.0"] = "SGI Free Software License B v2.0" LICENSEMAPPINGS["SIL Open Font License 1.0"] = "SIL Open Font License 1.0" LICENSEMAPPINGS["SIL Open Font License 1.1"] = "SIL Open Font License 1.1" LICENSEMAPPINGS["Simple Public License 2.0"] = "Simple Public License 2.0" 
LICENSEMAPPINGS["Sleepycat License"] = "Sleepycat License" LICENSEMAPPINGS["SNIA Public License 1.1"] = "SNIA Public License 1.1" LICENSEMAPPINGS["Spencer License 86"] = "Spencer License 86" LICENSEMAPPINGS["Spencer License 94"] = "Spencer License 94" LICENSEMAPPINGS["Spencer License 99"] = "Spencer License 99" LICENSEMAPPINGS["Standard ML of New Jersey License"] = "Standard ML of New Jersey License" LICENSEMAPPINGS["SugarCRM Public License v1.1.3"] = "SugarCRM Public License v1.1.3" LICENSEMAPPINGS["Sun Industry Standards Source License (SISSL) v1.1"] = "Sun Industry Standards Source License v1.1" LICENSEMAPPINGS["Sun Industry Standards Source License v1.2"] = "Sun Industry Standards Source License v1.2" LICENSEMAPPINGS["Sun Public License v1.0"] = "Sun Public License v1.0" LICENSEMAPPINGS["Sybase Open Watcom Public License 1.0"] = "Sybase Open Watcom Public License 1.0" LICENSEMAPPINGS["Tcl License"] = "TCL/TK License" LICENSEMAPPINGS["TCP Wrappers License"] = "TCP Wrappers License" LICENSEMAPPINGS["The Beerware License"] = "Beerware License" LICENSEMAPPINGS["The Code Project Open License (CPOL) 1.02"] = "Code Project Open License 1.02" LICENSEMAPPINGS["The Curl License"] = "curl License" LICENSEMAPPINGS["The JSON License"] = "JSON License" LICENSEMAPPINGS["The PostgreSQL License"] = "PostgreSQL License" LICENSEMAPPINGS["The Unlicense"] = "The Unlicense" LICENSEMAPPINGS["TMate License"] = "TMate Open Source License" LICENSEMAPPINGS["TORQUE v2.5+ Software License v1.1"] = "TORQUE v2.5+ Software License v1.1" LICENSEMAPPINGS["Trusster Open Source License"] = "Trusster Open Source License" LICENSEMAPPINGS["U-Boot exception 2.0"] = "U-Boot exception 2.0" LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2015)"] = "Unicode License Agreement - Data Files and Software (2015)" LICENSEMAPPINGS["Unicode License Agreement - Data Files and Software (2016)"] = "Unicode License Agreement - Data Files and Software (2016)" LICENSEMAPPINGS["Unicode Terms of 
Use"] = "Unicode Terms of Use" LICENSEMAPPINGS["Universal Permissive License v1.0"] = "Universal Permissive License v1.0" LICENSEMAPPINGS["University of Illinois/NCSA Open Source License"] = "University of Illinois/NCSA Open Source License" LICENSEMAPPINGS["US Naval Research Laboratory (NRL) v1.1"] = "NRL License" LICENSEMAPPINGS["Vim License"] = "Vim License" LICENSEMAPPINGS["VOSTROM Public License for Open Source"] = "VOSTROM Public License for Open Source" LICENSEMAPPINGS["Vovida Software License v1.0"] = "Vovida Software License v1.0" LICENSEMAPPINGS["W3C Software Notice and Document License (2015-05-13)"] = "W3C Software Notice and Document License (2015-05-13)" LICENSEMAPPINGS["W3C Software Notice and License (1998-07-20)"] = "W3C Software Notice and License (1998-07-20)" LICENSEMAPPINGS["W3C Software Notice and License (2002-12-31)"] = "W3C Software Notice and License (2002-12-31)" LICENSEMAPPINGS["Wsuipa License"] = "Wsuipa License" LICENSEMAPPINGS["WxWindows Library Exception 3.1"] = "WxWindows Library Exception 3.1" LICENSEMAPPINGS["wxWindows Library Licence, Version 3.1"] = "wxWindows Library License" LICENSEMAPPINGS["X.Net License"] = "X.Net License" LICENSEMAPPINGS["X11 License"] = "X11 License" LICENSEMAPPINGS["Xerox License"] = "Xerox License" LICENSEMAPPINGS["XFree86 License 1.1"] = "XFree86 License 1.1" LICENSEMAPPINGS["xinetd License"] = "xinetd License" LICENSEMAPPINGS["XPP License"] = "XPP License" LICENSEMAPPINGS["XSkat"] = "XSkat License" LICENSEMAPPINGS["Yahoo! Public License v1.0"] = "Yahoo! Public License v1.0" LICENSEMAPPINGS["Yahoo! Public License v1.1"] = "Yahoo! 
Public License v1.1" LICENSEMAPPINGS["Zed License"] = "Zed License" LICENSEMAPPINGS["Zend License v2.0"] = "Zend License v2.0" LICENSEMAPPINGS["Zimbra Public License v1.4"] = "Zimbra Public License v1.4" LICENSEMAPPINGS["Zimbra Publice License v1.3"] = "Zimbra Public License v1.3" LICENSEMAPPINGS["zlib License"] = "zlib License" LICENSEMAPPINGS["zlib/libpng License with Acknowledgement"] = "zlib/libpng License with Acknowledgement" LICENSEMAPPINGS["Zope Public License 1.1"] = "Zope Public License 1.1" LICENSEMAPPINGS["Zope Public License 2.0"] = "Zope Public License 2.0" LICENSEMAPPINGS["Zope Public License 2.1"] = "Zope Public License 2.1" | 1.611893 | 2 |
wxtbx/command_line/wxpython.py | dperl-sol/cctbx_project | 155 | 6618843 | # LIBTBX_SET_DISPATCHER_NAME cctbx.wxpython
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT
# LIBTBX_START_PYTHON
from __future__ import absolute_import, division, print_function
| # LIBTBX_SET_DISPATCHER_NAME cctbx.wxpython
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1
# LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT
# LIBTBX_START_PYTHON
from __future__ import absolute_import, division, print_function
| en | 0.31843 | # LIBTBX_SET_DISPATCHER_NAME cctbx.wxpython # LIBTBX_PRE_DISPATCHER_INCLUDE_SH PHENIX_GUI_ENVIRONMENT=1 # LIBTBX_PRE_DISPATCHER_INCLUDE_SH export PHENIX_GUI_ENVIRONMENT # LIBTBX_START_PYTHON | 1.09624 | 1 |
bbbs/users/tests/test_models.py | AnnaKPolyakova/bbbs | 2 | 6618844 | from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase
from bbbs.common.factories import CityFactory
from bbbs.users.models import Profile
User = get_user_model()
USERNAME = "<EMAIL>"
USERNAME_SUPERUSER = "<EMAIL>"
NEW_USERNAME = "<EMAIL>"
CITY_NAME = "Москва"
CITY_2_NAME = "Мельбурн"
class UsersCreateTests(TestCase):
    """Tests for user creation and for role/city changes on the user profile.

    Covers superuser and regular-user creation plus each Profile.Role value,
    asserting the expected ``is_active``/``is_staff``/``is_superuser`` flags
    and the profile's city/region bookkeeping for every role.
    """

    @classmethod
    def setUpClass(cls):
        """Create the two City fixtures shared by every test in the class."""
        super().setUpClass()
        cls.city = CityFactory(name=CITY_NAME)
        cls.city_2 = CityFactory(name=CITY_2_NAME)

    def setUp(self):
        """Create a fresh regular user and a superuser before each test."""
        self.user = User.objects.create_user(
            username=USERNAME,
            email=USERNAME,
        )
        self.user_superuser = User.objects.create_superuser(
            username=USERNAME_SUPERUSER,
        )

    def test_create_superuser(self):
        """Test create superuser."""
        user = self.user_superuser
        # The profile is expected to default to the first city and MENTOR role.
        self.assertEqual(user.profile.city, self.city)
        self.assertEqual(user.username, USERNAME_SUPERUSER)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_superuser)
        self.assertEqual(user.profile.role, Profile.Role.MENTOR)

    def test_create_user(self):
        """Test for creating user."""
        user = self.user
        # City is assigned in memory only (no save()); assertions read the
        # same instance, so this checks the attribute, not persistence.
        user.profile.city = self.city
        self.assertEqual(user.username, USERNAME)
        self.assertEqual(user.profile.role, Profile.Role.MENTOR)
        self.assertEqual(user.profile.city, self.city)
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_admin(self):
        """Test for changing role (admin) and city"""
        user = self.user
        user.profile.city = self.city_2
        user.profile.role = Profile.Role.ADMIN
        user.profile.save()
        self.assertEqual(user.username, USERNAME)
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(user.profile.role, Profile.Role.ADMIN)
        self.assertTrue(user.is_active)
        # Saving an ADMIN profile is expected to flip is_staff on the user.
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_moderator_general(self):
        """Test for changing role (moderator general) and city"""
        user = self.user
        user.profile.city = self.city_2
        user.profile.role = Profile.Role.MODERATOR_GEN
        user.profile.save()
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(user.username, USERNAME)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)
        self.assertEqual(user.profile.role, Profile.Role.MODERATOR_GEN)

    def test_create_moderator_regional(self):
        """Test for changing role (moderator reg) and city, add regions."""
        user = self.user
        user.profile.role = Profile.Role.MODERATOR_REG
        user.profile.city = self.city_2
        # Regional moderators additionally carry the set of regions they
        # moderate; both fixture cities are attached here.
        user.profile.region.add(self.city, self.city_2)
        user.profile.save()
        self.assertEqual(user.username, USERNAME)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(
            [self.city, self.city_2], list(user.profile.region.all())
        )
        self.assertEqual(user.profile.role, Profile.Role.MODERATOR_REG)

    def test_create_mentor(self):
        """Test for changing role (mentor) and city."""
        user = self.user
        user.profile.role = Profile.Role.MENTOR
        user.profile.city = self.city_2
        user.profile.save()
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(user.username, USERNAME)
        self.assertTrue(user.is_active)
        # Mentors remain plain users: no staff or superuser rights.
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)
        self.assertEqual(user.profile.role, Profile.Role.MENTOR)

    def test_mentor_has_to_have_curator(self):
        """Profile full_clean requires curator for mentor."""
        user = self.user
        with self.assertRaises(
            ValidationError,
            msg="Убедитесь, что нельзя сохранить ментора без куратора.",
        ):
            user.profile.full_clean()
| from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from django.test import TestCase
from bbbs.common.factories import CityFactory
from bbbs.users.models import Profile
User = get_user_model()
USERNAME = "<EMAIL>"
USERNAME_SUPERUSER = "<EMAIL>"
NEW_USERNAME = "<EMAIL>"
CITY_NAME = "Москва"
CITY_2_NAME = "Мельбурн"
class UsersCreateTests(TestCase):
    """Tests for user creation and for role/city changes on the user profile.

    Covers superuser and regular-user creation plus each Profile.Role value,
    asserting the expected ``is_active``/``is_staff``/``is_superuser`` flags
    and the profile's city/region bookkeeping for every role.
    """

    @classmethod
    def setUpClass(cls):
        """Create the two City fixtures shared by every test in the class."""
        super().setUpClass()
        cls.city = CityFactory(name=CITY_NAME)
        cls.city_2 = CityFactory(name=CITY_2_NAME)

    def setUp(self):
        """Create a fresh regular user and a superuser before each test."""
        self.user = User.objects.create_user(
            username=USERNAME,
            email=USERNAME,
        )
        self.user_superuser = User.objects.create_superuser(
            username=USERNAME_SUPERUSER,
        )

    def test_create_superuser(self):
        """Test create superuser."""
        user = self.user_superuser
        # The profile is expected to default to the first city and MENTOR role.
        self.assertEqual(user.profile.city, self.city)
        self.assertEqual(user.username, USERNAME_SUPERUSER)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertTrue(user.is_superuser)
        self.assertEqual(user.profile.role, Profile.Role.MENTOR)

    def test_create_user(self):
        """Test for creating user."""
        user = self.user
        # City is assigned in memory only (no save()); assertions read the
        # same instance, so this checks the attribute, not persistence.
        user.profile.city = self.city
        self.assertEqual(user.username, USERNAME)
        self.assertEqual(user.profile.role, Profile.Role.MENTOR)
        self.assertEqual(user.profile.city, self.city)
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_admin(self):
        """Test for changing role (admin) and city"""
        user = self.user
        user.profile.city = self.city_2
        user.profile.role = Profile.Role.ADMIN
        user.profile.save()
        self.assertEqual(user.username, USERNAME)
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(user.profile.role, Profile.Role.ADMIN)
        self.assertTrue(user.is_active)
        # Saving an ADMIN profile is expected to flip is_staff on the user.
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_moderator_general(self):
        """Test for changing role (moderator general) and city"""
        user = self.user
        user.profile.city = self.city_2
        user.profile.role = Profile.Role.MODERATOR_GEN
        user.profile.save()
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(user.username, USERNAME)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)
        self.assertEqual(user.profile.role, Profile.Role.MODERATOR_GEN)

    def test_create_moderator_regional(self):
        """Test for changing role (moderator reg) and city, add regions."""
        user = self.user
        user.profile.role = Profile.Role.MODERATOR_REG
        user.profile.city = self.city_2
        # Regional moderators additionally carry the set of regions they
        # moderate; both fixture cities are attached here.
        user.profile.region.add(self.city, self.city_2)
        user.profile.save()
        self.assertEqual(user.username, USERNAME)
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_staff)
        self.assertFalse(user.is_superuser)
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(
            [self.city, self.city_2], list(user.profile.region.all())
        )
        self.assertEqual(user.profile.role, Profile.Role.MODERATOR_REG)

    def test_create_mentor(self):
        """Test for changing role (mentor) and city."""
        user = self.user
        user.profile.role = Profile.Role.MENTOR
        user.profile.city = self.city_2
        user.profile.save()
        self.assertEqual(user.profile.city, self.city_2)
        self.assertEqual(user.username, USERNAME)
        self.assertTrue(user.is_active)
        # Mentors remain plain users: no staff or superuser rights.
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)
        self.assertEqual(user.profile.role, Profile.Role.MENTOR)

    def test_mentor_has_to_have_curator(self):
        """Profile full_clean requires curator for mentor."""
        user = self.user
        with self.assertRaises(
            ValidationError,
            msg="Убедитесь, что нельзя сохранить ментора без куратора.",
        ):
            user.profile.full_clean()
| en | 0.794542 | Test create superuser. Test for creating user. Test for changing role (admin) and city Test for changing role (moderator general) and city Test for changing role (moderator reg) and city, add regions. Test for changing role (mentor) and city. Profile full_clean requires curator for mentor. | 2.4865 | 2 |
app/notebooks/problang/transcript_utils.py | scanner-research/esper-tv | 5 | 6618845 | <gh_stars>1-10
import numpy as np
import torch
from torch.utils.data import Dataset
import requests
from query.models import Video
from timeit import default_timer as now
from esper.prelude import pcache
import random
SEGMENT_SIZE = 200
SEGMENT_STRIDE = 100
def video_list():
    """Fetch the list of available videos from the local caption server."""
    response = requests.get('http://localhost:8111/videos')
    return response.json()
def get_doc(item):
    """Fetch the caption document for a single item from the caption server."""
    response = requests.post('http://localhost:8111/getdoc', json={'phrases': [item]})
    return response.json()
def doc_len():
    """Fetch the per-document length mapping from the caption server."""
    response = requests.get('http://localhost:8111/doclen')
    return response.json()
def compute_vectors(docs, vocabulary, window_size, stride):
    """Ask the caption server to precompute sliding-window vectors for docs.

    The server response is intentionally discarded (fire-and-forget).
    """
    payload = {
        'vocabulary': vocabulary,
        'docs': docs,
        'window_size': window_size,
        'stride': stride,
    }
    requests.post('http://localhost:8111/computevectors', json=payload)
def find_segments(docs, lexicon, threshold, window_size, stride):
    """Query the caption server for segments whose lexicon score exceeds threshold."""
    payload = {
        'lexicon': lexicon,
        'threshold': threshold,
        'window_size': window_size,
        'merge_overlaps': False,
        'stride': stride,
        'docs': docs,
    }
    return requests.post('http://localhost:8111/findsegments', json=payload).json()
def small_video_sample():
    """Return the first 10 videos (scanning ids upward from 1) that have a caption doc.

    Ids with no Video row or whose caption fetch fails are skipped (best-effort).
    """
    videos = []
    video_id = 1  # local renamed from `id` to avoid shadowing the builtin
    while len(videos) < 10:
        try:
            video = Video.objects.get(id=video_id)
            get_doc(video)  # probe that captions exist; result discarded
            videos.append(video)
        except Exception:
            # Deliberate best-effort: missing id or missing captions — skip.
            pass
        video_id += 1
    return videos
def word_counts():
    """Fetch the corpus-wide word -> count mapping from the caption server."""
    return requests.get('http://localhost:8111/wordcounts').json()
VOCAB_THRESHOLD = 100
def load_vocab():
    """Build the sorted vocabulary of words seen more than VOCAB_THRESHOLD times."""
    counts = word_counts()
    print('Full vocabulary size: {}'.format(len(counts)))
    frequent = [w for (w, c) in counts.items() if c > VOCAB_THRESHOLD]
    frequent.sort()
    print('Filtered vocabulary size: {}'.format(len(frequent)))
    return frequent
# The vocabulary is expensive to build (server round-trip plus filtering),
# so it is computed once and persisted on disk via pcache.
vocabulary = pcache.get('vocabulary', load_vocab)
vocab_size = len(vocabulary)
class SegmentTextDataset(Dataset):
    """Dataset of overlapping transcript segments, indexed flat across documents.

    Each item is a dict with the document index, the segment's word offset
    within the document, the segment text, and — when a vocabulary is
    supplied — an L1-normalized bag-of-words vector over that vocabulary.
    """

    def __init__(self, docs, vocabulary=None, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False):
        self._segment_size = segment_size
        self._use_cuda = use_cuda
        self._vocabulary = vocabulary
        self._doc_names = docs
        self._doc_lens = doc_len()
        # Number of stride-spaced windows that fit in each document.
        self._num_segs = np.array([
            len(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
            for doc in self._doc_names
        ])
        # Flat segment index -> (document index, word offset, window index).
        self._back_index = [
            (i, j, k)
            for i, doc in enumerate(self._doc_names)
            for k, j in enumerate(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
        ]
        # (document name, word offset) -> flat segment index.
        self._forward_index = {
            (self._doc_names[i], j): k
            for k, (i, j, _) in enumerate(self._back_index)
        }
        self._docs = {}  # cache: document index -> fetched document tokens
        self._segs = {}  # cache: (document index, word offset) -> item dict

    def segment_index(self, doc, word):
        """Return the flat index of the segment starting at `word` in `doc`."""
        return self._forward_index[(doc, word)]

    def _text_to_vector(self, words):
        """Return an L1-normalized count vector of `words` over the vocabulary."""
        # Fixed: the original referenced `defaultdict` without importing it,
        # which raised NameError on first use; Counter yields the same counts
        # (missing vocabulary words count as 0).
        from collections import Counter
        counts = Counter(words)
        t = torch.tensor([counts[word] for word in self._vocabulary], dtype=torch.float32)
        t /= torch.sum(t)
        return t

    def __len__(self):
        return self._num_segs.sum()

    def __getitem__(self, idx):
        (i, j, _) = self._back_index[idx]
        if not (i, j) in self._segs:
            if not i in self._docs:
                # Lazily fetch and cache the full document.
                self._docs[i] = get_doc(self._doc_names[i])

            seg = self._docs[i][j:j+self._segment_size]
            data = {
                'document_idx': i,
                'segment_idx': j,
            }

            if self._vocabulary is not None:
                data['vector'] = self._text_to_vector(seg)
                if self._use_cuda:
                    data['vector'] = data['vector'].cuda()

            data['segment'] = ' '.join(seg)

            self._segs[(i, j)] = data

        return self._segs[(i, j)]
import mmap
class SegmentVectorDataset(Dataset):
    """Per-segment count vectors read from a packed binary file.

    The file at /app/data/segvectors.bin holds one uint8 vector of length
    `vocab_size` per segment, concatenated document by document in the
    order of `docs`. Items are returned L1-normalized as float tensors.
    """

    def __init__(self, docs, vocab_size, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False, inmemory=False):
        self._ds = SegmentTextDataset(docs, segment_size=segment_size, segment_stride=segment_stride)
        self._doc_names = docs
        self._vocab_size = vocab_size
        self._use_cuda = use_cuda
        self._inmemory = inmemory

        self._file_handle = open('/app/data/segvectors.bin', 'r+b')
        self._file = mmap.mmap(self._file_handle.fileno(), 0)
        self._byte_offsets = []

        if self._inmemory:
            self._buffer = self._file.read()

        # Prefix sum of per-document byte lengths: _byte_offsets[i] is the
        # byte position where document i's vectors start in the file.
        for i, doc in enumerate(self._doc_names):
            if i == 0:
                self._byte_offsets.append(0)
            else:
                # Fixed: previously this length was computed before the i == 0
                # branch, silently indexing _num_segs[-1] on the first pass.
                dlen = self._ds._num_segs[i - 1] * self._vocab_size
                self._byte_offsets.append(self._byte_offsets[i - 1] + dlen)

    def _byte_offset(self, idx):
        """Byte position of segment `idx`'s vector within the packed file."""
        (i, _, j) = self._ds._back_index[idx]  # j here is the window index
        return self._byte_offsets[i] + j * self._vocab_size

    def __len__(self):
        return len(self._ds)

    def __getitem__(self, idx):
        offset = self._byte_offset(idx)
        if self._inmemory:
            byts = self._buffer[offset:offset+self._vocab_size]
        else:
            self._file.seek(offset)
            byts = self._file.read(self._vocab_size)
        assert len(byts) == self._vocab_size, \
            'Invalid read at index {}, offset {}. Expected {} bytes, got {}'.format(idx, offset, self._vocab_size, len(byts))

        npbuf = np.frombuffer(byts, dtype=np.uint8)
        tbuf = torch.from_numpy(npbuf).float()
        tbuf /= torch.sum(tbuf)

        if self._use_cuda:
            tbuf = tbuf.cuda()

        return tbuf, idx
class LabeledSegmentDataset(Dataset):
    """Wraps an unlabeled segment dataset with (segment_index, label) pairs.

    Labels are returned one-hot encoded over `categories` classes, alongside
    the wrapped dataset's vector and the segment index.
    """

    def __init__(self, unlabeled_dataset, labels, categories):
        self._ds = unlabeled_dataset
        self._labels = labels
        self._categories = categories

    def __len__(self):
        return len(self._labels)

    def __getitem__(self, idx):
        seg_idx, raw_label = self._labels[idx]
        one_hot = torch.tensor(
            [1 if raw_label == c else 0 for c in range(self._categories)],
            dtype=torch.float32,
        )
        if self._ds._use_cuda:
            one_hot = one_hot.cuda()
        vector, _ = self._ds[seg_idx]
        return vector, one_hot, seg_idx
def label_widget(dataset, indices, done_callback):
    """Interactive Jupyter widget for labeling transcript segments yes/no.

    Shows each segment of `dataset` (at positions `indices`) in turn; typing
    'y' labels it 1, anything else 0. Clicking "Finished" invokes
    done_callback(labels) with the (segment_index, label) pairs collected
    so far.
    """
    from IPython.display import display, clear_output
    from ipywidgets import Text, HTML, Button

    labels = []
    i = 0

    transcript = HTML(dataset[indices[0]]['segment'])
    box = Text(placeholder='y/n')

    def on_submit(text):
        nonlocal i
        label = 1 if text.value == 'y' else 0
        labels.append((indices[i], label))
        i += 1
        # Fixed: guard against advancing past the last index (previously
        # raised IndexError after the final segment was labeled).
        if i < len(indices):
            transcript.value = dataset[indices[i]]['segment']
        box.value = ''
    box.on_submit(on_submit)

    # (removed unused `finished` flag)
    btn_finished = Button(description='Finished')

    def on_click(b):
        done_callback(labels)
    btn_finished.on_click(on_click)

    display(transcript)
    display(box)
    display(btn_finished)
| import numpy as np
import torch
from torch.utils.data import Dataset
import requests
from query.models import Video
from timeit import default_timer as now
from esper.prelude import pcache
import random
SEGMENT_SIZE = 200
SEGMENT_STRIDE = 100
def video_list():
r = requests.get('http://localhost:8111/videos')
return r.json()
def get_doc(item):
r = requests.post('http://localhost:8111/getdoc', json={'phrases': [item]})
return r.json()
def doc_len():
r = requests.get('http://localhost:8111/doclen')
return r.json()
def compute_vectors(docs, vocabulary, window_size, stride):
requests.post('http://localhost:8111/computevectors', json={
'vocabulary': vocabulary,
'docs': docs,
'window_size': window_size,
'stride': stride
})
def find_segments(docs, lexicon, threshold, window_size, stride):
r = requests.post('http://localhost:8111/findsegments', json={
'lexicon': lexicon,
'threshold': threshold,
'window_size': window_size,
'merge_overlaps': False,
'stride': stride,
'docs': docs
})
return r.json()
def small_video_sample():
videos = []
id = 1
while len(videos) < 10:
try:
v = Video.objects.get(id=id)
get_doc(v)
videos.append(v)
except Exception:
pass
id += 1
return videos
def word_counts():
r = requests.get('http://localhost:8111/wordcounts')
return r.json()
VOCAB_THRESHOLD = 100
def load_vocab():
counts = word_counts()
print('Full vocabulary size: {}'.format(len(counts)))
vocabulary = sorted([word for (word, count) in counts.items() if count > VOCAB_THRESHOLD])
print('Filtered vocabulary size: {}'.format(len(vocabulary)))
return vocabulary
vocabulary = pcache.get('vocabulary', load_vocab)
vocab_size = len(vocabulary)
class SegmentTextDataset(Dataset):
def __init__(self, docs, vocabulary=None, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False):
self._segment_size = segment_size
self._use_cuda = use_cuda
self._vocabulary = vocabulary
self._doc_names = docs
self._doc_lens = doc_len()
self._num_segs = np.array([
len(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
for doc in self._doc_names
])
self._back_index = [
(i, j, k)
for i, doc in enumerate(self._doc_names)
for k, j in enumerate(range(0, self._doc_lens[doc]-segment_size+1, segment_stride))
]
self._forward_index = {
(self._doc_names[i], j): k
for k, (i, j, _) in enumerate(self._back_index)
}
self._docs = {}
self._segs = {}
def segment_index(self, doc, word):
return self._forward_index[(doc, word)]
def _text_to_vector(self, words):
counts = defaultdict(int)
for w in words:
counts[w] += 1
t = torch.tensor([counts[word] for word in self._vocabulary], dtype=torch.float32)
t /= torch.sum(t)
return t
def __len__(self):
return self._num_segs.sum()
def __getitem__(self, idx):
(i, j, _) = self._back_index[idx]
if not (i, j) in self._segs:
if not i in self._docs:
self._docs[i] = get_doc(self._doc_names[i])
seg = self._docs[i][j:j+self._segment_size]
data = {
'document_idx': i,
'segment_idx': j,
}
if self._vocabulary is not None:
data['vector'] = self._text_to_vector(seg)
if self._use_cuda:
data['vector'] = data['vector'].cuda()
data['segment'] = ' '.join(seg)
self._segs[(i, j)] = data
return self._segs[(i, j)]
import mmap
class SegmentVectorDataset(Dataset):
def __init__(self, docs, vocab_size, segment_size=SEGMENT_SIZE, segment_stride=SEGMENT_STRIDE, use_cuda=False, inmemory=False):
self._ds = SegmentTextDataset(docs, segment_size=segment_size, segment_stride=segment_stride)
self._doc_names = docs
self._vocab_size = vocab_size
self._use_cuda = use_cuda
self._inmemory = inmemory
self._file_handle = open('/app/data/segvectors.bin', 'r+b')
self._file = mmap.mmap(self._file_handle.fileno(), 0)
self._byte_offsets = []
if self._inmemory:
self._buffer = self._file.read()
# Compute prefix sum of document offsets
for i, doc in enumerate(self._doc_names):
dlen = self._ds._num_segs[i-1] * self._vocab_size
if i == 0:
self._byte_offsets.append(0)
else:
self._byte_offsets.append(self._byte_offsets[i - 1] + dlen)
def _byte_offset(self, idx):
(i, _, j) = self._ds._back_index[idx]
return self._byte_offsets[i] + j * self._vocab_size
def __len__(self):
return len(self._ds)
def __getitem__(self, idx):
offset = self._byte_offset(idx)
if self._inmemory:
byts = self._buffer[offset:offset+self._vocab_size]
else:
self._file.seek(offset)
byts = self._file.read(self._vocab_size)
assert len(byts) == self._vocab_size, \
'Invalid read at index {}, offset {}. Expected {} bytes, got {}'.format(idx, offset, self._vocab_size, len(byts))
npbuf = np.frombuffer(byts, dtype=np.uint8)
tbuf = torch.from_numpy(npbuf).float()
tbuf /= torch.sum(tbuf)
if self._use_cuda:
tbuf = tbuf.cuda()
return tbuf, idx
class LabeledSegmentDataset(Dataset):
def __init__(self, unlabeled_dataset, labels, categories):
self._ds = unlabeled_dataset
self._labels = labels
self._categories = categories
def __len__(self):
return len(self._labels)
def __getitem__(self, idx):
(seg_idx, label) = self._labels[idx]
label = torch.tensor([1 if label == i else 0 for i in range(self._categories)], dtype=torch.float32)
if self._ds._use_cuda:
label = label.cuda()
tbuf, _ = self._ds[seg_idx]
return tbuf, label, seg_idx
def label_widget(dataset, indices, done_callback):
from IPython.display import display, clear_output
from ipywidgets import Text, HTML, Button
labels = []
i = 0
transcript = HTML(dataset[indices[0]]['segment'])
box = Text(placeholder='y/n')
def on_submit(text):
nonlocal i
label = 1 if text.value == 'y' else 0
labels.append((indices[i], label))
i += 1
transcript.value = dataset[indices[i]]['segment']
box.value = ''
box.on_submit(on_submit)
finished = False
btn_finished = Button(description='Finished')
def on_click(b):
done_callback(labels)
btn_finished.on_click(on_click)
display(transcript)
display(box)
display(btn_finished) | en | 0.431967 | # Compute prefix sum of document offsets | 2.325398 | 2 |
python/getdist/covcomb.py | sjoudaki/CosmoJBD | 1 | 6618846 | <filename>python/getdist/covcomb.py
# usage:
# python covcmb.py out.covmat in1.covmat in2.covmat
# Nb. in1 values take priority over in2
from __future__ import print_function
import sys
from getdist import covmat
if len(sys.argv) < 3:
print('Usage: python covcmb.py out.covmat in1.covmat in2.covmat [in3.covmat...]')
sys.exit()
foutname = sys.argv[1]
cov = covmat.CovMat(sys.argv[2])
for fname in sys.argv[3:]:
print('merging: ' + fname)
cov = cov.mergeCovmatWhereNew(covmat.CovMat(fname))
cov.saveToFile(foutname)
| <filename>python/getdist/covcomb.py
# usage:
# python covcmb.py out.covmat in1.covmat in2.covmat
# Nb. in1 values take priority over in2
from __future__ import print_function
import sys
from getdist import covmat
if len(sys.argv) < 3:
print('Usage: python covcmb.py out.covmat in1.covmat in2.covmat [in3.covmat...]')
sys.exit()
foutname = sys.argv[1]
cov = covmat.CovMat(sys.argv[2])
for fname in sys.argv[3:]:
print('merging: ' + fname)
cov = cov.mergeCovmatWhereNew(covmat.CovMat(fname))
cov.saveToFile(foutname)
| en | 0.254484 | # usage: # python covcmb.py out.covmat in1.covmat in2.covmat # Nb. in1 values take priority over in2 | 2.541413 | 3 |
net/tests/test_models.py | maznu/peering-manager | 173 | 6618847 | <reponame>maznu/peering-manager<gh_stars>100-1000
from django.test import TestCase
from net.enums import ConnectionState
from net.models import Connection
from peering.enums import DeviceState
from peering.models import AutonomousSystem, InternetExchange, Router
class ConnectionTest(TestCase):
    """Tests for the Connection model's PeeringDB linkage and __str__ output."""

    @classmethod
    def setUpTestData(cls):
        local_autonomous_system = AutonomousSystem.objects.create(
            asn=201281, name="<NAME>", affiliated=True
        )
        internet_exchange_point = InternetExchange.objects.create(
            name="Test", slug="test", local_autonomous_system=local_autonomous_system
        )
        router = Router.objects.create(
            name="Test",
            hostname="test.example.com",
            device_state=DeviceState.ENABLED,
            local_autonomous_system=local_autonomous_system,
        )
        # Connection is deliberately left unsaved here; tests save it as needed.
        cls.connection = Connection(
            state=ConnectionState.ENABLED,
            vlan=2001,
            ipv6_address="2001:db8:10::1",
            internet_exchange_point=internet_exchange_point,
            router=router,
        )

    def test_linked_to_peeringdb(self):
        self.assertFalse(self.connection.linked_to_peeringdb)

    def test_link_to_peeringdb(self):
        self.assertIsNone(self.connection.link_to_peeringdb())

    def test__str__(self):
        # f-prefixes removed from the placeholder-free literals (ruff F541);
        # the runtime strings are unchanged.
        self.assertEqual("Test on Test", str(self.connection))
        self.connection.router = None
        self.connection.interface = ""
        self.connection.save()
        self.assertEqual("Test", str(self.connection))
        self.connection.internet_exchange_point = None
        self.connection.save()
        self.assertEqual(f"Connection #{self.connection.pk}", str(self.connection))
| from django.test import TestCase
from net.enums import ConnectionState
from net.models import Connection
from peering.enums import DeviceState
from peering.models import AutonomousSystem, InternetExchange, Router
class ConnectionTest(TestCase):
@classmethod
def setUpTestData(cls):
local_autonomous_system = AutonomousSystem.objects.create(
asn=201281, name="<NAME>", affiliated=True
)
internet_exchange_point = InternetExchange.objects.create(
name="Test", slug="test", local_autonomous_system=local_autonomous_system
)
router = Router.objects.create(
name="Test",
hostname="test.example.com",
device_state=DeviceState.ENABLED,
local_autonomous_system=local_autonomous_system,
)
cls.connection = Connection(
state=ConnectionState.ENABLED,
vlan=2001,
ipv6_address="2001:db8:10::1",
internet_exchange_point=internet_exchange_point,
router=router,
)
def test_linked_to_peeringdb(self):
self.assertFalse(self.connection.linked_to_peeringdb)
def test_link_to_peeringdb(self):
self.assertIsNone(self.connection.link_to_peeringdb())
def test__str__(self):
self.assertEqual(f"Test on Test", str(self.connection))
self.connection.router = None
self.connection.interface = ""
self.connection.save()
self.assertEqual(f"Test", str(self.connection))
self.connection.internet_exchange_point = None
self.connection.save()
self.assertEqual(f"Connection #{self.connection.pk}", str(self.connection)) | en | 0.293478 | #{self.connection.pk}", str(self.connection)) | 2.281813 | 2 |
tests/_fixtures/fixture_barset.py | Chavithra/investments-toolkit | 3 | 6618848 | import pytest
from investmentstk.models.barset import barset_from_csv_string
@pytest.fixture
def barset_volvo_2_months():
    """Daily bars for Volvo covering July-August 2021.

    Each CSV row is a date plus four price columns — presumably
    open,high,low,close order; confirm against barset_from_csv_string.
    """
    csv_string = """
    2021-07-01,207.3,208.35,205.9,207.9
    2021-07-02,208.35,209.1,206.1,206.1
    2021-07-05,208.2,209.95,205.45,209.75
    2021-07-06,210.0,211.85,208.1,208.15
    2021-07-07,209.65,212.2,209.15,211.7
    2021-07-08,210.8,211.0,208.15,209.75
    2021-07-09,210.6,212.9,208.58,211.95
    2021-07-12,213.0,213.1,209.45,212.4
    2021-07-13,212.9,215.15,212.2,213.6
    2021-07-14,213.0,215.1,211.8,213.9
    2021-07-15,214.15,214.95,211.8,212.2
    2021-07-16,212.3,214.15,208.25,209.05
    2021-07-19,208.1,208.35,201.25,201.65
    2021-07-20,195.0,197.4,192.16,196.62
    2021-07-21,199.0,206.0,198.02,205.65
    2021-07-22,206.0,208.9,205.5,205.85
    2021-07-23,207.0,209.5,206.2,209.0
    2021-07-26,208.1,209.25,207.45,208.1
    2021-07-27,207.5,208.05,204.3,204.4
    2021-07-28,204.4,205.45,202.6,203.85
    2021-07-29,204.9,205.7,203.55,204.25
    2021-07-30,203.1,204.1,201.7,202.75
    2021-08-02,203.9,207.5,203.6,203.85
    2021-08-03,203.85,204.15,201.7,202.4
    2021-08-04,203.75,204.9,203.25,204.05
    2021-08-05,204.05,204.6,202.15,202.5
    2021-08-06,202.5,203.3,201.4,202.35
    2021-08-09,201.95,202.05,199.8,201.2
    2021-08-10,201.2,201.7,200.45,200.7
    2021-08-11,202.05,202.8,200.9,202.45
    2021-08-12,202.45,203.8,201.95,202.6
    2021-08-13,202.8,203.9,202.05,202.9
    2021-08-16,202.1,202.15,200.2,200.45
    2021-08-17,200.0,200.45,198.9,199.18
    2021-08-18,199.02,199.06,194.68,196.74
    2021-08-19,194.28,194.9,190.47,191.14
    2021-08-20,191.1,192.96,189.52,192.96
    2021-08-23,194.5,195.7,192.98,195.36
    2021-08-24,196.74,197.2,195.6,195.96
    2021-08-25,196.0,196.78,194.24,196.6
    2021-08-26,195.18,197.22,194.7,196.9
    2021-08-27,197.0,198.38,196.4,198.1
    2021-08-30,198.3,199.62,195.96,197.46
    2021-08-31,198.08,198.38,194.44,195.14
    """
    return barset_from_csv_string(csv_string)
| import pytest
from investmentstk.models.barset import barset_from_csv_string
@pytest.fixture
def barset_volvo_2_months():
csv_string = """
2021-07-01,207.3,208.35,205.9,207.9
2021-07-02,208.35,209.1,206.1,206.1
2021-07-05,208.2,209.95,205.45,209.75
2021-07-06,210.0,211.85,208.1,208.15
2021-07-07,209.65,212.2,209.15,211.7
2021-07-08,210.8,211.0,208.15,209.75
2021-07-09,210.6,212.9,208.58,211.95
2021-07-12,213.0,213.1,209.45,212.4
2021-07-13,212.9,215.15,212.2,213.6
2021-07-14,213.0,215.1,211.8,213.9
2021-07-15,214.15,214.95,211.8,212.2
2021-07-16,212.3,214.15,208.25,209.05
2021-07-19,208.1,208.35,201.25,201.65
2021-07-20,195.0,197.4,192.16,196.62
2021-07-21,199.0,206.0,198.02,205.65
2021-07-22,206.0,208.9,205.5,205.85
2021-07-23,207.0,209.5,206.2,209.0
2021-07-26,208.1,209.25,207.45,208.1
2021-07-27,207.5,208.05,204.3,204.4
2021-07-28,204.4,205.45,202.6,203.85
2021-07-29,204.9,205.7,203.55,204.25
2021-07-30,203.1,204.1,201.7,202.75
2021-08-02,203.9,207.5,203.6,203.85
2021-08-03,203.85,204.15,201.7,202.4
2021-08-04,203.75,204.9,203.25,204.05
2021-08-05,204.05,204.6,202.15,202.5
2021-08-06,202.5,203.3,201.4,202.35
2021-08-09,201.95,202.05,199.8,201.2
2021-08-10,201.2,201.7,200.45,200.7
2021-08-11,202.05,202.8,200.9,202.45
2021-08-12,202.45,203.8,201.95,202.6
2021-08-13,202.8,203.9,202.05,202.9
2021-08-16,202.1,202.15,200.2,200.45
2021-08-17,200.0,200.45,198.9,199.18
2021-08-18,199.02,199.06,194.68,196.74
2021-08-19,194.28,194.9,190.47,191.14
2021-08-20,191.1,192.96,189.52,192.96
2021-08-23,194.5,195.7,192.98,195.36
2021-08-24,196.74,197.2,195.6,195.96
2021-08-25,196.0,196.78,194.24,196.6
2021-08-26,195.18,197.22,194.7,196.9
2021-08-27,197.0,198.38,196.4,198.1
2021-08-30,198.3,199.62,195.96,197.46
2021-08-31,198.08,198.38,194.44,195.14
"""
return barset_from_csv_string(csv_string)
| en | 0.471488 | 2021-07-01,207.3,208.35,205.9,207.9 2021-07-02,208.35,209.1,206.1,206.1 2021-07-05,208.2,209.95,205.45,209.75 2021-07-06,210.0,211.85,208.1,208.15 2021-07-07,209.65,212.2,209.15,211.7 2021-07-08,210.8,211.0,208.15,209.75 2021-07-09,210.6,212.9,208.58,211.95 2021-07-12,213.0,213.1,209.45,212.4 2021-07-13,212.9,215.15,212.2,213.6 2021-07-14,213.0,215.1,211.8,213.9 2021-07-15,214.15,214.95,211.8,212.2 2021-07-16,212.3,214.15,208.25,209.05 2021-07-19,208.1,208.35,201.25,201.65 2021-07-20,195.0,197.4,192.16,196.62 2021-07-21,199.0,206.0,198.02,205.65 2021-07-22,206.0,208.9,205.5,205.85 2021-07-23,207.0,209.5,206.2,209.0 2021-07-26,208.1,209.25,207.45,208.1 2021-07-27,207.5,208.05,204.3,204.4 2021-07-28,204.4,205.45,202.6,203.85 2021-07-29,204.9,205.7,203.55,204.25 2021-07-30,203.1,204.1,201.7,202.75 2021-08-02,203.9,207.5,203.6,203.85 2021-08-03,203.85,204.15,201.7,202.4 2021-08-04,203.75,204.9,203.25,204.05 2021-08-05,204.05,204.6,202.15,202.5 2021-08-06,202.5,203.3,201.4,202.35 2021-08-09,201.95,202.05,199.8,201.2 2021-08-10,201.2,201.7,200.45,200.7 2021-08-11,202.05,202.8,200.9,202.45 2021-08-12,202.45,203.8,201.95,202.6 2021-08-13,202.8,203.9,202.05,202.9 2021-08-16,202.1,202.15,200.2,200.45 2021-08-17,200.0,200.45,198.9,199.18 2021-08-18,199.02,199.06,194.68,196.74 2021-08-19,194.28,194.9,190.47,191.14 2021-08-20,191.1,192.96,189.52,192.96 2021-08-23,194.5,195.7,192.98,195.36 2021-08-24,196.74,197.2,195.6,195.96 2021-08-25,196.0,196.78,194.24,196.6 2021-08-26,195.18,197.22,194.7,196.9 2021-08-27,197.0,198.38,196.4,198.1 2021-08-30,198.3,199.62,195.96,197.46 2021-08-31,198.08,198.38,194.44,195.14 | 2.056439 | 2 |
functions/doctime.py | golete/rhino_RNproject | 0 | 6618849 | import datetime
import re
def doctime():
    """Return the current local time as a 'YYYYMMDD_HHMMSS' timestamp string."""
    # strftime replaces the previous str() + re.sub round-trip, which relied
    # on a non-raw '\W' pattern (invalid-escape warning) and string slicing;
    # the output format is identical.
    return datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
| import datetime
import re
def doctime():
    """Return the current local time as a 'YYYYMMDD_HHMMSS' timestamp string."""
    # strftime replaces the previous str() + re.sub round-trip, which relied
    # on a non-raw '\W' pattern (invalid-escape warning) and string slicing;
    # the output format is identical.
    return datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
| none | 1 | 2.848132 | 3 | |
step5__run_haplotype_caller_per_sample.py | macarthur-lab/gnomad-readviz | 1 | 6618850 | import logging
from tqdm import tqdm
import hail as hl
from gnomad.resources.resource_utils import DataException
from gnomad.utils.file_utils import parallel_file_exists
from tgg.batch.batch_utils import (
check_storage_bucket_region,
HG38_REF_PATHS,
localize_file,
init_arg_parser,
init_job,
run_batch,
set_gcloud_project,
)
logging.basicConfig(
format="%(asctime)s (%(name)s %(lineno)s): %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger("run_haplotypecaller")
logger.setLevel(logging.INFO)
EXCLUDE_INTERVALS = (
"gs://gnomad-bw2/exclude_intervals_with_non_ACGT_bases_in_GRCh38__150bp_window.bed"
)
"""
Variants to exclude when running HaplotypeCaller.
"""
PADDING_AROUND_VARIANT = 200
"""
Amount of padding to add around each variant when running HaplotypeCaller.
"""
def parse_args():
    """Build the argument parser and parse command line args.

    Returns:
        (parser, args) tuple.
    """
    p = init_arg_parser(default_cpu=1, default_billing_project="gnomad-production")
    p.add_argument(
        "--gcloud-project",
        help="Google cloud project. Default is 'broad-mpg-gnomad'.",
        default="broad-mpg-gnomad",
    )
    p.add_argument(
        "-p",
        "--output-dir",
        help="Where to write haplotype caller output.",
        default="gs://gnomad-bw2/gnomad_v3_1_readviz_bamout",
    )
    p.add_argument(
        "--docker-image",
        help="Docker image to use.",
        default="gcr.io/broad-mpg-gnomad/gnomad-readviz@sha256:7013fc57e3471617a314b08e2bcefe4711d401f83500c5c57e9a3e79ee8efebd",
    )
    # NOTE: option string mixes '-' and '_'; argparse maps it to the dest
    # `cram_and_tsv_paths_table` used in main(). Kept as-is for CLI
    # compatibility.
    p.add_argument(
        "--cram-and-tsv_paths-table",
        help="A text file containing at least these columns: sample_id, cram_path",
        default="step4_output_cram_and_tsv_paths_table.tsv",  # f-prefix removed (no placeholders)
    )
    args = p.parse_args()
    return p, args
def main():
    """
    Run HaplotypeCaller to generate bamouts.

    Step 5 of readviz pipeline.
    """
    _, args = parse_args()

    hl.init(log="/dev/null", quiet=True)

    project = args.gcloud_project
    docker_image = args.docker_image
    set_gcloud_project(project)

    logger.info("Making sure input cram_and_tsv_paths_table arg is valid...")
    bams = {}
    samples = {}
    with hl.hadoop_open(args.cram_and_tsv_paths_table) as c:
        # Confirm header has all required columns
        header = c.readline().strip().split("\t")
        if {"sample_id", "cram_path", "crai_path", "variants_tsv_bgz"} - set(header):
            raise DataException(
                "%s must contain 'sample_id', 'cram_path', 'crai_path', variants_tsv_bgz' columns!"
            )

        for line in c:
            sample, cram, crai, variants_tsv_bgz = line.strip().split("\t")

            # Store output BAM path.
            # Fixed: the parser defines --output-dir (dest `output_dir`);
            # the previous `args.output_prefix` attribute does not exist and
            # raised AttributeError at runtime.
            bam = f"{args.output_dir}/{sample}.bamout.bam"
            bai = f"{args.output_dir}/{sample}.bamout.bai"
            bams[sample] = bam

            # Store sample information
            samples[sample] = [cram, crai, variants_tsv_bgz, bam, bai]

        logger.info(
            "Checking that all input crams are 'US-CENTRAL1' or multi-regional buckets..."
        )
        # Check that all buckets are in "US-CENTRAL1" or are multi-regional to avoid egress charges to the Batch cluster
        # NOTE(review): this runs after the loop, so only the *last* cram is
        # actually checked — confirm whether every cram should be verified.
        check_storage_bucket_region(cram)

    logger.info("Checking if any output bams already exist...")
    bam_files_exist = parallel_file_exists(list(bams.values()))
    samples_without_bams = []
    for sample in bams:
        if not bam_files_exist[bams[sample]]:
            samples_without_bams.append(sample)

    # Process samples
    with run_batch(args, batch_name="HaplotypeCaller -bamout") as batch:
        for sample in tqdm(samples_without_bams, unit="samples"):
            cram, crai, variants_tsv_bgz, bam, bai = samples[sample]

            j = init_job(
                batch, f"readviz: {sample}", docker_image, args.cpu, args.memory,
            )
            j.command(
                """gcloud -q auth activate-service-account --key-file=/gsa-key/key.json"""
            )

            local_exclude_intervals = localize_file(j, EXCLUDE_INTERVALS)
            local_fasta = localize_file(j, HG38_REF_PATHS.fasta, use_gcsfuse=True)
            local_fasta_fai = localize_file(j, HG38_REF_PATHS.fai, use_gcsfuse=True)
            localize_file(j, HG38_REF_PATHS.dict, use_gcsfuse=True)
            local_tsv_bgz = localize_file(j, variants_tsv_bgz)
            local_cram_path = localize_file(j, cram)

            j.command(
                f"""echo --------------
echo "Start - time: $(date)"
df -kh
# 1) Convert variants_tsv_bgz to sorted interval list
gunzip -c "{local_tsv_bgz}" | awk '{{ OFS="\t" }} {{ print( "chr"$1, $2, $2 ) }}' | bedtools slop -b {PADDING_AROUND_VARIANT} -g {local_fasta_fai} > variant_windows.bed
# Sort the .bed file so that chromosomes are in the same order as in the input_cram file.
# Without this, if the input_cram has a different chromosome ordering (eg. chr1, chr10, .. vs. chr1, chr2, ..)
# than the interval list passed to GATK tools' -L arg, then GATK may silently skip some of regions in the -L intervals.
# The sort is done by first retrieving the input_cram header and passing it to GATK BedToIntervalList.
java -Xms2g -jar /gatk/gatk.jar PrintReadsHeader \
    --gcs-project-for-requester-pays {project} \
    -R {local_fasta} \
    -I "{local_cram_path}" \
    -O header.bam
java -Xms2g -jar /gatk/gatk.jar BedToIntervalList \
    --SORT true \
    --SEQUENCE_DICTIONARY header.bam \
    --INPUT variant_windows.bed \
    --OUTPUT variant_windows.interval_list
# 2) Get reads from the input_cram for the intervals in variant_windows.interval_list
time java -XX:GCTimeLimit=50 -XX:GCHeapFreeLimit=10 -XX:+DisableAttachMechanism -XX:MaxHeapSize=2000m -Xmx30000m \
    -jar /gatk/GATK35.jar \
    -T HaplotypeCaller \
    -R {local_fasta} \
    -I "{local_cram_path}" \
    -L variant_windows.interval_list \
    -XL {local_exclude_intervals} \
    --disable_auto_index_creation_and_locking_when_reading_rods \
    -ERC GVCF \
    --max_alternate_alleles 3 \
    -variant_index_parameter 128000 \
    -variant_index_type LINEAR \
    --read_filter OverclippedRead \
    -bamout "{sample}.bamout.bam" \
    -o "{sample}.gvcf"  |& grep -v "^DEBUG"
bgzip "{sample}.gvcf"
tabix "{sample}.gvcf.gz"
gsutil -m cp "{sample}.bamout.bam" {args.output_dir}
gsutil -m cp "{sample}.bamout.bai" {args.output_dir}
gsutil -m cp "{sample}.gvcf.gz" {args.output_dir}
gsutil -m cp "{sample}.gvcf.gz.tbi" {args.output_dir}
ls -lh
echo --------------; free -h; df -kh; uptime; set +xe; echo "Done - time: $(date)"; echo --------------
"""
            )
if __name__ == "__main__":
main()
| import logging
from tqdm import tqdm
import hail as hl
from gnomad.resources.resource_utils import DataException
from gnomad.utils.file_utils import parallel_file_exists
from tgg.batch.batch_utils import (
check_storage_bucket_region,
HG38_REF_PATHS,
localize_file,
init_arg_parser,
init_job,
run_batch,
set_gcloud_project,
)
logging.basicConfig(
format="%(asctime)s (%(name)s %(lineno)s): %(message)s",
datefmt="%m/%d/%Y %I:%M:%S %p",
)
logger = logging.getLogger("run_haplotypecaller")
logger.setLevel(logging.INFO)
EXCLUDE_INTERVALS = (
"gs://gnomad-bw2/exclude_intervals_with_non_ACGT_bases_in_GRCh38__150bp_window.bed"
)
"""
Variants to exclude when running HaplotypeCaller.
"""
PADDING_AROUND_VARIANT = 200
"""
Amount of padding to add around each variant when running HaplotypeCaller.
"""
def parse_args():
"""Parse command line args."""
p = init_arg_parser(default_cpu=1, default_billing_project="gnomad-production")
p.add_argument(
"--gcloud-project",
help="Google cloud project. Default is 'broad-mpg-gnomad'.",
default="broad-mpg-gnomad",
)
p.add_argument(
"-p",
"--output-dir",
help="Where to write haplotype caller output.",
default="gs://gnomad-bw2/gnomad_v3_1_readviz_bamout",
)
p.add_argument(
"--docker-image",
help="Docker image to use.",
default="gcr.io/broad-mpg-gnomad/gnomad-readviz@sha256:7013fc57e3471617a314b08e2bcefe4711d401f83500c5c57e9a3e79ee8efebd",
)
p.add_argument(
"--cram-and-tsv_paths-table",
help="A text file containing at least these columns: sample_id, cram_path",
default=f"step4_output_cram_and_tsv_paths_table.tsv",
)
args = p.parse_args()
return p, args
def main():
    """
    Run HaplotypeCaller to generate bamouts.

    Step 5 of readviz pipeline: for each sample listed in the input table whose
    output BAM does not already exist, submit one Hail Batch job that

    1. builds a sorted interval list of ``PADDING_AROUND_VARIANT``-bp windows
       around the sample's readviz variants,
    2. runs GATK3 HaplotypeCaller over those intervals with ``-bamout``, and
    3. copies the resulting bamout BAM/BAI and bgzipped GVCF to
       ``--output-dir``.
    """
    _, args = parse_args()
    hl.init(log="/dev/null", quiet=True)

    project = args.gcloud_project
    docker_image = args.docker_image
    set_gcloud_project(project)

    logger.info("Making sure input cram_and_tsv_paths_table arg is valid...")
    # Logged once up front (the per-cram check below runs inside the loop).
    logger.info(
        "Checking that all input crams are 'US-CENTRAL1' or multi-regional buckets..."
    )
    bams = {}
    samples = {}
    with hl.hadoop_open(args.cram_and_tsv_paths_table) as c:
        # Confirm header has all required columns
        header = c.readline().strip().split("\t")
        if {"sample_id", "cram_path", "crai_path", "variants_tsv_bgz"} - set(header):
            # Bug fix: the original message contained a bare '%s' placeholder
            # that was never formatted; interpolate the actual table path.
            raise DataException(
                f"{args.cram_and_tsv_paths_table} must contain 'sample_id', "
                "'cram_path', 'crai_path', 'variants_tsv_bgz' columns!"
            )

        for line in c:
            sample, cram, crai, variants_tsv_bgz = line.strip().split("\t")

            # Store output BAM path.
            # Bug fix: previously read ``args.output_prefix``, an attribute
            # that parse_args never defines (the flag is --output-dir, and the
            # gsutil cp commands below already use args.output_dir), which
            # would raise AttributeError on the first data row.
            bam = f"{args.output_dir}/{sample}.bamout.bam"
            bai = f"{args.output_dir}/{sample}.bamout.bai"
            bams[sample] = bam

            # Store sample information
            samples[sample] = [cram, crai, variants_tsv_bgz, bam, bai]

            # Check that the cram's bucket is in "US-CENTRAL1" or is
            # multi-regional to avoid egress charges to the Batch cluster.
            # Performed per row so every input cram is validated.
            check_storage_bucket_region(cram)

    logger.info("Checking if any output bams already exist...")
    bam_files_exist = parallel_file_exists(list(bams.values()))
    # Only (re)process samples whose bamout is missing.
    samples_without_bams = [
        sample for sample in bams if not bam_files_exist[bams[sample]]
    ]

    # Process samples
    with run_batch(args, batch_name="HaplotypeCaller -bamout") as batch:
        for sample in tqdm(samples_without_bams, unit="samples"):
            cram, crai, variants_tsv_bgz, bam, bai = samples[sample]

            j = init_job(
                batch, f"readviz: {sample}", docker_image, args.cpu, args.memory,
            )
            j.command(
                f"""gcloud -q auth activate-service-account --key-file=/gsa-key/key.json"""
            )

            # Localize reference/interval inputs into the job's container.
            local_exclude_intervals = localize_file(j, EXCLUDE_INTERVALS)
            local_fasta = localize_file(j, HG38_REF_PATHS.fasta, use_gcsfuse=True)
            local_fasta_fai = localize_file(j, HG38_REF_PATHS.fai, use_gcsfuse=True)
            localize_file(j, HG38_REF_PATHS.dict, use_gcsfuse=True)
            local_tsv_bgz = localize_file(j, variants_tsv_bgz)
            local_cram_path = localize_file(j, cram)

            j.command(
                f"""echo --------------
echo "Start - time: $(date)"
df -kh
# 1) Convert variants_tsv_bgz to sorted interval list
gunzip -c "{local_tsv_bgz}" | awk '{{ OFS="\t" }} {{ print( "chr"$1, $2, $2 ) }}' | bedtools slop -b {PADDING_AROUND_VARIANT} -g {local_fasta_fai} > variant_windows.bed
# Sort the .bed file so that chromosomes are in the same order as in the input_cram file.
# Without this, if the input_cram has a different chromosome ordering (eg. chr1, chr10, .. vs. chr1, chr2, ..)
# than the interval list passed to GATK tools' -L arg, then GATK may silently skip some of regions in the -L intervals.
# The sort is done by first retrieving the input_cram header and passing it to GATK BedToIntervalList.
java -Xms2g -jar /gatk/gatk.jar PrintReadsHeader \
--gcs-project-for-requester-pays {project} \
-R {local_fasta} \
-I "{local_cram_path}" \
-O header.bam
java -Xms2g -jar /gatk/gatk.jar BedToIntervalList \
--SORT true \
--SEQUENCE_DICTIONARY header.bam \
--INPUT variant_windows.bed \
--OUTPUT variant_windows.interval_list
# 2) Get reads from the input_cram for the intervals in variant_windows.interval_list
time java -XX:GCTimeLimit=50 -XX:GCHeapFreeLimit=10 -XX:+DisableAttachMechanism -XX:MaxHeapSize=2000m -Xmx30000m \
-jar /gatk/GATK35.jar \
-T HaplotypeCaller \
-R {local_fasta} \
-I "{local_cram_path}" \
-L variant_windows.interval_list \
-XL {local_exclude_intervals} \
--disable_auto_index_creation_and_locking_when_reading_rods \
-ERC GVCF \
--max_alternate_alleles 3 \
-variant_index_parameter 128000 \
-variant_index_type LINEAR \
--read_filter OverclippedRead \
-bamout "{sample}.bamout.bam" \
-o "{sample}.gvcf" |& grep -v "^DEBUG"
bgzip "{sample}.gvcf"
tabix "{sample}.gvcf.gz"
gsutil -m cp "{sample}.bamout.bam" {args.output_dir}
gsutil -m cp "{sample}.bamout.bai" {args.output_dir}
gsutil -m cp "{sample}.gvcf.gz" {args.output_dir}
gsutil -m cp "{sample}.gvcf.gz.tbi" {args.output_dir}
ls -lh
echo --------------; free -h; df -kh; uptime; set +xe; echo "Done - time: $(date)"; echo --------------
"""
            )
# Script entry point: run the pipeline only when executed directly, not on import.
if __name__ == "__main__":
    main()
| en | 0.418833 | Variants to exclude when running HaplotypeCaller. Amount of padding to add around each variant when running HaplotypeCaller. Parse command line args. Run HaplotypeCaller to generate bamouts. Step 5 of readviz pipeline. # Confirm header has all required columns # Store output BAM path # Store sample information # Check that all buckets are in "US-CENTRAL1" or are multi-regional to avoid egress charges to the Batch cluster # Process samples gcloud -q auth activate-service-account --key-file=/gsa-key/key.json echo -------------- echo "Start - time: $(date)" df -kh # 1) Convert variants_tsv_bgz to sorted interval list gunzip -c "{local_tsv_bgz}" | awk '{{ OFS="\t" }} {{ print( "chr"$1, $2, $2 ) }}' | bedtools slop -b {PADDING_AROUND_VARIANT} -g {local_fasta_fai} > variant_windows.bed # Sort the .bed file so that chromosomes are in the same order as in the input_cram file. # Without this, if the input_cram has a different chromosome ordering (eg. chr1, chr10, .. vs. chr1, chr2, ..) # than the interval list passed to GATK tools' -L arg, then GATK may silently skip some of regions in the -L intervals. # The sort is done by first retrieving the input_cram header and passing it to GATK BedToIntervalList. 
java -Xms2g -jar /gatk/gatk.jar PrintReadsHeader \ --gcs-project-for-requester-pays {project} \ -R {local_fasta} \ -I "{local_cram_path}" \ -O header.bam java -Xms2g -jar /gatk/gatk.jar BedToIntervalList \ --SORT true \ --SEQUENCE_DICTIONARY header.bam \ --INPUT variant_windows.bed \ --OUTPUT variant_windows.interval_list # 2) Get reads from the input_cram for the intervals in variant_windows.interval_list time java -XX:GCTimeLimit=50 -XX:GCHeapFreeLimit=10 -XX:+DisableAttachMechanism -XX:MaxHeapSize=2000m -Xmx30000m \ -jar /gatk/GATK35.jar \ -T HaplotypeCaller \ -R {local_fasta} \ -I "{local_cram_path}" \ -L variant_windows.interval_list \ -XL {local_exclude_intervals} \ --disable_auto_index_creation_and_locking_when_reading_rods \ -ERC GVCF \ --max_alternate_alleles 3 \ -variant_index_parameter 128000 \ -variant_index_type LINEAR \ --read_filter OverclippedRead \ -bamout "{sample}.bamout.bam" \ -o "{sample}.gvcf" |& grep -v "^DEBUG" bgzip "{sample}.gvcf" tabix "{sample}.gvcf.gz" gsutil -m cp "{sample}.bamout.bam" {args.output_dir} gsutil -m cp "{sample}.bamout.bai" {args.output_dir} gsutil -m cp "{sample}.gvcf.gz" {args.output_dir} gsutil -m cp "{sample}.gvcf.gz.tbi" {args.output_dir} ls -lh echo --------------; free -h; df -kh; uptime; set +xe; echo "Done - time: $(date)"; echo -------------- | 2.087651 | 2 |