id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
118875 |
import re
# FIXME: depends on builtin_signed
######## COMMON API ##########
# Tags identifying the kind of an expression dict.
E_INTEGER = 'INT'
E_STRING = 'STR'
E_BIN_RAW = 'RAW'
E_BLOCK = 'BLK'
E_BUILTIN_FUNC = 'BUILTIN'
E_NEEDS_WORK = 'NEEDS_WORK'


def e_needs_work(length=None):
    """Placeholder for an expression that cannot be evaluated yet.

    length, when known, lets callers reserve output space in advance.
    """
    return {'type': E_NEEDS_WORK, 'len': length, 'final': False, 'data': None}


def e_integer(n):
    """Wrap a fully evaluated integer value."""
    return {'type': E_INTEGER, 'len': None, 'final': True, 'data': n}


def e_bin_raw(bitlen, x):
    """Wrap raw binary output x with its declared length."""
    return {'type': E_BIN_RAW, 'len': bitlen, 'final': True, 'data': x}


def e_block(assignments, value, labels=None):
    """Wrap a block: local assignments plus the value they produce.

    The block inherits length and finality from its value expression.
    """
    payload = {'vars': assignments, 'val': value, 'labels': labels or {}}
    return {'type': E_BLOCK, 'len': value['len'], 'final': value['final'],
            'data': payload}


def e_builtin_func(paramsnum, f):
    """Wrap a built-in function f together with its arity."""
    return {'type': E_BUILTIN_FUNC, 'len': None, 'final': True,
            'data': {'paramsnum': paramsnum, 'func': f}}
######## / COMMON API ##########
################################
# Yuck! Copy-pasta'd from bin.py
#
# It would be nice to make this an abstract module that is able to
# evaluate other code to call bin.s in a normal way.
# Like U, but handles negative numbers with 2's complement
def builtin_signed(ix, jx):
    """Encode integer jx as a little-endian two's-complement byte list.

    ix is the byte width (an E_INTEGER expression), jx the value to
    encode.  Returns an E_BIN_RAW whose data is a list of ix bytes,
    least-significant first, or an E_NEEDS_WORK placeholder while either
    operand is not final.  Raises for non-integer operands or a value
    that does not fit the requested width.
    """
    if not ix['final']:
        return e_needs_work()
    if not jx['final']:
        # Width is already known, so callers can reserve space for it.
        return e_needs_work(ix['data'])
    if ix['type'] != E_INTEGER or jx['type'] != E_INTEGER:
        raise Exception("S takes Integer, Integer arguments, got " + str(ix) + ", " + str(jx))
    nbytes = ix['data']
    if nbytes < 0:
        raise Exception("Bitsize must be non-negative")
    if jx['data'] >= 2**((nbytes*8)-1):
        raise Exception("Argument is too big for the space")
    if jx['data'] < -1 * (2**((nbytes*8)-1)):
        raise Exception("Argument is too negative for the space, " + str(jx))
    # Wrap negatives into the unsigned range (two's complement), then let
    # int.to_bytes emit the little-endian bytes directly instead of the
    # original hex-string round trip (which also mis-emitted one byte for
    # a zero-width encoding; a width of 0 now yields an empty list).
    num = jx['data'] & ((1 << (nbytes * 8)) - 1)
    out = list(num.to_bytes(nbytes, 'little'))
    return e_bin_raw(nbytes, out)
######## / YUCK ##########
# x86 opcode tables used by the assembler builtins below.
nop = 0x90  # single-byte NOP, used to pad encodings to a fixed width

# Opcode per destination register for the B8+rd `mov reg, imm32` family.
mov_reg = {
    'rax': 0xb8,
    'rcx': 0xb9,
    'rdx': 0xba,
    'rbx': 0xbb,
}

# Matches memory operands of the form "[rbp+<offset>]".
rbp_arg = re.compile(r'\[rbp\+(?P<mul>[0-9]+)\]')

# ModRM byte per destination register for `mov r64, [rbp+disp8]`.
rbp_dst_reg = {
    'rcx': 0x4d,
    'rdx': 0x55,
    'rsi': 0x75,
    'rdi': 0x7d,
}


def builtin_mov(dx, sx):
    """Encode a `mov dst, src` instruction as an E_BIN_RAW byte list.

    dx and sx are expression dicts from the common API: register names
    are E_STRING, immediates are E_INTEGER.  Only a handful of operand
    combinations are supported; anything else raises.
    """
    if not dx['final'] or not sx['final']:
        # Operands not evaluable yet; reserve the maximum width (5 bytes).
        return e_needs_work(5)
    if dx['type'] == E_STRING and sx['type'] == E_STRING:
        # Hard-coded register-to-register cases, NOP-padded where needed.
        if dx['data'] == 'rbp' and sx['data'] == 'rsp':
            return e_bin_raw(5, [0x48, 0x89, 0xe5, nop, nop])
        if dx['data'] == 'rsp' and sx['data'] == 'rbp':
            return e_bin_raw(5, [0x48, 0x89, 0xec, nop, nop])
        if dx['data'] == 'rdx' and sx['data'] == 'rax':
            return e_bin_raw(3, [0x48, 0x89, 0xc2])
        # Load from a [rbp+offset] memory operand.
        m = rbp_arg.match(sx['data'])
        if m:
            start = [0x48, 0x8b]  # REX.W + MOV r64, r/m64
            if dx['data'] not in rbp_dst_reg:
                raise Exception("Unknown rbp offset dest reg")
            # Only the offsets 16 and 24 are supported (they fit disp8).
            if m.group('mul') in ('16', '24'):
                mul = int(m.group('mul'))
            else:
                raise Exception("Unknown rbp offset mul")
            return e_bin_raw(4, start + [rbp_dst_reg[dx['data']], mul])
    if dx['type'] == E_STRING and sx['type'] == E_INTEGER:
        # mov reg, imm — immediate encoded as 4 little-endian bytes.
        src = builtin_signed(e_integer(4), sx)
        if dx['data'] not in mov_reg:
            raise Exception("Unknown mov register " + str(dx))
        return e_bin_raw(5, [mov_reg[dx['data']]] + src['data'])
    raise Exception("Unknown mov type: " + str(sx['data']) + " -> " + str(dx['data']))
# Opcode per register for the 50+rd `push r64` family.
push_reg = {
    'rax': 0x50,
    'rcx': 0x51,
    'rdx': 0x52,
    'rbx': 0x53,
    'rbp': 0x55,
    'rsi': 0x56,
    'rdi': 0x57,
}


def builtin_push(sx):
    """Encode `push src` as a fixed 5-byte, NOP-padded byte list.

    src is an expression dict: a register name (E_STRING) or an
    immediate (E_INTEGER).  Anything else raises.
    """
    if not sx['final']:
        # Operand not evaluable yet; reserve the full 5 bytes.
        return e_needs_work(5)
    if sx['type'] == E_STRING:
        if sx['data'] not in push_reg:
            raise Exception("Unknown push register " + str(sx))
        return e_bin_raw(5, [push_reg[sx['data']], nop, nop, nop, nop])
    if sx['type'] == E_INTEGER:
        if sx['data'] < -128 or sx['data'] > 127:
            # push imm32 (0x68) with a 4-byte little-endian immediate.
            src = builtin_signed(e_integer(4), sx)
            return e_bin_raw(5, [0x68] + src['data'])
        else:
            # push imm8 (0x6a), NOP-padded to the fixed width.
            src = builtin_signed(e_integer(1), sx)
            return e_bin_raw(5, [0x6a] + src['data'] + [nop, nop, nop])
    raise Exception("Unknown push type")
# Opcode per register for the 58+rd `pop r64` family.
pop_reg = {
    'rax': 0x58,
    'rcx': 0x59,
    'rdx': 0x5a,
    'rbx': 0x5b,
    'rbp': 0x5d,
    'rsi': 0x5e,
    'rdi': 0x5f,
}


def builtin_pop(dx):
    """Encode `pop dst` (dst must be a register name) as one byte."""
    if not dx['final']:
        return e_needs_work(1)
    if dx['type'] == E_STRING:
        if dx['data'] not in pop_reg:
            raise Exception("Unknown pop register " + str(dx))
        return e_bin_raw(1, [pop_reg[dx['data']]])
    raise Exception("Unknown pop type")
# Environment block exposing the assembler builtins; the block's own
# value is empty binary output (length 0).
builtins_asm = e_block([
    ('mov', e_builtin_func(2, builtin_mov)),
    ('push', e_builtin_func(1, builtin_push)),
    ('pop', e_builtin_func(1, builtin_pop)),
],
    e_bin_raw(0, []))

# perhaps build() could return a function that itself takes modules as
# arguments?
#
# This might not actually be a good idea because we'd be unable to do
# anything with non-builtin functions given the lack of an evaluator.
#
# Perhaps we should accept an evaluator as an argument and return an
# object that may or may not use the evaluator internally?
def build():
    """Return this module's builtin environment block."""
    return builtins_asm
| StarcoderdataPython |
128657 | <filename>streamlit/streamlit_sample.py
#streamlitをpipでインストール後、
# streamlit run sample-streamlit.pyで実行
import streamlit as st
import numpy as np
import pandas as pd
import requests
from typing import Any
st.title("HIT & BLOW")

# Base URL of the HIT & BLOW game API.
URL = "https://damp-earth-70561.herokuapp.com"


def get_room(session: requests.Session, room_id: int) -> Any:
    """Fetch the record for room *room_id*; returns None on a non-OK reply."""
    response = session.get(f"{URL}/rooms/{room_id}")
    if response.status_code != requests.codes.ok:
        return None
    return response.json()


def make_room_table(get_room_result):
    """Render the room record as a one-row streamlit table."""
    frame = pd.DataFrame(get_room_result, index=['i', ])
    st.table(frame)


def main() -> None:
    """Streamlit entry point: look up a room by ID when the button is pressed."""
    session = requests.Session()
    room_id = st.number_input("調べたいルームIDを入力してください", 1, 10000)
    if st.button("Get Room Information"):
        room_data = get_room(session, room_id)
        make_room_table(room_data)


if __name__ == "__main__":
    main()
187578 | <reponame>Doun92/UNIL_DH_memoire<filename>script_11/main.py
"""
Ce script unit tous les autres scrits qui s'occupent de tâches plus ponctuelles.
Il parcourt chaque mot, lettre par lettre ou syllabe par syllabe, selon les particularités de chacun.
auteur : <NAME>
license : license UNIL
"""
class EvolutionPhonetique:
    """Drives the phonetic-evolution pipeline.

    The word is syllabified, then each positional syllable processor
    (initial, contrepenultimate, contrefinal, antepenultimate,
    penultimate, final) contributes its sound changes depending on the
    syllable count.

    NOTE(review): `self` is used as the *word string* itself, not as an
    instance — callers appear to invoke
    EvolutionPhonetique.evolution_phonetique(word); confirm against the
    commented-out driver below.
    """

    def __init__(self):
        return

    def evolution_phonetique(self):
        # Project-local processors are imported lazily so each call gets
        # fresh instances.
        from syllabifier import Syllabifier
        syllabifier = Syllabifier()
        from AA1_syllabe_initiale import SyllabeInitiale
        syllabe_initiale = SyllabeInitiale()
        from AA2_syllabe_contrepénultième import SyllabeContrepenultieme
        syllabe_contrepenultieme = SyllabeContrepenultieme()
        from AA3_syllabe_contrefinale import SyllabeContrefinale
        syllabe_contrefinale = SyllabeContrefinale()
        from AA4_syllabe_antépénultième_tonique import SyllabeAntePenultieme
        syllabe_ante_penultieme = SyllabeAntePenultieme()
        from AA5_syllabe_pénultième import SyllabePenultieme
        syllabe_penultieme = SyllabePenultieme()
        from AA6_syllabe_finale import SyllabeFinale
        syllabe_finale = SyllabeFinale()
        from AA7_conjugaison import Conjugaison
        conjugaison1 = Conjugaison()  # NOTE(review): created but never used here
        syllabes = syllabifier.syllabify(self)
        print(syllabes)
        changements = list()
        # Miscellaneous library imports (unused below; kept as in original).
        import re
        import collections
        # First syllable and/or prefix
        if len(syllabes) > 0:
            changements.append(syllabe_initiale.syllabe_initiale(self))
        # "Contrepénultième" syllable (two before the antepenultimate)
        if len(syllabes) > 5:
            changements.append(syllabe_contrepenultieme.syllabe_contrepenultieme(self))
        # "Contrefinale" syllable
        if len(syllabes) > 4:
            changements.append(syllabe_contrefinale.syllabe_contrefinale(self))
        # Antepenultimate (stressed) syllable
        if len(syllabes) > 3:
            changements.append(syllabe_ante_penultieme.syllabe_ante_penultieme(self))
        # Penultimate syllable
        if len(syllabes) > 2:
            changements.append(syllabe_penultieme.syllabe_penultieme(self))
        # Final syllable
        if len(syllabes) > 1:
            changements.append(syllabe_finale.syllabe_finale(self))
        # Flatten the per-syllable change lists and join into the evolved word.
        flat_list = [item for sublist in changements for item in sublist]
        output = "".join(flat_list)
        output = output.lower()
        return output
# def main():
#
# #Importation de librairies diverses
# import re
# import collections
#
#
#
# #Importation du dictionnaire de tous les mots du texte
# # from dictionary import dict
# # from Mariale_1_dict import dict
# from Moine_dict import dict
# keys = dict.keys()
# values = dict.values()
# # print(keys)
# # print(values)
#
# every_word = open('AA_every_word.txt', 'w', encoding = 'utf-8')
# catch = open('AA_catch.txt', 'w+', encoding = 'utf-8')
# dont_catch = open('AA_dont_catch.txt', 'w+', encoding = 'utf-8')
#
# # print(len(dict_Marie))
#
# for key in keys:
# # print(key)
# # print(dict[key])
# print_final = EvolutionPhonetique.evolution_phonetique(key)
#
# every_word.write('\n %s > %s \n \n' % (key, print_final) + '----------------------------------------- \n' )
# # print(print_final)
#
# if print_final == dict[key] or print_final in dict[key] or print_final in dict[key][0]: #Ce serait ici qu'il faudrait modifier
# catch.write('\n %s > %s == %s \n \n' % (key, print_final, dict[key]) + '----------------------------------------- \n')
# else:
# dont_catch.write(('\n %s > %s != %s \n \n' % (key, print_final, dict[key]) + '----------------------------------------- \n'))
#
# main()
| StarcoderdataPython |
3264619 | <filename>src/Day 3/feature/matching.py
import cv2
from matplotlib import pyplot as plt

# Load the two images to compare, as grayscale (SIFT works on
# single-channel images).
# NOTE(review): machine-specific absolute Windows paths — adjust locally.
img0 = cv2.imread(r'C:\Users\harrizazham98\Desktop\OpenCVForPython\resources\Day 3\kfc2.png',
                  cv2.IMREAD_GRAYSCALE)
img1 = cv2.imread(r'C:\Users\harrizazham98\Desktop\OpenCVForPython\resources\Day 3\kfc1.jpg',
                  cv2.IMREAD_GRAYSCALE)

# Perform SIFT feature detection and description.
sift = cv2.xfeatures2d.SIFT_create()
kp0, des0 = sift.detectAndCompute(img0, None)
kp1, des1 = sift.detectAndCompute(img1, None)

# Perform brute-force matching: for each descriptor of img0 keep the two
# nearest descriptors of img1 (k=2) so the ratio test can compare them.
bf = cv2.BFMatcher()
matches = bf.knnMatch(des0,des1,k=2)

# Lowe's ratio test: a match is "good" only when the best candidate is
# clearly closer than the runner-up (distance ratio < 0.75), which
# filters ambiguous correspondences.
good = []
for m,n in matches:
    if m.distance < 0.75*n.distance:
        good.append([m])

# cv2.drawMatchesKnn expects a list of lists of matches.
img3 = cv2.drawMatchesKnn(img0,kp0,img1,kp1,good,None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
plt.imshow(img3),plt.show()
1774656 | <filename>TP_04/ejercicio_7/script_2/retrieval.py
import re
import struct
from importer import *
from normalizer import *
from entity_extractor import *
from constants import *
class Retrieval:
    """Boolean AND retrieval over an on-disk inverted index.

    Postings are read lazily from disk or preloaded into memory
    (``on_memory``).  Three AND strategies exist: Python set
    intersection (default), a handmade sorted-list merge
    (``avoid_sets`` and ``avoid_skips``), or a skip-list assisted merge
    (``avoid_sets`` only).
    """

    def __init__(self, metadata, on_memory=False, avoid_sets=False, avoid_skips=False):
        self.metadata = metadata
        self.importer = Importer(metadata["TERMS_SIZE"], metadata["DOCNAMES_SIZE"])
        self.vocabulary = self.importer.read_vocabulary()
        self.ids_docnames = self.importer.read_docnames_ids_file()
        self.normalizer = Normalizer(metadata["STEMMING_LANGUAGE"])
        self.entity_extractor = Entity_Extractor(metadata["STEMMING_LANGUAGE"])
        self.avoid_sets = avoid_sets
        self.avoid_skips = avoid_skips
        self.on_memory = on_memory
        if self.on_memory:
            # Preload every posting list so queries never touch disk.
            self.inverted_index = self.importer.read_inverted_index(self.vocabulary)

    ## Normalization
    def normalize_term(self, term):
        # Apply the configured normalization (e.g. stemming) to one term.
        normalized_term = self.normalizer.normalize(term)
        return normalized_term

    def normalize_terms_without_entities(self, terms):
        normalized_terms = []
        for term in terms:
            normalized_term = self.normalize_term(term)
            normalized_terms.append(normalized_term)
        return normalized_terms

    def normalize_terms_with_entities(self, term):
        # Entity-aware normalization is not implemented; the string below
        # is a sketched (dead) implementation kept for reference only.
        pass
        """
        if self.metadata["EXTRACT_ENTITIES"]:
            rest, entities_list = self.entity_extractor.extract_entities(term)
            if len(entities_list) >= 1:
                #if rest != "":?
                #if len(entities_list) >= 2: ?
                #if entity != term? #Doesnt work for U.S.A > usa
                entity = entities_list[0]
                return entity
        return self.normalizer.normalize(term)
        #Si tiene entidades que no estan en el vocabulary, intentar sin detectar la entidad.
        """

    def normalize_terms(self, terms):
        # NOTE(review): both branches are identical — the entity-extraction
        # path is pending (see normalize_terms_with_entities above).
        if self.metadata["EXTRACT_ENTITIES"]:
            return self.normalize_terms_without_entities(terms)
        else:
            return self.normalize_terms_without_entities(terms)
    ##

    def query(self, user_input):
        """Run an AND query ("a AND b" or "a AND b AND c"); return doc ids.

        Returns [] when any normalized term is missing from the vocabulary.
        """
        ands = user_input.count(AND_SYMBOL)
        normalized_terms = self.normalize_terms(user_input.split(AND_SYMBOL))
        if not self.all_terms_in_vocabulary(normalized_terms):
            return []
        # NOTE(review): the two-AND and other branches are identical, so
        # dispatch really depends only on the avoid_* flags.
        if (ands) == 1:
            if self.avoid_sets:
                if self.avoid_skips:
                    return self.and_query(normalized_terms)
                else:
                    return self.and_query_skips(normalized_terms)
            else:
                return sorted(list(self.and_query_sets(normalized_terms)))
        else:
            if self.avoid_sets:
                if self.avoid_skips:
                    return self.and_query(normalized_terms)
                else:
                    return self.and_query_skips(normalized_terms)
            else:
                return sorted(list(self.and_query_sets(normalized_terms)))
        return []  # unreachable: every branch above returns

    ## Sets Querys
    def and_query_sets(self, terms):
        # Intersect the posting lists with Python sets (2 or 3 terms).
        posting1 = self.get_posting(terms[0])
        posting2 = self.get_posting(terms[1])
        if len(terms) == 2:
            return set(posting1).intersection(set(posting2))
        else:
            posting3 = self.get_posting(terms[2])
            return set(posting1).intersection(set(posting2)).intersection(set(posting3))
    ##

    ## Handmade Querys
    def get_terms_in_order(self, terms):
        """Return the 2-3 terms ordered by ascending document frequency.

        Intersecting rarest-first minimizes comparisons.  Always returns a
        3-tuple; the third slot is None for a two-term query.
        NOTE(review): two terms sharing a df are bucketed into a list, but
        a *third* term with the same df would nest lists — verify.
        """
        df_terms = {}
        for term in terms:
            df_term, _, _ = self.vocabulary[term]
            if df_term not in df_terms.keys():
                df_terms[df_term] = term
            else:
                df_terms[df_term] = [df_terms[df_term], term]
        terms_sorted_by_df = []
        for key in sorted(df_terms.keys()):
            if type(df_terms[key]) == list:
                terms_sorted_by_df.extend(df_terms[key])
            else:
                terms_sorted_by_df.append(df_terms[key])
        if len(terms_sorted_by_df) == 2:
            return terms_sorted_by_df[0], terms_sorted_by_df[1], None
        else:
            return terms_sorted_by_df[0], terms_sorted_by_df[1], terms_sorted_by_df[2]

    def search_with_skips(
        self, searching_doc_id, skips_term2, start_index_pointer, df_term
    ):
        """True iff searching_doc_id is in term2's posting, via its skip list."""
        previous_pointer = start_index_pointer
        for doc_id, pointer_index in skips_term2:
            if doc_id == searching_doc_id:
                return True
            if doc_id > searching_doc_id:
                # Target can only be inside the block before this skip.
                for doc_id in self.importer.get_posting_part(previous_pointer, K_SKIPS):
                    if doc_id == searching_doc_id:
                        return True
                    if doc_id > searching_doc_id:
                        return False
            previous_pointer = pointer_index
        # Si salio del for, es porque todos los doc ids son menores, si está, esta al final.
        # La primera posicion que lee, es el valor de la posting, por eso 1 +
        # (All skip ids were smaller: if present, the target is in the final
        # partial block; the first position read is the posting value itself,
        # hence the 1 +.)
        return searching_doc_id in self.importer.get_posting_part(
            previous_pointer, 1 + (df_term % K_SKIPS)
        )

    def and_query_skips(self, terms):
        """Skip-list AND: probe the rarest posting against the others."""
        term1, term2, term3 = self.get_terms_in_order(terms)
        df_term2, skip_pointer_term2, index_pointer_term2 = self.vocabulary[term2]
        posting_term1 = self.get_posting(term1)
        skips_term2 = self.importer.get_skip(skip_pointer_term2, df_term2)
        two_term_result = self.two_term_and_query_skips(
            posting_term1, index_pointer_term2, skips_term2, df_term2
        )
        if term3 == None:
            return two_term_result
        else:
            if two_term_result == []:
                return []  # short-circuit: empty intersection stays empty
            df_term3, skip_pointer_term3, index_pointer_term3 = self.vocabulary[term3]
            skips_term3 = self.importer.get_skip(skip_pointer_term3, df_term3)
            return self.two_term_and_query_skips(
                two_term_result, index_pointer_term3, skips_term3, df_term3
            )

    def two_term_and_query_skips(
        self, posting_term1, start_index_pointer_term2, skips_term2, df_term
    ):
        # Keep each doc id of the (shorter) first posting that is found in
        # the second posting via its skip list.
        result = []
        for doc_id in posting_term1:
            found = self.search_with_skips(
                doc_id, skips_term2, start_index_pointer_term2, df_term
            )
            if found:
                result.append(doc_id)
        return result

    def search(
        self, searching_doc_id, posting
    ):
        # Linear scan of a sorted posting with early exit.
        for doc_id in posting:
            if doc_id == searching_doc_id:
                return True
            if doc_id > searching_doc_id:
                return False
        return False

    def and_query(self, terms):
        """Plain merge AND (no sets, no skips), rarest term first."""
        term1, term2, term3 = self.get_terms_in_order(terms)
        posting_term1 = self.get_posting(term1)
        posting_term2 = self.get_posting(term2)
        two_term_result = self.two_term_and_query(posting_term1, posting_term2)
        if term3 == None:
            return two_term_result
        else:
            if two_term_result == []:
                return []  # short-circuit: empty intersection stays empty
            posting_term3 = self.get_posting(term3)
            return self.two_term_and_query(two_term_result, posting_term3)

    def two_term_and_query(
        self, posting_term1, posting_term2
    ):
        result = []
        for doc_id in posting_term1:
            found = self.search(
                doc_id, posting_term2
            )
            if found:
                result.append(doc_id)
        return result
    ##

    def get_posting(self, term):
        """Posting list for *term*, from memory or disk.

        Unknown terms yield [] (or {} in set mode).
        NOTE(review): the bare excepts silently map *any* error — not just
        a missing term — to an empty result; consider except KeyError.
        """
        if self.on_memory:
            try:
                return self.inverted_index[term]
            except:
                if self.avoid_sets:
                    return []
                else:
                    return {}
        else:
            try:
                df_term, skips_pointer, index_pointer = self.vocabulary[term]
                return self.importer.read_posting(index_pointer, df_term)
            except:
                if self.avoid_sets:
                    return []
                else:
                    return {}

    def all_terms_in_vocabulary(self, terms):
        # True iff every normalized query term has a vocabulary entry.
        for term in terms:
            if term not in self.vocabulary.keys():
                return False
        return True

    def get_skip(self, term):
        # Skip list for a raw (unnormalized) term; [] on any failure.
        try:
            normalized_term = self.normalize_term(term)
            df_term, skips_pointer, index_pointer = self.vocabulary[normalized_term]
            return self.importer.get_skip(skips_pointer, df_term)
        except:
            return []

    def get_vocabulary(self):
        return self.vocabulary.keys()
| StarcoderdataPython |
3256309 | <gh_stars>0
import random
import unittest
import unittest.mock as mock
import learning
class TestLearning(unittest.TestCase):
    """Unit tests for the learning module."""

    def test_get_random_belief_bit(self):
        # Patch random.uniform so the draw is deterministic.
        # NOTE(review): assumes a draw of 0 against probability 0.4 yields
        # bit 0 — confirm against learning.get_random_belief_bit.
        with mock.patch('random.uniform', mock.Mock()) as mock_uniform:
            mock_uniform.return_value = 0
            bit = learning.get_random_belief_bit(0.4)
            self.assertEqual(bit, 0)
            mock_uniform.assert_called_once()


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1758143 | #!/usr/bin/env python3
# Copyright (C) 2016 Intel Corporation
#
# SPDX-License-Identifier: MIT
#
import unittest
import logging
import os
from common import setup_sys_path, TestBase
setup_sys_path()
from oeqa.core.exception import OEQAMissingVariable
from oeqa.core.utils.test import getCaseMethod, getSuiteCasesNames
class TestData(TestBase):
    """Checks the oeqa test-data plumbing using the 'data' test module."""
    modules = ['data']

    def test_data_fail_missing_variable(self):
        # With no test data supplied, the case must error with
        # OEQAMissingVariable.
        expectedException = "oeqa.core.exception.OEQAMissingVariable"
        tc = self._testLoader(modules=self.modules)
        results = tc.runTests()
        self.assertFalse(results.wasSuccessful())
        for test, data in results.errors:
            expect = False
            if expectedException in data:
                expect = True
            self.assertTrue(expect)

    def test_data_fail_wrong_variable(self):
        # Wrong variable values make the case fail with a plain AssertionError.
        expectedError = 'AssertionError'
        d = {'IMAGE' : 'core-image-sato', 'ARCH' : 'arm'}
        tc = self._testLoader(d=d, modules=self.modules)
        results = tc.runTests()
        self.assertFalse(results.wasSuccessful())
        for test, data in results.failures:
            expect = False
            if expectedError in data:
                expect = True
            self.assertTrue(expect)

    def test_data_ok(self):
        # Correct variable values: the whole run must pass.
        d = {'IMAGE' : 'core-image-minimal', 'ARCH' : 'x86', 'MACHINE' : 'qemuarm'}
        tc = self._testLoader(d=d, modules=self.modules)
        self.assertEqual(True, tc.runTests().wasSuccessful())


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1593 | <reponame>Tillsten/skultrafast
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 17 21:33:24 2015
@author: Tillsten
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Tableau 20 palette, as 0-255 RGB triples.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
             (44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
             (148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
             (227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
             (188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Convert to matplotlib's 0-1 float range.
tableau20 = [(r/255., g/255., b/255.) for r, g, b in tableau20]

# rcParams overrides: outward ticks, hidden top/right spines.
out_ticks = {'xtick.direction': 'out',
             'xtick.major.width': 1.5,
             'xtick.minor.width': 1,
             'xtick.major.size': 6,
             'xtick.minor.size': 3,
             'xtick.minor.visible': True,
             'ytick.direction': 'out',
             'ytick.major.width': 1.5,
             'ytick.minor.width': 1,
             'ytick.major.size': 6,
             'ytick.minor.size': 3,
             'ytick.minor.visible': True,
             'axes.spines.top': False,
             'axes.spines.right': False,
             'text.hinting': True,
             'axes.titlesize': 'xx-large',
             'axes.titleweight': 'semibold',
             }

plt.figure(figsize=(6, 4))
with plt.style.context(out_ticks):
    ax = plt.subplot(111)
    x = np.linspace(0, 7, 1000)
    # Damped cosine: exp(-t/1.5) envelope times cos(2*pi*t).
    y = np.exp(-x/1.5)*np.cos(x/1*(2*np.pi))
    # Envelope curves in grey, signal on top.
    l, = plt.plot(x, np.exp(-x/1.5), lw=0.5, color='grey')
    l, = plt.plot(x, -np.exp(-x/1.5), lw=0.5, color='grey')
    l, = plt.plot(x, y, lw=1.1)
    plt.tick_params(which='both', top=False, right=False)
    plt.margins(0.01)
    ax.text(7, 1, r'$y(t)=\exp\left(-t/1.5\right)\cos(\omega_1t)\cos(\omega_2t)$',
            fontsize=18, va='top', ha='right')
    plt.setp(plt.gca(), xlabel='Time [s]', ylabel='Amplitude')
    # Inset: amplitude spectrum of y over the positive frequencies.
    # Fix: slice bounds must be integers in Python 3, so use floor
    # division (the original `y.size/2` raised TypeError under py3).
    half = y.size // 2
    ax = plt.axes([0.57, 0.25, 0.3, .2])
    ax.fill_between(np.fft.fftfreq(x.size, x[1]-x[0])[:half],
                    abs(np.fft.fft(y))[:half], alpha=0.2, color='r')
    ax.set_xlim(0, 10)
    ax.set_xlabel("Frequency")
    ax.xaxis.labelpad = 1
    plt.locator_params(nbins=4)
    plt.tick_params(which='both', top=False, right=False)
    plt.tick_params(which='minor', bottom=False, left=False)

plt.show()
| StarcoderdataPython |
1670812 | <filename>genalg/sonicfeatures.py
import librosa
import numpy as np
def silence_ratio(filename, thresh=20):
    """Fraction of STFT frames whose summed log-magnitude is below *thresh*."""
    # Decode the file to a mono 44.1 kHz signal.
    samples, _rate = librosa.load(filename, mono=True, sr=44100)
    # Per-frame loudness: sum the magnitude spectrum over frequency bins,
    # then convert to a log-amplitude scale (reference power = 1.0).
    magnitudes, _phase = librosa.magphase(librosa.stft(samples))
    frame_loudness = librosa.logamplitude(np.sum(magnitudes, axis=0), ref_power=1.0)
    # Ratio of "quiet" frames to the total frame count.
    quiet_frames = np.sum(frame_loudness < thresh)
    return float(quiet_frames) / frame_loudness.shape[0]
def test_silence_ratio():
    """Smoke test: print the silence ratio of a bundled sample file."""
    f1 = "../resources/audio/sample1.wav"
    # Fix: print is a function in Python 3 (the original used the
    # Python 2 statement form, a SyntaxError under py3).
    print(silence_ratio(f1))


if __name__ == "__main__":
    test_silence_ratio()
| StarcoderdataPython |
1694938 | <reponame>GYosifov88/Python-Fundamentals
# def factorial (a, b):
# first_num = 1
# second_num = 1
# while a >= 1:
# first_num = first_num * a
# a -= 1
# while b >= 1:
# second_num = second_num * b
# b -= 1
# final_result = first_num / second_num
# print (f'{final_result:.2f}')
#
#
# first_digit = int(input())
# second_digit = int(input())
#
# factorial(first_digit, second_digit)
def factorial(num):
    """Return num! computed iteratively.

    Iteration avoids the recursion-depth limit the original recursive
    version hit for large inputs.

    :param num: non-negative integer
    :raises ValueError: if num is negative (the recursive original
        recursed forever instead)
    """
    if num < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for factor in range(2, num + 1):
        result *= factor
    return result
# Read the two operands and print n! / m! rounded to two decimals.
first_digit = int(input())
second_digit = int(input())

result = factorial(first_digit) / factorial(second_digit)
print(f'{result:.2f}')
136381 | """
Command line utility for converting from pdf to text
- Part of the basic business (decode parameters, open file and main function) are in fucntion, not classes.
"""
import time
import threading
import json
import sys
import getopt
import os
import signal
from common.logmanager import LogManager
from pdfutils.pdfmanager import PdfManager
# Initialize logger: Global logger variable for all this module
logger = LogManager()
def show_help():
    """Log the command-line usage string."""
    usage = "pdf2txt.py <-i input_file | -o output_file> [-h] [-p]"
    logger.info(usage)
def read_pdf_file(input_file):
    """Return the raw bytes of *input_file*, or None when it is missing.

    :param input_file: Name of the PDF file
    :type input_file: str
    :returns: Content of the pdf file or None if error
    :rtype: bytes
    """
    if not os.path.isfile(input_file):
        logger.error("Input file does not exists.")
        return None
    with open(input_file, "rb") as pdf_handle:
        return pdf_handle.read()
def main(argv):
    """Parse CLI options, convert the input PDF to text, save the result.

    :param argv: command-line arguments (without the program name)
    """
    # Get parameters (it is here for simplicity, it should be in a function)
    try:
        opts, args = getopt.getopt(
            argv, "hi:o:", ["input_file=", "output_file="])
    except getopt.GetoptError:
        show_help()
        sys.exit(2)
    input_file = None
    output_file = None
    for opt, arg in opts:
        if opt == '-h':
            show_help()
            sys.exit()
        elif opt in ("-i", "--input"):
            input_file = arg
        elif opt in ("-o", "--output"):
            output_file = arg
    # Both -i and -o are mandatory.
    if not input_file or not output_file:
        show_help()
        sys.exit()
    try:
        # Open and read the file
        logger.info('Input file: {0}'.format(input_file))
        pdf_binary = read_pdf_file(input_file)
        if pdf_binary:
            logger.debug('Input file opened')
            # Convert the PDF bytes to text.
            pdf_helper = PdfManager()
            result = pdf_helper.process(pdf_binary)
            logger.debug('PDF file processed. Content: {0}'.format(result))
            # Save the extracted text.
            with open(output_file, "w") as text_file:
                text_file.write(result)
            logger.debug('Text file saved: {0}'.format(output_file))
        else:
            logger.critical('Error reading input file')
    except Exception as e:
        # This is not neccesary as exceptions are controlled in caller function
        logger.critical(
            "Exception on pdf2txt service: {0}".format(repr(e)))
if __name__ == "__main__":
    try:
        logger.info("Starting PDF to text service")
        main(sys.argv[1:])
        logger.info("PDF to text service finished")
    except Exception as e:
        logger.critical(
            "*** Exception on PDF to text service: {0}".format(repr(e)))
    # Force a clean process exit even if non-daemon threads linger:
    # fall back to os._exit when sys.exit is intercepted.
    try:
        sys.exit(0)
    except SystemExit:
        os._exit(0)
| StarcoderdataPython |
23679 | # Flight duration model: Just distance
# In this exercise you'll build a regression model to predict flight duration (the duration column).
# For the moment you'll keep the model simple, including only the distance of the flight (the km column) as a predictor.
# The data are in flights. The first few records are displayed in the terminal. These data have also been split into training and testing sets and are available as flights_train and flights_test.
# Instructions
# 100 XP
# Create a linear regression object. Specify the name of the label column. Fit it to the training data.
# Make predictions on the testing data.
# Create a regression evaluator object and use it to evaluate RMSE on the testing data.
from pyspark.ml.regression import LinearRegression
from pyspark.ml.evaluation import RegressionEvaluator

# NOTE: flights_train / flights_test are predefined by the exercise
# environment (pre-split DataFrames of the flights data).

# Create a regression object and train on training data
regression = LinearRegression(labelCol='duration').fit(flights_train)

# Create predictions for the testing data and take a look at the predictions
predictions = regression.transform(flights_test)
predictions.select('duration', 'prediction').show(5, False)

# Calculate the RMSE (RegressionEvaluator's default metric) on the test set
RegressionEvaluator(labelCol='duration').evaluate(predictions)
15425 | from flask_wtf import FlaskForm
from wtforms import SubmitField, SelectField, IntegerField, FloatField, StringField
from wtforms.validators import DataRequired
import pandas as pd
# Category values for the select fields, loaded once at import time.
uniq_vals = pd.read_csv("data/unique_cat_vals.csv", index_col=0)


class InputData(FlaskForm):
    """Car-price prediction input form.

    Select-field choices come from the unique category values CSV; the
    numeric fields are free inputs.
    """
    car = SelectField(label="Car", choices=uniq_vals.car.dropna().sort_values(), validators=[DataRequired()])
    model = SelectField("Model", choices=uniq_vals.model.dropna().sort_values(), validators=[DataRequired()])
    body = SelectField(label="Body", choices=uniq_vals.body.dropna().sort_values(), validators=[DataRequired()])
    drive = SelectField("Drive", choices=uniq_vals.drive.dropna().sort_values(), validators=[DataRequired()])
    engType = SelectField("Engine type: ", choices=uniq_vals.engType.dropna().sort_values(), validators=[DataRequired()])
    engV = FloatField("Engine Volume", validators=[DataRequired()])
    year = IntegerField("Year", validators=[DataRequired()])
    mileage = IntegerField(label="Mileage", validators=[DataRequired()])
    registration = SelectField(label="Registration", choices=uniq_vals.registration.dropna())
    submit = SubmitField("Predict the price")
| StarcoderdataPython |
1663844 | from kw_sorter.interfaces import ISortEntry
from kw_sorter.sorter import Sorter, SortByEntry
from kw_tests.common_class import CommonTestClass
class ItemTest(CommonTestClass):
    """Exercises SortByEntry and Sorter.

    The _mock_entry_* factories come from CommonTestClass.
    """

    def test_entry(self):
        # A fresh entry has no key and sorts ascending by default.
        entry = SortByEntry()
        assert not entry.get_key()
        assert ISortEntry.DIRECTION_ASC == entry.get_direction()
        entry.set_key('any')
        assert 'any' == entry.get_key()
        assert ISortEntry.DIRECTION_ASC == entry.get_direction()
        entry.set_direction(ISortEntry.DIRECTION_DESC)
        assert ISortEntry.DIRECTION_DESC == entry.get_direction()
        # An invalid direction is ignored; the previous one is kept.
        entry.set_direction('bad')
        assert ISortEntry.DIRECTION_DESC == entry.get_direction()

    def test_sorter_basic(self):
        # add() populates the sorter; clear() empties it again.
        sorter = Sorter()
        assert 1 > len(list(sorter.get_entries()))
        assert isinstance(sorter.get_default_item(), ISortEntry)
        sorter.add(self._mock_entry_1())
        sorter.add(self._mock_entry_2())
        sorter.add(self._mock_entry_3())
        assert 0 < len(list(sorter.get_entries()))
        sorter.clear()
        assert 1 > len(list(sorter.get_entries()))

    def test_sorter_removal(self):
        sorter = Sorter()
        sorter.add(self._mock_entry_1())
        sorter.add(self._mock_entry_3())
        sorter.add(self._mock_entry_3())  # intentional duplicate
        assert 3 == len(list(sorter.get_entries()))
        # Removing by key drops every entry with that key (both duplicates).
        sorter.remove(self._mock_entry_3().get_key())
        assert 1 == len(list(sorter.get_entries()))
        sorter.clear()
        assert 1 > len(list(sorter.get_entries()))

    def test_sorter_order(self):
        # Entries are returned in insertion order, not re-sorted by key.
        sorter = Sorter()
        sorter.add(self._mock_entry_1())
        sorter.add(self._mock_entry_3())
        sorter.add(self._mock_entry_2())
        result = list(sorter.get_entries())
        assert self._mock_entry_1().get_key() == result[0].get_key()
        assert self._mock_entry_3().get_key() == result[1].get_key()
        assert self._mock_entry_2().get_key() == result[2].get_key()
| StarcoderdataPython |
68779 | #sample_grammar.py
import argparse
import random
import numpy.random
import pcfgfactory
import pcfg
import utility
parser = argparse.ArgumentParser(description='Replace low probability tokens with an UNK token for a given PCFG')
parser.add_argument("inputfilename", help="File where the original PCFG is.")
parser.add_argument("outputfilename", help="File where the resulting PCFG will be stored.")
parser.add_argument("--threshold", help="Probability (not expectation) default 1e-5", default=1e-5,type=float)
parser.add_argument("--unk", help="Symbol to use, default UNK", default="UNK")
args = parser.parse_args()

original = pcfg.load_pcfg_from_file(args.inputfilename)

# Keep only terminals whose expected count per sentence clears the threshold.
# NOTE(review): the comparison uses e * L (terminal expectation scaled by
# the expected sentence length) although the --threshold help text says
# "probability" — confirm which quantity is intended.
newterminals = set()
te = original.terminal_expectations()
L = original.expected_length()
for a,e in te.items():
    if e * L > args.threshold:
        newterminals.add(a)

# Probability mass of the dropped terminals, accumulated per nonterminal.
unk_probs = { nt:0 for nt in original.nonterminals }
newproductions = []
newparameters = {}
for prod,e in original.parameters.items():
    if len(prod) == 3:
        # Binary rule (A -> B C): always kept unchanged.
        newproductions.append(prod)
        newparameters[prod] = e
    else:
        # Lexical rule (A -> a): kept if `a` survived the cut, otherwise
        # its probability is redirected to A -> UNK.
        nt,a = prod
        if a in newterminals:
            newproductions.append(prod)
            newparameters[prod] = e
        else:
            unk_probs[nt] += e

# Materialise one A -> UNK rule per nonterminal that lost any mass.
for nt,p in unk_probs.items():
    if p > 0:
        prod = (nt,args.unk)
        newproductions.append(prod)
        newparameters[prod] = p

# Overwrite the grammar in place and store it with a provenance header.
original.terminals = newterminals
original.productions = newproductions
original.parameters = newparameters
original.store(args.outputfilename,header=[ "Unkified with symbol %s, threshold %e" % (args.unk,args.threshold)])
| StarcoderdataPython |
1622464 | #!/usr/bin/env python3
# by dongchao <<EMAIL>>
from flask import render_template
from . import dashboard
@dashboard.route('/dashboard_index/', methods=['POST', 'GET'])
def dashboard_index():
    """Render the dashboard landing page."""
    return render_template('dashboard.html')
| StarcoderdataPython |
167782 | <filename>pyanime4k/error.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Name: PyAnime4K error
Author: TianZerL
Editor: K4YT3X
"""
from pyanime4k.wrapper import *
"""
typedef enum ac_error
{
AC_OK = 0,
AC_ERROR_NULL_INSTANCE,
AC_ERROR_NULL_PARAMETERS,
AC_ERROR_NULL_Data,
AC_ERROR_INIT_GPU,
AC_ERROR_PORCESSOR_TYPE,
AC_ERROR_LOAD_IMAGE,
AC_ERROR_LOAD_VIDEO,
AC_ERROR_INIT_VIDEO_WRITER,
AC_ERROR_GPU_PROCESS,
AC_ERROR_SAVE_TO_NULL_POINTER,
AC_ERROR_NOT_YUV444,
AC_ERROR_YUV444_AND_RGB32_AT_SAME_TIME,
AC_ERROR_CUDA_NOT_SUPPORTED
} ac_error;
"""
# Maps ac_error codes (constants from pyanime4k.wrapper) to the C enum
# member names, for human-readable error messages.
# NOTE(review): AC_ERROR_VIDEO_MODE_UNINIT appears here but not in the
# enum listing quoted above — keep the two in sync with the C header.
error_code_str = {
    AC_OK: "AC_OK",
    AC_ERROR_NULL_INSTANCE: "AC_ERROR_NULL_INSTANCE",
    AC_ERROR_NULL_PARAMETERS: "AC_ERROR_NULL_PARAMETERS",
    AC_ERROR_NULL_Data: "AC_ERROR_NULL_Data",
    AC_ERROR_INIT_GPU: "AC_ERROR_INIT_GPU",
    AC_ERROR_PORCESSOR_TYPE: "AC_ERROR_PORCESSOR_TYPE",
    AC_ERROR_LOAD_IMAGE: "AC_ERROR_LOAD_IMAGE",
    AC_ERROR_LOAD_VIDEO: "AC_ERROR_LOAD_VIDEO",
    AC_ERROR_INIT_VIDEO_WRITER: "AC_ERROR_INIT_VIDEO_WRITER",
    AC_ERROR_GPU_PROCESS: "AC_ERROR_GPU_PROCESS",
    AC_ERROR_SAVE_TO_NULL_POINTER: "AC_ERROR_SAVE_TO_NULL_POINTER",
    AC_ERROR_NOT_YUV444: "AC_ERROR_NOT_YUV444",
    AC_ERROR_VIDEO_MODE_UNINIT: "AC_ERROR_VIDEO_MODE_UNINIT",
    AC_ERROR_YUV444_AND_RGB32_AT_SAME_TIME: "AC_ERROR_YUV444_AND_RGB32_AT_SAME_TIME",
    AC_ERROR_CUDA_NOT_SUPPORTED: "AC_ERROR_CUDA_NOT_SUPPORTED"
}
class ACError(Exception):
    """Exception raised for error codes returned by the Anime4KCPP C API.

    :param code: an ac_error code from pyanime4k.wrapper
    """

    def __init__(self, code, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # .get() guards against codes missing from the lookup table, so an
        # unmapped code no longer raises KeyError and masks the real error.
        self.msg = error_code_str.get(code, f"unknown error code {code}")

    def __str__(self):
        return f"AC error: {self.msg}"
| StarcoderdataPython |
3211934 | <reponame>bradhackinen/frdocs<filename>preprocessing/compile_parsed.py
import os
from argparse import ArgumentParser
from pathlib import Path
import random
from collections import Counter
from tqdm import tqdm
import gzip
from lxml import etree as et
import pandas as pd
from frdocs.preprocessing.parsing import parse_reg_xml_tree, FrdocResolver
from frdocs.config import data_dir
'''
This script compiles parsed versions of each document from bulk XML files.
Each parsed document is a pandas dataframe, saved in pickle format. Files are
named by document number so that they can be retreived without looking up the
publication date.
'''
def iter_docs(xml_dir):
    """Yield one dict per Federal Register document in the daily XML dumps.

    Iterates the gzipped daily files in *xml_dir* in sorted (date) order.
    Each yielded dict has keys: ``doc_tree`` (lxml ElementTree rooted at the
    document element), ``volume``, ``publication_date``, ``start_page``,
    ``end_page`` and ``frdoc_string`` (``None`` when the FRDOC element is
    missing or ambiguous).
    """
    for xml_file in tqdm(sorted(os.listdir(xml_dir))):
        # Daily files are named "<publication-date>.<ext>".
        pub_date = xml_file.split('.')[0]
        with gzip.open(xml_dir / xml_file,'rb') as f:
            tree = et.parse(f)
        volume = int(tree.xpath('.//VOL/text()')[0])
        for fr_type in ['NOTICE','PRORULE','RULE']:
            for type_element in tree.xpath(f'.//{fr_type}S'):
                try:
                    start_page = int(type_element.xpath('.//PRTPAGE/@P')[0])
                except IndexError:
                    # No print-page marker for this section; -1 marks unknown.
                    start_page = -1
                for doc_element in type_element.xpath(f'.//{fr_type}'):
                    # doc_tree = et.ElementTree(doc_element)
                    doc = {
                        'doc_tree':et.ElementTree(doc_element),
                        # 'fr_type':fr_type.lower(),
                        'volume':volume,
                        'publication_date':pub_date,
                        'start_page':start_page,
                    }
                    # Get end page from page elements
                    print_pages = [int(page) for page in doc_element.xpath('.//PRTPAGE/@P') if page.isdigit()]
                    doc['end_page'] = max([start_page] + print_pages)
                    # End page for this doc is start page for next doc
                    start_page = doc['end_page']
                    # Can only get the FR document number from the end of the document
                    frdoc_elements = doc_element.xpath('./FRDOC')
                    if not frdoc_elements:
                        print(f'Warning: Could not find FRDOC element in {xml_file}: {tree.getpath(doc_element)}')
                        doc['frdoc_string'] = None
                    elif len(frdoc_elements) > 1:
                        print(f'Warning: Found {len(frdoc_elements)} FRDOC elements in {xml_file}: {tree.getpath(doc_element)}')
                        doc['frdoc_string'] = None
                    else:
                        doc['frdoc_string'] = ' '.join(frdoc_elements[0].itertext())
                    yield doc
def main(args):
    """Parse all FR documents to per-document pickles and update the index.

    Skips documents that already have a parsed pickle unless
    ``args.force_update`` is set. Prints progress/coverage diagnostics and
    writes a ``parsed`` flag column back into ``index.csv``.
    """
    print('Parsing documents from daily XML files')

    xml_dir = Path(data_dir) / 'raw' / 'xml'
    parsed_dir = Path(data_dir) / 'parsed'

    if not os.path.isdir(parsed_dir):
        os.mkdir(parsed_dir)

    frdoc_resolver = FrdocResolver()

    if not args.force_update:
        # Existing pickles are named "<frdoc>.pkl"; strip the extension.
        existing = {f.rsplit('.',1)[0] for f in os.listdir(parsed_dir)}
        print(f'Found {len(existing)} existing parsed files ({len(frdoc_resolver.all_frdocs - existing)} remaining to parse)')
    else:
        existing = set()

    n_parsed = 0
    frdoc_counts = Counter()
    failed = []
    for doc in iter_docs(xml_dir):
        frdoc = frdoc_resolver(doc)
        if frdoc:
            frdoc_counts.update([frdoc])
            if (frdoc not in existing) or args.force_update:
                parsed_df = parse_reg_xml_tree(doc['doc_tree'])
                parsed_df.to_pickle(parsed_dir/f'{frdoc}.pkl')
                existing.add(frdoc)
                n_parsed += 1
        else:
            # Could not resolve a document number; kept for diagnostics below.
            failed.append(doc)

    print(f'Parsed {n_parsed} new documents')

    completeness = len(existing)/len(frdoc_resolver.all_frdocs)
    print(f'Database now has parsed documents, covering {100*completeness:.1f}% of frdoc numbers with metadata')

    missing = list(frdoc_resolver.all_frdocs - existing)
    if missing:
        print(f'Missing parsed documents for {len(missing)} frdoc numbers ')
        print('Examples include:\n\t' + '\n\t'.join(random.sample(missing,k=min(20,len(missing)))))

    n_dups = sum(c > 1 for c in frdoc_counts.values())
    print(f'{n_dups} resolved document numbers appear multiple times')
    if n_dups:
        common_dups = {d:c for d,c in frdoc_counts.most_common(20) if c > 1}
        print('Most common examples:\n\t' + '\n\t'.join(f'{d} (x{c})' for d,c in common_dups.items()))

    print(f'Failed to resolve frdoc numbers for {len(failed)} documents')
    if failed:
        print('Examples include:')
        for failed_doc in random.sample(failed,k=min(20,len(failed))):
            print(failed_doc)

    # Add parsed information to index
    print('Adding parsing success info to index')
    index_df = pd.read_csv(Path(data_dir)/'index.csv')
    index_df['parsed'] = index_df['frdoc_number'].isin(existing)
    index_df.to_csv(Path(data_dir)/'index.csv',index=False)

    if completeness < 1:
        missing_df = index_df[~index_df['parsed']]
        print('Missing parsed docs by top publication date (top 20):')
        print(missing_df.groupby('publication_date')[['frdoc_number']].count().sort_values('frdoc_number',ascending=False).head(20))
if __name__ == "__main__":
    # CLI entry point: --force_update re-parses documents even when a parsed
    # pickle already exists on disk.
    parser = ArgumentParser()
    parser.add_argument('--force_update',dest='force_update',action='store_true')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
1674720 | #!/usr/bin/python3
"""
imports Flask instance for gunicorn configurations
gunicorn --bind 127.0.0.1:8001 wsgi:web_flask.app
"""
# The module name contains a leading digit and dashes-like naming
# ("6-number_odd_or_even"), which a plain import statement cannot spell,
# so __import__ is used instead.
web_flask = __import__('web_flask.6-number_odd_or_even',
                       globals(), locals(), ['*'])

if __name__ == "__main__":
    """runs the main flask app"""
    web_flask.app.run()
| StarcoderdataPython |
21756 | <gh_stars>0
from .l2norm import L2Norm
from .multibox_loss import MultiBoxLoss
from .multibox_focalloss import MultiBoxFocalLoss
__all__ = ['L2Norm', 'MultiBoxLoss', 'MultiBoxFocalLoss'] | StarcoderdataPython |
83753 | # Copyright (c) 2016-2020, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Module for handling paths. """
import collections.abc
import pathlib
import posixpath
import re
import sys
# Type hints
from typing import Union, Dict, Tuple
if sys.version_info >= (3, 9):
from re import Pattern, Match
from collections.abc import Sequence
else:
from typing import Pattern, Match, Sequence
# Define the type for all paths
Path = Union[str, bytes, pathlib.PurePath,
Sequence[Union[str, bytes, pathlib.PurePath]]]
# For escaping and unescaping unicode paths, we need compiled regular
# expressions to finding sequences of one or more dots, find slashes,
# and hex escapes. In addition, we need a dict to lookup the slash
# conversions. Compiling the regular expressions here at initialization
# will help performance by not having to compile new ones every time a
# path is processed.
_find_dots_re: Pattern[str] = re.compile('\\.+')
_find_invalid_escape_re: Pattern[str] = re.compile(
'(^|[^\\\\])\\\\(\\\\\\\\)*($|[^xuU\\\\]'
'|x[0-9a-fA-F]?($|[^0-9a-fA-F])'
'|u[0-9a-fA-F]{0,3}($|[^0-9a-fA-F])'
'|U[0-9a-fA-F]{0,7}($|[^0-9a-fA-F]))')
_find_fslashnull_re: Pattern[str] = re.compile('[\\\\/\x00]')
_find_escapes_re: Pattern[str] = re.compile(
'\\\\+(x[0-9a-fA-F]{2}|u[0-9a-fA-F]{4}|U[0-9a-fA-F]{8})')
_char_escape_conversions: Dict[str, str] = {'\x00': '\\x00',
'/': '\\x2f',
'\\': '\\\\'}
def _replace_fun_escape(m: Match[str]) -> str:
""" Hex/unicode escape single characters found in regex matches.
Supports single hex/unicode escapes of the form ``'\\xYY'``,
``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit and
converting single backslashes to double backslashes.
.. versionadded:: 0.2
Parameters
----------
m : regex match
Returns
-------
s : str
The hex excaped version of the character.
Raises
------
NotImplementedError
If the character is not in the supported character code range.
"""
c = m.group(0)
# If it is one of the characters that we use a particular escape
# for, return it.
if c in _char_escape_conversions:
return _char_escape_conversions[c]
# We need to make the \\xYY, \\uYYYY, or \\UYYYYYYYY encoding. To do
# that, we get the character code and do different things depending
# on its size.
value = ord(c)
if value <= 0xFF:
return '\\x{0:02x}'.format(value)
elif value <= 0xFFFF:
return '\\u{0:04x}'.format(value)
elif value <= 0xFFFFFFFF:
return '\\U{0:08x}'.format(value)
else:
raise NotImplementedError('Cannot escape a character whose '
'code it outside of the range '
'0 - 0xFFFFFFFF.')
def _replace_fun_unescape(m: Match[str]) -> str:
""" Decode single hex/unicode escapes found in regex matches.
Supports single hex/unicode escapes of the form ``'\\xYY'``,
``'\\uYYYY'``, and ``'\\UYYYYYYYY'`` where Y is a hex digit. Only
decodes if there is an odd number of backslashes.
.. versionadded:: 0.2
Parameters
----------
m : regex match
Returns
-------
c : str
The unescaped character.
"""
slsh = b'\\'.decode('ascii')
s = m.group(0)
count = s.count(slsh)
if count % 2 == 0:
return s
else:
c = chr(int(s[(count + 1):], base=16))
return slsh * (count - 1) + c
def escape_path(pth: Union[str, bytes]) -> str:
    """ Hex/unicode escapes a path.

    Produces a representation of *pth* that can be stored faithfully in
    an HDF5 file without being interpreted as directory structure:
    leading ``'.'`` characters, ``'/'`` and nulls become ``'\\xYY'``
    escapes, single backslashes become double backslashes, and other
    escaped characters use ``'\\xYY'``, ``'\\uYYYY'`` or
    ``'\\UYYYYYYYY'`` forms depending on the character's code point.

    .. versionadded:: 0.2

    Parameters
    ----------
    pth : str or bytes
        The path to escape.

    Returns
    -------
    epth : str
        The escaped path.

    Raises
    ------
    TypeError
        If `pth` is not the right type.

    See Also
    --------
    unescape_path
    """
    if isinstance(pth, bytes):
        pth = pth.decode('utf-8')
    elif not isinstance(pth, str):
        raise TypeError('pth must be str or bytes.')
    # Leading dots are escaped literally; the remainder goes through the
    # character-level escape regex.
    leading = _find_dots_re.match(pth)
    n_dots = 0 if leading is None else leading.end()
    return '\\x2e' * n_dots \
        + _find_fslashnull_re.sub(_replace_fun_escape, pth[n_dots:])
def unescape_path(pth: Union[str, bytes]) -> str:
    """ Hex/unicode unescapes a path.

    Reverses ``escape_path``: decodes ``'\\xYY'``, ``'\\uYYYY'`` and
    ``'\\UYYYYYYYY'`` escapes (Y being hex digits) and collapses double
    backslashes back to single backslashes.

    .. versionadded:: 0.2

    Parameters
    ----------
    pth : str or bytes
        The path to unescape.

    Returns
    -------
    unpth : str
        The unescaped path.

    Raises
    ------
    TypeError
        If `pth` is not the right type.
    ValueError
        If an invalid escape is found.

    See Also
    --------
    escape_path
    """
    if isinstance(pth, bytes):
        pth = pth.decode('utf-8')
    elif not isinstance(pth, str):
        raise TypeError('pth must be str or bytes.')
    # Reject malformed escape sequences up front.
    if _find_invalid_escape_re.search(pth) is not None:
        raise ValueError('Invalid escape found.')
    # Decode hex/unicode escapes first, then collapse double backslashes.
    decoded = _find_escapes_re.sub(_replace_fun_unescape, pth)
    return decoded.replace('\\\\', '\\')
def process_path(pth: Path) -> Tuple[str, str]:
    """ Processes paths.

    Processes the provided path and breaks it into it Group part
    (`groupname`) and target part (`targetname`). ``bytes`` paths are
    converted to ``str``. Separated paths are given as an iterable of
    ``str`` and ``bytes``. Each part of a separated path is escaped
    using ``escape_path``. Otherwise, the path is assumed to be already
    escaped. Escaping is done so that targets with a part that starts
    with one or more periods, contain slashes, and/or contain nulls can
    be used without causing the wrong Group to be looked in or the wrong
    target to be looked at. It essentially allows one to make a Dataset
    named ``'..'`` or ``'a/a'`` instead of moving around in the Dataset
    hierarchy.

    All paths are POSIX style.

    .. versionadded:: 0.2

    Parameters
    ----------
    pth : str or bytes or pathlib.PurePath or Sequence
        The POSIX style path as a ``str`` or ``bytes`` or the
        separated path in an Sequence with the elements being ``str``,
        ``bytes``, and ``pathlib.PurePath``. For separated paths,
        escaping will be done on each part.

    Returns
    -------
    groupname : str
        The path to the Group containing the target `pth` was pointing
        to.
    targetname : str
        The name of the target pointed to by `pth` in the Group
        `groupname`.

    Raises
    ------
    TypeError
        If `pth` is not of the right type.

    See Also
    --------
    escape_path
    """
    # Do conversions and possibly escapes.
    if isinstance(pth, bytes):
        p = pth.decode('utf-8')
    elif isinstance(pth, str):
        p = pth
    elif isinstance(pth, pathlib.PurePath):
        # Already-parsed path object: drop any drive/root part (HDF5 paths
        # have no drive concept) and rejoin the remaining parts POSIX style.
        parts = pth.parts
        if pth.root not in ('', '/'):
            p = posixpath.join(*parts[1:])
        else:
            p = posixpath.join(*parts)
    elif isinstance(pth, collections.abc.Sequence):
        # Escape (and possibly convert to str) each element and then
        # join them all together.
        parts_seq = []
        for i, s in enumerate(pth):
            if isinstance(s, bytes):
                s = s.decode('utf-8')
            elif isinstance(s, pathlib.PurePath):
                s = str(s)
            elif not isinstance(s, str):
                raise TypeError('Elements of p must be str, bytes, or '
                                'pathlib.PurePath.')
            parts_seq.append(escape_path(s))
        parts = tuple(parts_seq)
        p = posixpath.join(*parts)
    else:
        raise TypeError('p must be str, bytes, pathlib.PurePath, or '
                        'an Sequence solely of one of those three.')
    # Remove double slashes and a non-root trailing slash.
    path = posixpath.normpath(p)
    # Extract the group name and the target name (will be a dataset if
    # data can be mapped to it, but will end up being made into a group
    # otherwise. As HDF5 files use posix path, conventions, posixpath
    # will do everything.
    groupname = posixpath.dirname(path)
    targetname = posixpath.basename(path)
    # If groupname got turned into blank, then it is just root.
    if len(groupname) == 0:
        groupname = b'/'.decode('ascii')
    # If targetname got turned blank, then it is the current directory.
    if len(targetname) == 0:
        targetname = b'.'.decode('ascii')
    return groupname, targetname
| StarcoderdataPython |
3271287 | <reponame>zatang007/BayesianOptimization<gh_stars>1-10
from bayes_opt.bayesian_optimization import Observable
EVENTS = ["a", "b", "c"]
class SimpleObserver:
    """Minimal observer that just counts how many events it receives."""

    def __init__(self):
        # Number of update() calls received so far.
        self.counter = 0

    def update(self, event, instance):
        """Record one notification; the event name and instance are ignored."""
        self.counter = self.counter + 1
def test_get_subscribers():
    """Subscribing to one event registers the observer for that event only."""
    sub = SimpleObserver()
    observable = Observable(events=EVENTS)
    observable.subscribe("a", sub)

    subscribers_a = observable.get_subscribers('a')
    assert sub in subscribers_a
    assert len(subscribers_a) == 1
    for other_event in ('b', 'c'):
        assert sub not in observable.get_subscribers(other_event)
        assert len(observable.get_subscribers(other_event)) == 0
def test_unsubscribe():
    """After unsubscribing, the observer is no longer registered."""
    sub = SimpleObserver()
    observable = Observable(events=EVENTS)
    observable.subscribe("a", sub)
    observable.unsubscribe("a", sub)

    remaining = observable.get_subscribers('a')
    assert sub not in remaining
    assert len(remaining) == 0
def test_dispatch():
    """Dispatching an event must notify only that event's subscribers."""
    observer_a = SimpleObserver()
    observer_b = SimpleObserver()
    observable = Observable(events=EVENTS)
    observable.subscribe("a", observer_a)
    observable.subscribe("b", observer_b)

    assert observer_a.counter == 0
    assert observer_b.counter == 0

    observable.dispatch('b')
    assert observer_a.counter == 0
    assert observer_b.counter == 1

    observable.dispatch('a')
    observable.dispatch('b')
    assert observer_a.counter == 1
    assert observer_b.counter == 2

    observable.dispatch('a')
    observable.dispatch('c')  # 'c' has no subscribers; counters must not move.
    assert observer_a.counter == 2
    # Fixed copy-paste bug: the original asserted observer_a.counter twice
    # and never verified observer_b's final count.
    assert observer_b.counter == 2
if __name__ == '__main__':
    r"""
    CommandLine:
        python tests/test_observer.py
    """
    # Run just this file's tests when executed directly.
    import pytest
    pytest.main([__file__])
| StarcoderdataPython |
3354519 | # -*- coding: utf-8 -*-
from dot_commands import setupCommands, fileAnalizer
import os
# Commands exposed by this module.
# NOTE(review): "registerd" is a typo for "registered", but external code may
# look this name up -- do not rename without checking callers.
registerd = ["setup", "info"]
# Stray empty-string expression statement; a no-op left in the source.
''
def _prettyTable(data, total):
    # Pretty-print file-type counts sorted by count (descending), with a
    # dash "ruler" aligning the percentage column.
    # NOTE(review): Python 2 code (print statements); will not run under
    # Python 3.
    maxSize = 0
    postWordSize = 7
    # Width of the longest label, used to align the output columns.
    for line in data:
        if len(line["label"]) > maxSize:
            maxSize = len(line["label"])
    data = sorted(data, key=lambda k: k['count'])[::-1]
    for index,line in enumerate(data):
        percentOffset = (maxSize - len(line["label"])) + postWordSize
        percent = float((100.0*line["count"]) / total)
        print str(index + 1) + "º " + line["label"] + " files ",
        print "-" * percentOffset,
        print("%.2f%% - %d" % (percent, line["count"]))
def setup():
    # Delegate to dot_commands.setupCommands for the current working directory.
    setupCommands.list(os.getcwd())
def info():
    # Print the project's main language and a per-file-type breakdown for
    # the current working directory. (Python 2 print statements.)
    _dir = os.getcwd()
    print "Project main language:"
    print fileAnalizer.getProjectMainType(_dir)
    print "\nProject Data: "
    files, total = fileAnalizer.listFiles(_dir, True)
    _prettyTable(files,total)
# n - 100
# x - y
# n/y = 100x
# y = 100x/n
| StarcoderdataPython |
3282622 | <gh_stars>1-10
#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
from tensorlayer import logging
from tensorlayer.decorators import deprecated_alias
from tensorlayer.layers.core import Layer
from tensorlayer.files import utils
# from tensorlayer.layers.core import TF_GRAPHKEYS_VARIABLES
__all__ = [
'Lambda',
'ElementwiseLambda',
]
class Lambda(Layer):
    """A layer that takes a user-defined function using Lambda.

    If the function has trainable weights, the weights should be provided.
    Remember to make sure the weights provided when the layer is constructed are SAME as
    the weights used when the layer is forwarded.

    For multiple inputs see :class:`ElementwiseLambda`.

    Parameters
    ----------
    fn : function
        The function that applies to the inputs (e.g. tensor from the previous layer).
    fn_weights : list
        The trainable weights for the function if any. Optional.
    fn_args : dict
        The arguments for the function if any. Optional.
    name : str or None
        A unique layer name.

    Examples
    ---------
    Non-parametric and non-args case
    This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).

    >>> x = tl.layers.Input([8, 3], name='input')
    >>> y = tl.layers.Lambda(lambda x: 2*x, name='lambda')(x)

    Non-parametric and with args case
    This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).

    >>> def customize_func(x, foo=42): # x is the inputs, foo is an argument
    >>>     return foo * x
    >>> x = tl.layers.Input([8, 3], name='input')
    >>> lambdalayer = tl.layers.Lambda(customize_func, fn_args={'foo': 2}, name='lambda')(x)

    Any function with outside variables
    This case has not been supported in Model.save() / Model.load() yet.
    Please avoid using Model.save() / Model.load() to save / load models that contain such Lambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights.
    Note: In this case, fn_weights should be a list, and then the trainable weights in this Lambda layer can be added into the weights of the whole model.

    >>> vara = [tf.Variable(1.0)]
    >>> def func(x):
    >>>     return x + vara
    >>> x = tl.layers.Input([8, 3], name='input')
    >>> y = tl.layers.Lambda(func, fn_weights=vara, name='lambda')(x)

    Parametric case, merge other wrappers into TensorLayer
    This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).

    >>> layers = [
    >>>     tf.keras.layers.Dense(10, activation=tf.nn.relu),
    >>>     tf.keras.layers.Dense(5, activation=tf.nn.sigmoid),
    >>>     tf.keras.layers.Dense(1, activation=tf.identity)
    >>> ]
    >>> perceptron = tf.keras.Sequential(layers)
    >>> # in order to compile keras model and get trainable_variables of the keras model
    >>> _ = perceptron(np.random.random([100, 5]).astype(np.float32))

    >>> class CustomizeModel(tl.models.Model):
    >>>     def __init__(self):
    >>>         super(CustomizeModel, self).__init__()
    >>>         self.dense = tl.layers.Dense(in_channels=1, n_units=5)
    >>>         self.lambdalayer = tl.layers.Lambda(perceptron, perceptron.trainable_variables)

    >>>     def forward(self, x):
    >>>         z = self.dense(x)
    >>>         z = self.lambdalayer(z)
    >>>         return z

    >>> optimizer = tf.optimizers.Adam(learning_rate=0.1)
    >>> model = CustomizeModel()
    >>> model.train()

    >>> for epoch in range(50):
    >>>     with tf.GradientTape() as tape:
    >>>         pred_y = model(data_x)
    >>>         loss = tl.cost.mean_squared_error(pred_y, data_y)

    >>>     gradients = tape.gradient(loss, model.trainable_weights)
    >>>     optimizer.apply_gradients(zip(gradients, model.trainable_weights))

    """

    def __init__(
        self,
        fn,
        fn_weights=None,
        fn_args=None,
        name=None,
    ):
        super(Lambda, self).__init__(name=name)
        self.fn = fn
        # Keep a reference to externally-provided trainable weights so they
        # are collected along with the rest of the model's weights.
        self._trainable_weights = fn_weights if fn_weights is not None else []
        self.fn_args = fn_args if fn_args is not None else {}

        try:
            fn_name = repr(self.fn)
        except Exception:
            # Narrowed from a bare except: repr() of exotic callables may
            # fail, but KeyboardInterrupt/SystemExit must not be swallowed.
            fn_name = 'name not available'
        logging.info("Lambda  %s: func: %s, len_weights: %s" % (self.name, fn_name, len(self._trainable_weights)))

        self.build()
        self._built = True

    def __repr__(self):
        s = '{classname}('
        s += 'fn={fn_name},'
        s += 'len_weights={len_weights},'
        s += 'name=\'{name}\''
        s += ')'
        try:
            fn_name = repr(self.fn)
        except Exception:
            fn_name = 'name not available'
        return s.format(
            classname=self.__class__.__name__, fn_name=fn_name, len_weights=len(self._trainable_weights),
            **self.__dict__
        )

    def build(self, inputs_shape=None):
        # Nothing to build: any weights were supplied at construction time.
        pass

    def forward(self, inputs, **kwargs):
        # Keyword arguments given at call time override the stored fn_args.
        if len(kwargs) == 0:
            outputs = self.fn(inputs, **self.fn_args)
        else:
            outputs = self.fn(inputs, **kwargs)
        return outputs

    def get_args(self):
        """Serialization hook: Keras callables are saved with their
        architecture; plain Python callables are tagged "normal"."""
        init_args = {}
        if isinstance(self.fn, tf.keras.layers.Layer) or isinstance(self.fn, tf.keras.Model):
            init_args.update({"layer_type": "keraslayer"})
            init_args["fn"] = utils.save_keras_model(self.fn)
            init_args["fn_weights"] = None
            if len(self._nodes) == 0:
                init_args["keras_input_shape"] = []
            else:
                init_args["keras_input_shape"] = self._nodes[0].in_tensors[0].get_shape().as_list()
        else:
            init_args = {"layer_type": "normal"}
        return init_args
class ElementwiseLambda(Layer):
    """A layer that use a custom function to combine multiple :class:`Layer` inputs.

    If the function has trainable weights, the weights should be provided.
    Remember to make sure the weights provided when the layer is constructed are SAME as
    the weights used when the layer is forwarded.

    Parameters
    ----------
    fn : function
        The function that applies to the inputs (e.g. tensor from the previous layer).
    fn_weights : list
        The trainable weights for the function if any. Optional.
    fn_args : dict
        The arguments for the function if any. Optional.
    name : str or None
        A unique layer name.

    Examples
    --------
    Non-parametric and with args case
    This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).

    z = mean + noise * tf.exp(std * 0.5) + foo

    >>> def func(noise, mean, std, foo=42):
    >>>     return mean + noise * tf.exp(std * 0.5) + foo
    >>> noise = tl.layers.Input([100, 1])
    >>> mean = tl.layers.Input([100, 1])
    >>> std = tl.layers.Input([100, 1])
    >>> out = tl.layers.ElementwiseLambda(fn=func, fn_args={'foo': 84}, name='elementwiselambda')([noise, mean, std])

    Non-parametric and non-args case
    This case is supported in the Model.save() / Model.load() to save / load the whole model architecture and weights(optional).

    z = mean + noise * tf.exp(std * 0.5)

    >>> noise = tl.layers.Input([100, 1])
    >>> mean = tl.layers.Input([100, 1])
    >>> std = tl.layers.Input([100, 1])
    >>> out = tl.layers.ElementwiseLambda(fn=lambda x, y, z: x + y * tf.exp(z * 0.5), name='elementwiselambda')([noise, mean, std])

    Any function with outside variables
    This case has not been supported in Model.save() / Model.load() yet.
    Please avoid using Model.save() / Model.load() to save / load models that contain such ElementwiseLambda layer. Instead, you may use Model.save_weights() / Model.load_weights() to save / load model weights.
    Note: In this case, fn_weights should be a list, and then the trainable weights in this ElementwiseLambda layer can be added into the weights of the whole model.

    z = mean + noise * tf.exp(std * 0.5) + vara

    >>> vara = [tf.Variable(1.0)]
    >>> def func(noise, mean, std):
    >>>     return mean + noise * tf.exp(std * 0.5) + vara
    >>> noise = tl.layers.Input([100, 1])
    >>> mean = tl.layers.Input([100, 1])
    >>> std = tl.layers.Input([100, 1])
    >>> out = tl.layers.ElementwiseLambda(fn=func, fn_weights=vara, name='elementwiselambda')([noise, mean, std])

    """

    def __init__(
        self,
        fn,
        fn_weights=None,
        fn_args=None,
        name=None,  #'elementwiselambda',
    ):
        super(ElementwiseLambda, self).__init__(name=name)
        self.fn = fn
        # Keep a reference to externally-provided trainable weights so they
        # are collected along with the rest of the model's weights.
        self._trainable_weights = fn_weights if fn_weights is not None else []
        self.fn_args = fn_args if fn_args is not None else {}

        try:
            fn_name = repr(self.fn)
        except Exception:
            # Narrowed from a bare except: repr() of exotic callables may
            # fail, but KeyboardInterrupt/SystemExit must not be swallowed.
            fn_name = 'name not available'
        logging.info(
            "ElementwiseLambda  %s: func: %s, len_weights: %s" % (self.name, fn_name, len(self._trainable_weights))
        )

        self.build()
        self._built = True

    def __repr__(self):
        s = '{classname}('
        s += 'fn={fn_name},'
        s += 'len_weights={len_weights},'
        s += 'name=\'{name}\''
        s += ')'
        try:
            fn_name = repr(self.fn)
        except Exception:
            fn_name = 'name not available'
        return s.format(
            classname=self.__class__.__name__, fn_name=fn_name, len_weights=len(self._trainable_weights),
            **self.__dict__
        )

    def build(self, inputs_shape=None):
        # do nothing
        # the weights of the function are provided when the Lambda layer is constructed
        pass

    # @tf.function
    def forward(self, inputs, **kwargs):
        if not isinstance(inputs, list):
            raise TypeError(
                "The inputs should be a list of values which corresponds with the customised lambda function."
            )

        # Keyword arguments given at call time override the stored fn_args.
        if len(kwargs) == 0:
            outputs = self.fn(*inputs, **self.fn_args)
        else:
            outputs = self.fn(*inputs, **kwargs)

        return outputs
| StarcoderdataPython |
102345 | # Generated by Django 4.0 on 2021-12-26 07:30
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 4.0: alters Player.hash_redeemable to a
    # BooleanField with default=False and a human-readable verbose_name.

    dependencies = [
        ('stats', '0025_alter_player_created_by'),
    ]

    operations = [
        migrations.AlterField(
            model_name='player',
            name='hash_redeemable',
            field=models.BooleanField(default=False, verbose_name='May be redeemed by a user'),
        ),
    ]
| StarcoderdataPython |
3283629 | <reponame>jasoncao11/nlp-notebook
import torch
import numpy as np
from sklearn import metrics
from transformers import BertModel
from load_data import traindataloader, valdataloader
# Pretrained BERT checkpoint directory; the model is loaded once at import
# time and used for inference only in this script (no fine-tuning).
BERT_PATH = '../bert-base-chinese'
device = "cuda" if torch.cuda.is_available() else 'cpu'
bert = BertModel.from_pretrained(BERT_PATH).to(device)
def get_vecs():
    """Run BERT over the training set and return pooled sentence vectors.

    Returns
    -------
    numpy.ndarray of shape (num_samples, 768): one pooled vector per
    training example (first sentence of each pair), gathered on the CPU.
    """
    # Collect per-batch arrays and concatenate once at the end: the original
    # np.append inside the loop re-copied the whole array every iteration
    # (accidentally quadratic in the number of batches).
    chunks = []
    with torch.no_grad():
        for batch in traindataloader:
            input_ids = batch['input_ids_1'].to(device)
            attention_mask = batch['attention_mask_1'].to(device)
            outputs = bert(input_ids, attention_mask=attention_mask, output_hidden_states=True)
            pooler = outputs[1]  # [batch_size, 768]
            chunks.append(pooler.cpu().data.numpy())
    if not chunks:
        # Preserve the original empty-result shape.
        return np.empty((0, 768))
    return np.concatenate(chunks, axis=0)
# Pooled BERT vectors for the whole training set; used below to fit the
# whitening transform.
vecs = get_vecs()
def compute_kernel_bias(vecs):
    """Fit a whitening transform (BERT-whitening) from sentence vectors.

    Parameters
    ----------
    vecs : numpy.ndarray, shape (num_samples, embedding_size)

    Returns
    -------
    (kernel, bias) : tuple of numpy.ndarray
        The whitened vector is obtained as ``y = (x + bias).dot(kernel)``;
        ``bias`` is the negated sample mean with shape (1, embedding_size).
    """
    mean = vecs.mean(axis=0, keepdims=True)
    covariance = np.cov(vecs.T)
    # SVD of the (symmetric PSD) covariance gives cov = u @ diag(s) @ u.T,
    # so u @ diag(1/sqrt(s)) whitens centered vectors to unit covariance.
    u, s, vh = np.linalg.svd(covariance)
    whitening = u @ np.diag(1 / np.sqrt(s))
    return whitening, -mean
kernel, bias = compute_kernel_bias(vecs)
# Move the whitening parameters onto the model's device so they can be
# applied to pooled outputs without leaving the accelerator.
kernel = torch.from_numpy(kernel).to(device)
bias = torch.from_numpy(bias).to(device)

# Sweep cosine-similarity thresholds and report validation accuracy for the
# binary "similar / not similar" decision on whitened sentence vectors.
for t in [0.5, 0.8, 0.9, 0.95]:
    predict_all = np.array([], dtype=int)
    labels_all = np.array([], dtype=int)
    with torch.no_grad():
        for batch in valdataloader:
            input_ids_1, attention_mask_1, input_ids_2, attention_mask_2, labels = batch['input_ids_1'].to(device), batch['attention_mask_1'].to(device), batch['input_ids_2'].to(device), batch['attention_mask_2'].to(device), batch['labels'].to(device)
            outputs_1 = bert(input_ids_1, attention_mask=attention_mask_1, output_hidden_states=True)
            pooler_1 = outputs_1[1]# [batch_size, 768]
            pooler_1 = torch.matmul((pooler_1 + bias), kernel)
            outputs_2 = bert(input_ids_2, attention_mask=attention_mask_2, output_hidden_states=True)
            pooler_2 = outputs_2[1]# [batch_size, 768]
            pooler_2 = torch.matmul((pooler_2 + bias), kernel)
            simi = torch.cosine_similarity(pooler_1, pooler_2)
            # Predict "similar" (1) when cosine similarity clears the threshold.
            pred = torch.where(simi>=t, 1, 0)
            pred = pred.cpu().numpy()
            predict_all = np.append(predict_all, pred)
            truth = labels.cpu().numpy()
            labels_all = np.append(labels_all, truth)
    acc = metrics.accuracy_score(labels_all, predict_all)
    print(f'Threshold-{t}: Accuracy on dev is {acc}')
1790954 | # -*- coding: utf-8 -*-
"""
A module into which all ORM classes are imported.
To avoid circular imports almost all code should import ORM classes from this
module rather than importing them directly,
``from h import models`` rather than ``from h.foo import models``
This is a convenience - you can just import this one module and all of the
ORM classes will be defined, instead of having to import every models module
individually.
For example when testing ORM classes the test module for ``h.foo.models.Bar``
can't just import ``h.foo.models``, it would also need to import the models
module for each database table that ``Bar`` has a (direct or indirect) foreign
key to. So for convenience the test module can instead just do
``from h import models`` and have all ORM classes be defined.
"""
from memex.models.annotation import Annotation
from memex.models.document import Document, DocumentMeta, DocumentURI
from h.notification import models as notification_models
from h.models.activation import Activation
from h.models.auth_client import AuthClient
from h.models.blocklist import Blocklist
from h.models.feature import Feature
from h.models.feature_cohort import FeatureCohort
from h.models.group import Group
from h.models.token import Token
from h.models.user import User
from h.models.uri import Uri
__all__ = (
'Activation',
'Annotation',
'AuthClient',
'Blocklist',
'Document',
'DocumentMeta',
'DocumentURI',
'Feature',
'FeatureCohort',
'Group',
'Subscriptions',
'Token',
'User',
'Uri',
)
# Re-exported alias so callers can refer to h.models.Subscriptions directly.
Subscriptions = notification_models.Subscriptions


def includeme(_):
    """Pyramid configuration hook; deliberately a no-op.

    This module is included for side-effects only. SQLAlchemy models register
    with the global metadata object when imported.
    """
    pass
| StarcoderdataPython |
4823451 | <reponame>manishanker/octopuslabs-test
#!/usr/bin/env python2.7
import MySQLdb
import base64
from asymmetric_encryption import encrypt_message, decrypt_message
import config
class DBConnection:
    """Lazily-created MySQL connection holder (Python 2 / MySQLdb).

    NOTE(review): the ``<PASSWORD>`` placeholders below are redaction
    artifacts from the data source and are not valid Python; the real
    credential handling must be restored before this module can run.
    """
    def __init__(self, DB_HOST, DB_PORT, DB_USER, DB_PASSWORD, DB_NAME):
        self.host = DB_HOST
        self.port = DB_PORT
        self.name = DB_NAME
        self.user = DB_USER
        self.password = <PASSWORD>
        self.conn = None

    def get_conn(self):
        # Open the connection on first use and cache it for later calls.
        if self.conn is None:
            self.conn = MySQLdb.connect(host = self.host,
                                        port = self.port,
                                        db = self.name,
                                        user = self.user,
                                        passwd = <PASSWORD>)
        return self.conn
def create_table(key):
    """Create the ``word_data`` table (``key == "tuple"``) or the
    ``url_data`` table (any other key) if it does not already exist.

    NOTE(review): Python 2 module; the bare ``except`` below swallows all
    errors from CREATE TABLE and only prints diagnostics.
    """
    mydbconnobj = DBConnection(config.config["host"],config.config["port"],\
        config.config["user"],config.config["passwd"],config.config["db"])
    mydbconn = mydbconnobj.get_conn()
    # Create table as per requirement
    if key=="tuple":
        sql = """CREATE TABLE IF NOT EXISTS word_data (\
            word LONGTEXT NOT NULL,\
            word_encrypted LONGTEXT,\
            frequency INT,\
            PRIMARY KEY(word(100)),\
            FULLTEXT (word)\
            )"""
    else:
        sql = """CREATE TABLE IF NOT EXISTS url_data (\
            url LONGTEXT NOT NULL,\
            senti LONGTEXT,\
            PRIMARY KEY(url(100)),\
            FULLTEXT (url)\
            )"""
    cursor = mydbconn.cursor()
    try:
        cursor.execute(sql)
    except:
        print "sql create", sql, key
    mydbconn.close()
def update_table(word_tuple, key):
    """Insert one row into ``word_data`` (``key == "tuple"``) or ``url_data``.

    SECURITY NOTE(review): the SQL is built with %-string formatting from
    caller-supplied values -- vulnerable to SQL injection; should use
    parameterized queries (``cursor.execute(sql, params)``) instead.

    NOTE(review): on failure the bare ``except`` creates the table but does
    NOT retry the insert, so the row that triggered table creation is lost.
    """
    mydbconnobj = DBConnection(config.config["host"],config.config["port"],\
        config.config["user"],config.config["passwd"],config.config["db"])
    mydbconn = mydbconnobj.get_conn()
    #print "word_tuple", word_tuple
    # Create table as per requirement
    if key=="tuple":
        sql = "INSERT INTO word_data(\
            word, word_encrypted, frequency)\
            VALUES ('%s', '%s', '%d') ON DUPLICATE KEY UPDATE frequency = frequency + values(frequency)" %\
            (word_tuple[0], word_tuple[1], word_tuple[2])
        #print "sql update", sql
    else:
        sql = "INSERT INTO url_data(\
            url, senti)\
            VALUES ('%s', '%s')" %\
            (word_tuple[0], word_tuple[1])
    cursor = mydbconn.cursor()
    try:
        cursor.execute(sql)
    except:
        create_table(key)
    mydbconn.commit()
    mydbconn.close()
def show_data():
    """Return (url_rows, word_rows) fetched from url_data and word_data."""
    mydbconnobj = DBConnection(config.config["host"], config.config["port"],
                               config.config["user"], config.config["passwd"],
                               config.config["db"])
    mydbconn = mydbconnobj.get_conn()
    cursor = mydbconn.cursor()
    try:
        cursor.execute("select * from url_data")
        data_url = cursor.fetchall()
        cursor.execute("select * from word_data")
        data_word = cursor.fetchall()
    finally:
        # Bug fix: the original called close() *after* the return
        # statement, so the connection was never released.
        mydbconn.close()
    return (data_url, data_word)
#create_table()
#t = ('mani123', 'mani', 3)
#update_table(t, "tuple")
#u=('urlhashed', 'neutral')
#update_table(u,"str")
#show_data()
| StarcoderdataPython |
3325518 | from .mixins import GroupRequiredMixin
from rest_framework.response import Response
from rest_framework.views import APIView
import datetime, time
import pandas as pd
import sys, os
import numpy as np
import re
from pandas.tseries.offsets import BDay
import scipy.stats
import igraph
try:
from .semutils.analytics.portfolio.metrics import calculate_drawdowns
except:
from semutils.analytics.portfolio.metrics import calculate_drawdowns
APP_ROOT = os.path.realpath(os.path.dirname(__file__))
DataDir = os.path.join(APP_ROOT, 'data')
# #for debug
# from .mixins import GroupRequiredMixin
# class APIView(object):
# pass
# DataDir = 'data_prod'
class TradingView(GroupRequiredMixin, APIView):
    """Portfolio performance dashboard endpoint.

    GET builds equity curves for the portfolio and three S&P benchmarks
    from local parquet files, computes per-series summary statistics,
    and returns everything as a JSON-serializable context dict.
    """
    # Only members of the 'trading' group may access this view.
    group_required = ['trading']

    def get(self, request, format=None):
        """Return chart data plus summary stats for portfolio vs. benchmarks."""
        ah = pd.read_parquet(os.path.join(DataDir, 'account_history.parquet'))
        ah['Portfolio_daily_return'] = ah.PnlReturn
        ah['Portfolio_equity_curve'] = (1 + ah.CumPnl)
        benchmarks = ['SP500','SP400','SP600']
        for b in benchmarks:
            b_data = pd.read_parquet(os.path.join(DataDir, b + '.parquet'))
            # Align each benchmark's daily index return to the portfolio's
            # trading dates, then compound into an equity curve.
            ah[b + '_daily_return'] = ah.TradeDate.map(b_data.IDX_PRICE.pct_change())
            ah[b + '_equity_curve'] = (1 + ah[b + '_daily_return']).cumprod()
        stats_cols = ['Portfolio'] + [x for x in benchmarks]
        stats = pd.DataFrame(columns=stats_cols)
        # One column of pre-formatted display strings per series.
        for c in stats_cols:
            daily_ret = ah[c + '_daily_return']
            stats.loc['Cumulative Return (bps)', c] = "{0:.0f}".format((ah[c + '_equity_curve'].iloc[-1] - 1) * 10000)
            stats.loc['Winning Days (%)', c] = "{0:.0%}".format((daily_ret > 0).mean())
            stats.loc['Min Return (bps)', c] = "{0:.0f}".format(daily_ret.min() * 10000)
            stats.loc['Max Return (bps)', c] = "{0:.0f}".format(daily_ret.max() * 10000)
            stats.loc['Mean Return (bps)', c] = "{0:.0f}".format(daily_ret.mean() * 10000)
            stats.loc['Std Dev Return (bps)', c] = "{0:.0f}".format(daily_ret.std() * 10000)
            stats.loc['Skew', c] = "{0:.1f}".format(scipy.stats.skew(daily_ret))
            stats.loc['Kurtosis', c] = "{0:.1f}".format(scipy.stats.kurtosis(daily_ret))
            # Annualization assumes 252 trading days per year.
            stats.loc['Volatility - Annualized (%)', c] = "{0:.1%}".format(np.sqrt(252) * daily_ret.std())
            stats.loc['Sharpe - Annualized', c] = "{0:.1f}".format(np.sqrt(252) * daily_ret.mean() / daily_ret.std())
            stats.loc['Sortino - Annualized', c] = "{0:.1f}".format(
                np.sqrt(252) * daily_ret.mean() / daily_ret.clip(upper=0).std())
            drawdown_series, max_drawdown, drawdown_dur, max_drawdown_dur = calculate_drawdowns(ah[c + '_equity_curve'])
            stats.loc['Max Drawdown (bps)', c] = "{0:.0f}".format(max_drawdown * 10000)
            stats.loc['Max Drawdown Days', c] = "{0:.0f}".format(max_drawdown_dur)
        stats.index.name = 'Metric'
        StartingDate = ah.TradeDate.iloc[0]
        EndingDate = ah.TradeDate.iloc[-1]
        # convert timestamp to timetuple (epoch seconds for the chart x-axis)
        ah['TradeDate'] = ah['TradeDate'].apply(lambda x: time.mktime(x.timetuple()))
        stats.reset_index(inplace=True)
        # build context
        context = {'StartingDate': StartingDate.strftime("%m/%d/%Y"),
                   'EndingDate': EndingDate.strftime("%m/%d/%Y"),
                   'StartingNAV': '${:,}'.format(int(round(ah.SOD_Nav.iloc[0], 0))),
                   'EndingNAV': '${:,}'.format(int(round(ah.EOD_Nav.iloc[-1], 0))),
                   'TimeWeightedReturn': '{:.2%}'.format(ah.Portfolio_equity_curve.iloc[-1] - 1),
                   'chart_data_strategy': ah[['TradeDate', 'Portfolio_equity_curve']].values.tolist(),
                   'chart_data_benchmark': ah[['TradeDate', 'SP500_equity_curve']].values.tolist(),
                   'benchmark_name': 'SP500',
                   'stats': stats.to_dict(orient='records'),
                   'file_type': "html",
                   "title": "Dashboard"}
        return Response(context)
class TradingExposuresView(GroupRequiredMixin, APIView):
    """Latest-date gross/net exposure breakdown by sector and industry.

    GET joins positions with the security master, computes per-position
    NAV weights, aggregates gross and gross-normalized net exposure at
    both (Sector, Industry) and Sector level, and returns the most
    recent date's rows.
    """
    # Only members of the 'trading' group may access this view.
    group_required = ['trading']

    def get(self, request, format=None):
        ## ticker matching doesn't work well. Needs to be converted to CUSIP
        pos = pd.read_parquet(os.path.join(DataDir, 'nav_portfolio.parquet'))
        # Drop the positions file's own Sector column; the security
        # master's Sector/Industry are used after the merge below.
        pos = pos.drop(['Sector'],axis=1)
        sm = pd.read_parquet(os.path.join(DataDir, 'sec_master.parquet'))
        pos = pos.merge(sm, on='sec_id', how='left')
        daily_nav = pos.groupby('data_date').MarketValueBase.sum()
        pos['nav'] = pos.data_date.map(daily_nav)
        #######NEED TO FIX CASH ############
        pos['weight'] = pos.MarketValueBase / pos.nav
        pos['weight_abs'] = pos.weight.abs()
        # Industry-level gross (sum of |weights|) and net exposure,
        # where Net is normalized by Gross.
        gross_ind = pos.groupby(['data_date', 'Sector', 'Industry']).weight_abs.sum().to_frame(
            'Gross')
        net_ind = pos.groupby(['data_date', 'Sector', 'Industry']).weight.sum().to_frame(
            'Net_unadj')
        net_ind = net_ind.join(gross_ind)
        net_ind['Net'] = net_ind['Net_unadj'] / net_ind['Gross']
        # 5 rows back = ~1 trading week; 20 rows back = ~1 trading month.
        net_ind['Net - 1wk delta'] = net_ind.groupby(level=['Sector', 'Industry'])['Net'].diff(
            5).fillna(0)
        net_ind['Net - 1mo delta'] = net_ind.groupby(level=['Sector', 'Industry'])['Net'].diff(
            20).fillna(0)
        net_ind.reset_index(level=['Sector', 'Industry'], drop=False, inplace=True)
        # Same aggregation at sector level; Industry is labeled 'All'.
        gross_sec = pos.groupby(['data_date', 'Sector']).weight_abs.sum().to_frame('Gross')
        net_sec = pos.groupby(['data_date', 'Sector']).weight.sum().to_frame('Net_unadj')
        net_sec = net_sec.join(gross_sec)
        net_sec['Net'] = net_sec['Net_unadj'] / net_sec['Gross']
        net_sec['Net - 1wk delta'] = net_sec.groupby(level=['Sector'])['Net'].diff(5).fillna(0)
        net_sec['Net - 1mo delta'] = net_sec.groupby(level=['Sector'])['Net'].diff(20).fillna(0)
        net_sec.reset_index(level=['Sector'], drop=False, inplace=True)
        net_sec['Industry'] = 'All'
        # Only the most recent date is reported.
        max_date = pos.data_date.max()
        exposures = pd.concat([net_ind.loc[max_date], net_sec.loc[max_date]], ignore_index=True)
        exposures = exposures.drop('Net_unadj', axis=1)
        # build context
        context = {'data': exposures.to_dict(orient='records')}
        return Response(context)
class SignalsLatestView(APIView):
    """Serve the most recent per-ticker signal snapshot."""

    def get(self, request, format=None):
        """Return the latest signals as a list of record dicts."""
        source = os.path.join(DataDir, 'equities_signals_latest.parquet')
        wanted = ['data_date', 'ticker', 'market_cap', 'Sector', 'Industry',
                  'SignalConfidence', 'SignalDirection']
        frame = pd.read_parquet(source)[wanted]
        # Missing market caps become 0; rows without a sector are dropped.
        frame.market_cap.fillna(0, inplace=True)
        frame = frame[frame.Sector.notnull()]
        return Response({'data': frame.to_dict(orient='records')})
class SignalsSecIndView(APIView):
    """Serve sector/industry aggregated signals, excluding index rows."""

    def get(self, request, format=None):
        """Return all non-index sector/industry signal records."""
        frame = pd.read_parquet(
            os.path.join(DataDir, 'equities_signals_sec_ind.parquet'))
        keep = ~frame.Sector.isin(['', 'Index'])
        return Response({'data': frame[keep].to_dict(orient='records')})
class SignalsSectorTableView(APIView):
    """Serve the latest signals filtered to one sector (POSTed as 'sector')."""

    def post(self, request, format=None):
        """Return latest-signal records for the requested sector."""
        wanted_sector = request.data['sector']
        frame = pd.read_parquet(
            os.path.join(DataDir, 'equities_signals_latest.parquet'))
        # Filter in pandas rather than pushing a predicate into the read.
        frame = frame[frame.Sector == wanted_sector]
        return Response({'data': frame.to_dict(orient='records')})
class SignalsIndustryTableView(APIView):
    """Serve the latest signals filtered to one industry (POSTed as 'industry')."""

    def post(self, request, format=None):
        """Return latest-signal records for the requested industry."""
        wanted_industry = request.data['industry']
        frame = pd.read_parquet(
            os.path.join(DataDir, 'equities_signals_latest.parquet'))
        # Filter in pandas rather than pushing a predicate into the read.
        frame = frame[frame.Industry == wanted_industry]
        return Response({'data': frame.to_dict(orient='records')})
class SignalsTickerView(APIView):
    """Per-ticker signal history, optionally annotated with insider trades.

    POST body: 'ticker' (case-insensitive) and 'include_it_data' (flag).
    Looks up the company in the security master, loads its signal
    history from HDF, and — when requested — merges SEC insider-trading
    forms for chart markers and a forms table.
    """

    def post(self, request, format=None):
        ticker = request.data['ticker']
        include_it_data = request.data['include_it_data']
        ticker = ticker.upper()
        ## find company name and cik
        sm = pd.read_parquet(os.path.join(DataDir, 'sec_master.parquet'))
        sm = sm[sm.ticker == ticker]
        # Require exactly one security-master match; otherwise report
        # "not found" to the client.
        if len(sm) == 1:
            comp_name = sm.iloc[0].proper_name
            cik = sm.iloc[0].cik
        else:
            return Response({'signal_data_found': False})
        filepath = os.path.join(DataDir, 'equities_signals_full.hdf')
        signal_data_columns = ['data_date', 'market_cap', 'ticker', 'Sector', 'Industry', 'close',
                               'adj_close', 'SignalConfidence']
        # HDF 'where' clause filters server-side to this ticker only.
        signals = pd.read_hdf(filepath, 'table', where='ticker=="%s"' % ticker)[signal_data_columns]
        signals = signals[signals.SignalConfidence.notnull()]
        ## Check if signal data exists
        if not len(signals):
            return Response({'signal_data_found': False})
        # build context
        context = {'ticker': ticker, 'Name': comp_name, 'CIK': cik,
                   'Sector': signals.Sector.iloc[-1],
                   'Industry': signals.Industry.iloc[-1],
                   'Market Cap': signals.market_cap.iloc[-1],
                   'signal_data': signals[['data_date', 'adj_close', 'SignalConfidence']].to_dict(orient='records'),
                   'signal_data_found': True}
        if include_it_data:
            if pd.isnull(cik):
                # No CIK means no SEC filings can be looked up.
                it_data = pd.DataFrame()
                context['it_data_found'] = False
                return Response(context)
            # get cik forms
            filepath = os.path.join(DataDir, 'sec_forms_ownership_source_full.hdf')
            forms = pd.read_hdf(filepath, 'table', where='IssuerCIK == "%s"' % cik)
            forms.sort_values('AcceptedDate', ascending=False, inplace=True)
            # Keep only rows flagged as a valid purchase or sale.
            forms = forms[(forms.valid_purchase + forms.valid_sale) != 0]
            forms['Direction'] = 'Buy'
            forms['Direction'] = forms.Direction.where(forms.valid_purchase == 1, 'Sell')
            # Exclude these transaction types from the report.
            forms = forms[~forms.TransType.isin(['LDG', 'HO', 'RB'])]
            cols = ['SECAccNumber', 'URL', 'AcceptedDate', 'FilerName', 'InsiderTitle',
                    'Director', 'TenPercentOwner', 'TransType', 'DollarValue', 'Direction']
            forms = forms[cols].copy()
            forms.reset_index(inplace=True, drop=True)
            forms['tableIndex'] = forms.index
            # Normalize the accepted timestamp to a date so it can be
            # joined against the daily signal dates.
            forms['AcceptedDateDate'] = pd.to_datetime(forms.AcceptedDate.apply(lambda x: x.date()))
            graph_markers = signals.merge(forms, left_on='data_date', right_on='AcceptedDateDate')

            def add_count(x):
                # Sequential 0..n-1 counter within one (date, direction) group.
                return (pd.Series(index = x.index,data = range(len(x))))
            # marker_count = 1-based position of each filing among same-day,
            # same-direction filings (used to offset overlapping markers).
            graph_markers['marker_count'] = graph_markers.groupby(['data_date','Direction'],as_index=False,group_keys=False).apply(lambda x: add_count(x))
            graph_markers['marker_count'] = graph_markers['marker_count'] + 1
            graph_markers = graph_markers[
                ['data_date', 'tableIndex', 'FilerName', 'TransType', 'DollarValue', 'Direction','adj_close','marker_count']]
            graph_markers.fillna(0, inplace=True)
            forms.fillna(0, inplace=True)
            context['graph_markers'] = graph_markers.to_dict(orient='records')
            context['forms_table'] = forms.to_dict(orient='records')
        return Response(context)
class CorrelationView(APIView):
    """Correlation dislocations table or minimum-spanning-tree graph.

    POST body: 'aggregation' (bar size in minutes), 'lookback' (window
    label used in the csv filenames), 'corr_threshold', and 'graph'
    (false -> dislocations table, true -> MST nodes/edges for vis.js).
    """

    def post(self, request, format=None):
        aggregation = request.data['aggregation']
        lookback = request.data['lookback']
        corr_threshold = request.data['corr_threshold']
        graph = request.data['graph']
        if not graph:
            # Tabular mode: pre-computed pairwise dislocations, filtered
            # by correlation weight and sorted by |5-day delta|.
            dislocations = pd.read_csv(DataDir + '/correlation_network_files/dislocations_' + str(
                aggregation) + 'minute_' + lookback + '_lookback.csv')
            dislocations = dislocations[dislocations.weight >= corr_threshold].reset_index(drop=True)
            dislocations = dislocations[['ticker1', 'ticker2', 'weight',
                                         'comp1_H_1day_abs_return', 'comp2_H_1day_abs_return', 'delta_1day',
                                         'comp1_H_3day_abs_return', 'comp2_H_3day_abs_return', 'delta_3day',
                                         'comp1_H_5day_abs_return', 'comp2_H_5day_abs_return', 'delta_5day']]
            dislocations = dislocations.reindex(dislocations.delta_5day.abs().sort_values(ascending=False).index)
            context = {'data': dislocations.to_dict(orient='records')}
        else:
            # Graph mode: build an igraph MST from the correlation matrix.
            df_corrmat = pd.read_csv(DataDir + '/correlation_network_files/corr_matrix_' + str(
                aggregation) + 'minute_' + lookback + '_lookback.csv').set_index(keys=['Unnamed: 0'], drop=True)
            df_nodes = pd.read_csv(DataDir + '/correlation_network_files/node_info.csv')
            # Integer node ids in matrix order (igraph needs 0..n-1 ids).
            node_list = pd.DataFrame(df_corrmat.index.tolist()).reset_index(drop=False).rename(
                columns={'index': 'node_id', 0: 'ticker'})
            # Unstack the square matrix into a (ticker1, ticker2, weight)
            # edge list, dropping the self-correlation diagonal (== 1).
            df_list = df_corrmat.unstack()
            df_list = pd.DataFrame(df_list, columns=['weight'])
            df_list.index.names = ['ticker1', 'ticker2']
            df_list = df_list.reset_index(drop=False)
            df_list = df_list[df_list.weight != 1].copy()
            df_list = pd.merge(df_list, node_list, left_on=['ticker1'], right_on=['ticker'], how='outer').drop(
                labels=['ticker1', 'ticker'], axis=1).rename(columns={'node_id': 'node1'})
            df_list = pd.merge(df_list, node_list, left_on=['ticker2'], right_on=['ticker'], how='outer').drop(
                labels=['ticker2', 'ticker'], axis=1).rename(columns={'node_id': 'node2'})
            df_list = df_list[['node1', 'node2', 'weight']].copy()
            # Keep edges whose |correlation| clears the threshold.
            df_list = df_list[(df_list.weight >= corr_threshold) | (df_list.weight <= -1 * corr_threshold)].copy()
            edge_list = df_list[['node1', 'node2']].values.tolist()
            g = igraph.Graph()
            g.add_vertices(node_list.node_id.max() + 1)
            g.add_edges(edge_list)
            # MST weights use |correlation|.
            weight_list = [abs(i) for i in df_list.weight.tolist()]
            g.es['weight'] = weight_list
            mst_edge_ids = g.spanning_tree(weights=weight_list, return_tree=False)
            mst_edges_list = [g.get_edgelist()[i] for i in mst_edge_ids]
            mst_edges_weights = [g.es['weight'][i] for i in mst_edge_ids]
            # Map the MST's node ids back to tickers.
            mst_edges = pd.DataFrame(mst_edges_list, columns=['node1', 'node2'])
            mst_edges = pd.merge(mst_edges, pd.DataFrame(mst_edges_weights, columns=['weight']), left_index=True,
                                 right_index=True)
            mst_edges = pd.merge(mst_edges, node_list, left_on='node1', right_on='node_id').drop(
                labels=['node_id', 'node1'], axis=1)
            mst_edges = pd.merge(mst_edges, node_list, left_on='node2', right_on='node_id').drop(
                labels=['node_id', 'node2'], axis=1)
            mst_edges = mst_edges.rename(columns={'ticker_x': 'ticker1', 'ticker_y': 'ticker2'})
            mst_edges = mst_edges[['ticker1', 'ticker2', 'weight']].copy()
            # mst_edges = pd.merge(mst_edges, df_nodes, left_on='ticker1', right_on='ticker').rename(columns={'comp_name':'comp_name1','Sector':'comp1_sector','Industry':'comp1_industry','Industry Group':'comp1_industry_group'}).drop(labels=['ticker'], axis=1)
            # mst_edges = pd.merge(mst_edges, df_nodes, left_on='ticker2', right_on='ticker').rename(columns={'comp_name':'comp_name2','Sector':'comp2_sector','Industry':'comp2_industry','Industry Group':'comp2_industry_group'}).drop(labels=['ticker'], axis=1)
            # Node metadata for every ticker that appears in the MST.
            mst_nodes = list(set(mst_edges.ticker1.unique().tolist() + mst_edges.ticker2.unique().tolist()))
            mst_nodes = df_nodes[df_nodes.ticker.isin(mst_nodes)].reset_index(drop=True)
            # mst_edges.to_csv('./sp500_mst_edges_minute.csv', index=False)
            # mst_nodes.to_csv('./sp500_mst_nodes_minute.csv', index=False)
            nodes, edges = self.create_graph_data(mst_nodes, mst_edges)
            context = {'nodes': nodes.to_dict(orient='records'),
                       'edges': edges.to_dict(orient='records')}
        return Response(context)

    def create_graph_data(self, nodes, edges):
        """Shape MST node/edge frames into vis.js-style records.

        Nodes get tooltip 'title', sector color, 1-based 'id'; edges get
        'from'/'to' node ids, weight-scaled 'width', and black color.
        """
        # Sector -> display color for the network graph.
        colors = {'Industrials': 'LightBlue',
                  'Health Care': 'PaleGoldenRod',
                  'Financials': 'Crimson',
                  'Consumer Staples': 'Lavender',
                  'Consumer Discretionary': 'Wheat',
                  'Utilities': 'GreenYellow',
                  'Information Technology': 'GoldenRod',
                  'Energy': 'WhiteSmoke',
                  'Materials': 'LightSlateGray',
                  'Real Estate': 'Lime',
                  'Telecommunication Services': 'Gold'}
        nodes = nodes.drop('Industry Group', axis=1)
        nodes = nodes.rename(columns={'ticker': 'label', 'comp_name': 'name'})
        nodes['title'] = nodes.apply(lambda x: 'Name: %s<br>Sec: %s<br> ind: %s' % (x['name'], x.Sector, x.Industry),
                                     axis=1)
        nodes['color'] = nodes.Sector.map(colors)
        nodes['x'] = 1
        nodes['y'] = nodes['x']
        nodes['id'] = nodes.index + 1
        nodes['radius'] = 10
        # vis.js expects color as {'background': ...} for nodes.
        nodes['color'] = nodes.color.apply(lambda x: {'background': x})
        edges['from'] = edges.ticker1.map(nodes.set_index('label')['id'])
        edges['to'] = edges.ticker2.map(nodes.set_index('label')['id'])
        edges = edges[['from', 'to', 'weight']].copy()
        # Edge weight doubles as the hover title; width scales with it.
        edges.columns = ['from', 'to', 'title']
        edges.title = edges.title.round(2)
        edges['width'] = edges.title * 10
        edges['id'] = edges.index + 1
        edges['color'] = 'black'
        edges['color'] = edges.color.apply(lambda x: {'color': x})
        return (nodes, edges)
class NetworkView(APIView):
    """Serve a pre-computed S&P 500 MST network from static csv files.

    GET reads sp500_mst_nodes.csv / sp500_mst_edges.csv and shapes them
    into vis.js-style node and edge records (same output shape as
    CorrelationView.create_graph_data, but with Zacks sector names).
    """

    def get(self, request, format=None):
        # Zacks sector -> display color for the network graph.
        colors = {"Computer and Technology": "LightBlue",
                  "Medical": "PaleGoldenRod",
                  "Transportation": "Chocolate",
                  "Business Services": "Crimson",
                  "Utilities": "Lavender",
                  "Finance": "Wheat",
                  "Industrial PRODUCTS": "GreenYellow",
                  "Multi-Sector Conglomerates": "GoldenRod",
                  "Auto-Tires-Trucks": "WhiteSmoke",
                  "Construction": "LightSlateGray",
                  "Oils-Energy": "Lime",
                  "Basic Materials": "Magenta",
                  "Retail-Wholesale": "Gold",
                  "Consumer Staples": "Orange",
                  "Aerospace": "Peru",
                  "Consumer Discretionary": "MintCream"}
        nodes = pd.read_csv(DataDir + '/sp500_mst_nodes.csv')
        nodes = nodes.drop('zacks_x_ind_desc', axis=1)
        nodes = nodes.rename(columns={'ticker': 'label', 'comp_name': 'name', 'Sector': 'Sector',
                                      'Industry': 'industry'})
        # HTML tooltip shown on hover.
        nodes['title'] = nodes.apply(lambda x: 'Name: %s<br>Sec: %s<br> ind: %s' % (x['name'], x.Sector, x.industry),
                                     axis=1)
        nodes['color'] = nodes.Sector.map(colors)
        nodes['x'] = 1
        nodes['y'] = nodes['x']
        nodes['id'] = nodes.index + 1
        nodes['radius'] = 10
        # vis.js expects node color as {'background': ...}.
        nodes['color'] = nodes.color.apply(lambda x: {'background': x})
        edges = pd.read_csv(DataDir + '/sp500_mst_edges.csv')
        # Translate ticker endpoints into the 1-based node ids above.
        edges['from'] = edges.ticker1.map(nodes.set_index('label')['id'])
        edges['to'] = edges.ticker2.map(nodes.set_index('label')['id'])
        edges = edges[['from', 'to', 'weight']]
        # Edge weight doubles as the hover title; width scales with it.
        edges.columns = ['from', 'to', 'title']
        edges.title = edges.title.round(2)
        edges['width'] = edges.title * 10
        edges['id'] = edges.index + 1
        edges['color'] = 'black'
        edges['color'] = edges.color.apply(lambda x: {'color': x})
        context = {'my_nodes': nodes.to_dict(orient='records'),
                   'my_edges': edges.to_dict(orient='records')}
        return Response(context)
class FactorReturns(APIView):
    """Factor returns over a date window plus a factor-group lookup table.

    POST body: 'start_date', 'end_date' (ISO strings, '' = full range)
    and 'selected_factors' (list; empty = Growth/Value defaults).
    """

    def post(self, request, format=None):
        start_date = request.data['start_date']
        end_date = request.data['end_date']
        selected_factors = request.data['selected_factors']
        returns = pd.read_parquet(os.path.join(DataDir, 'factor_returns.parquet'))
        style_factors = ['Dividend_Yield', 'Earnings_Yield',
                         'Exchange_Rate_Sensitivity', 'Growth', 'Leverage',
                         'Liquidity', 'Market_Sensitivity', 'Medium_Term_Momentum',
                         'MidCap', 'Profitability', 'Size', 'Value', 'Volatility',
                         'Short_Term_Momentum']
        # Prefix every column with its factor group for display.
        new_names = []
        for c in returns.columns:
            if c in style_factors:
                new_names.append('Style: ' + c)
            elif c == 'Market_Intercept':
                new_names.append('Market: Market_Intercept')
            else:
                new_names.append('Industry: ' + c)
        returns.columns = new_names
        available_dates = returns.index.tolist()
        all_factors = sorted(returns.columns.tolist(), reverse=True)
        start_date = returns.index.min() if start_date == '' else pd.to_datetime(start_date)
        # Bug fix: an empty end_date previously defaulted to index.min(),
        # collapsing the slice to (at most) a single row; default to the
        # last available date instead.
        end_date = returns.index.max() if end_date == '' else pd.to_datetime(end_date)
        selected_factors = selected_factors if len(selected_factors) else ['Style: Growth', 'Style: Value']
        returns = returns[start_date:end_date][selected_factors]
        # Zero the first row so cumulative views start from a common base.
        returns.iloc[0] = 0
        #returns = (1 + returns).cumprod()
        returns.reset_index(inplace=True)
        # Construct the factor-group table from the Axioma hierarchy file.
        f = os.path.join(DataDir, 'AXUS4-SH.hry.csv')
        df = pd.read_csv(f, sep='|', comment='#', header=None)
        # Column names live in a '#Columns:' comment line that pandas
        # skipped above. Bug fix: close the file handle (the original
        # leaked it) and stop at the first match.
        with open(f, mode='r') as f_o:
            for l in f_o:
                if 'Columns' in l:
                    df.columns = re.sub('#Columns: ', '', l.rstrip()).split('|')
                    break
        # Industries carry their Industry Group; resolve each group to
        # its parent sector and strip the '-S' sector-name suffix.
        industry_groups = df[df.Level == 'Industry Groups']
        industries = df[df.Level == 'Industries']
        industries = industries.rename(columns={'Parent': 'Industry Groups', 'Name': 'Industries'})
        industries['Sectors'] = industries['Industry Groups'].map(industry_groups.set_index('Name')['Parent'])
        industries['Sectors'] = industries['Sectors'].str.replace('-S', '')
        df = industries[['Sectors', 'Industries']].copy()
        df.columns = ['Group', 'Factor']
        df = df.sort_values(['Group', 'Factor'])
        # Style factors go on top of the industry rows.
        sf = pd.DataFrame(style_factors, columns=['Factor'])
        sf['Group'] = 'Style'
        sf = sf.sort_values(['Group', 'Factor'])
        df = pd.concat([sf, df], ignore_index=True)[['Group', 'Factor']]
        context = {'all_factors': all_factors, 'available_dates': available_dates,
                   'data': returns.to_dict(orient='records'),
                   'factor_table_data': df.to_dict(orient='records')}
        return Response(context)
| StarcoderdataPython |
167622 | <reponame>Amazinggrace-Oduye/inventory_app<filename>updating_inventoryApp.py
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 21:29:59 2020
@author: <NAME>
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 23 20:43:03 2020
@author: <NAME>
"""
#importing modules
from tkinter import *
import tkinter.messagebox
import sqlite3
#-------------------------------------------------------------------------------
'''creating class for front end user interface'''
class product:
    """Tkinter front end for the stock inventory application.

    The whole UI is assembled in __init__: entry fields for product
    attributes, a scrollable listbox of stored rows, and a row of
    operation buttons wired to nested handler functions that delegate
    persistence to the `database` class.
    """

    def __init__(self, root):
        '''creating object instance of database class in product class and \
        connecting the objecct instance to connection function'''
        objData = database()
        objData.connecting()
        self.root = root
        self.root.title('AMAZING COLLECTIONS STOCK')
        self.root.geometry('1325x690')
        self.root.config(bg='yellow')
        '''declareing variables as stringvars'''
        # Tk variables backing the entry widgets below.
        PName = StringVar()
        Colour = StringVar()
        Qty = StringVar()
        Size= StringVar()
        CostPrice = StringVar()
        SellPrice = StringVar()
        Expense = StringVar()
        newQty = StringVar()
        Pid = StringVar()
        #-----------------------------------------------------------------------------#
        '''creating functions for operation button and database\
        operation methode call'''
        def close():
            # Confirm with the user, then destroy the root window.
            print('application: close methode called')
            # NOTE(review): local name `close` shadows this function —
            # harmless here since it is only used for the yes/no result.
            close = tkinter.messagebox.askyesno('AMAZING COLLECTIONS STOCK',\
                'Do you want to close the application')
            if close > 0:
                root.destroy()
            print('application: close methode finish\n')
            return
        #def clear():
        # print('application: close methode called')
        #self.entryPName.delete(0,1)
        #self.entryColour.delete(0,1)
        #self.entryQty.delete(0,1)
        #self.entrySize.delete(0,1)
        #self.entryCostPrice.delete(0,1)
        #self.entryExpense.delete(0,1)
        #self.entrySellPrice.delete(0,1)
        #self.entryPid.delete(0,1)
        #print('application: clear methode finish\n')
        #return
        def reset():
            # Empty every entry widget.
            print('application: reset methode called')
            self.entryPName.delete(0,END)
            self.entryColour.delete(0,END)
            self.entryQty.delete(0,END)
            self.entrySize.delete(0,END)
            self.entryCostPrice.delete(0,END)
            self.entryExpense.delete(0,END)
            self.entrySellPrice.delete(0,END)
            self.entryPid.delete(0,END)
            self.entryQty2.delete(0,END)
            print('application: reset methode finish\n')
            return
        #----------------------------------------------------------------------------------#
        '''referencing database functions with this fuctions to save\
        product details OR retrieve details in databae'''
        #-------saving product ddetails on data base
        def insertData():
            # Persist the current entry values; requires a product name.
            print('application: insert methode called')
            if (len(PName.get()) != 0):
                objData.insert(PName.get(),Colour.get(),Qty.get(),Size.get(),
                               CostPrice.get(),Expense.get(),SellPrice.get())
                productList.delete(0,END)
                productList.insert(END,(PName.get(),Colour.get(),Qty.get(),
                                        Size.get(),CostPrice.get(),Expense.get(),SellPrice.get()))
            else:
                tkinter.messagebox.askokcancel('AMAZING COLLECTIONS STOCK',\
                    'You must provid the product name')
            print('application: insert methode finish\n')
        def showStock():
            # Reload the listbox with every row in the database.
            print('application: showStock methode called')
            productList.delete(0,END)
            for row in objData.show():
                productList.insert(END,row,str(''))
            print('application: showStock methode finish\n')
        '''Using Event Driven Function'''
        # --------using curselection methode to query current selection and get the result-----
        def showSelection(event):
            # Listbox selection handler: copy the selected row into the
            # entry widgets. Stores the row in the module-global `result`
            # so deleteData/updateDate can reference the last selection.
            print('application: showSelection methode called')
            global result
            saerchClick = productList.curselection()[0] #return list index
            result = productList.get(saerchClick)
            #print(result)
            '''Assigning cursor event to all entry'''
            self.entryPid.delete(0,END)
            self.entryPid.insert(END,result[0])
            self.entryPName.delete(0,END)
            self.entryPName.insert(END,result[1])
            self.entryColour.delete(0,END)
            self.entryColour.insert(END,result[2])
            self.entryQty.delete(0,END)
            self.entryQty.insert(END,result[3])
            #print(result[3])
            self.entrySize.delete(0,END)
            self.entrySize.insert(END,result[4])
            self.entryCostPrice.delete(0,END)
            self.entryCostPrice.insert(END,result[5])
            self.entryExpense.delete(0,END)
            self.entryExpense.insert(END,result[6])
            self.entrySellPrice.delete(0,END)
            self.entrySellPrice.insert(END,result[7])
            self.entryQty2.delete(0,END)
            print('application: showSelection methode finished\n')
        '''DELETING RECORD FROM DATA BASE BY SELECTIN DAIA FROM LISTBOX'''
        def deleteData():
            # Delete the last-selected row (pid taken from global `result`).
            print('application: deletData methode called')
            if (len(PName.get()) != 0):
                objData.delete(result[0])
                reset()
                showStock()
            print('application: deletData methode finished\n')
        '''SEARCHING FOR DATABASE CONTENT USING ANY DETAIL IN DATA BASE'''
        def searchData():
            # Match any field against the current entry values.
            print('application: searchData methode called')
            productList.delete(0,END)
            for row in objData.search(PName.get(),Colour.get(),Qty.get(),Size.get(),
                                      CostPrice.get(),Expense.get(),SellPrice.get()):
                productList.insert(END,row,str(''))
            print('application: searchData methode finished\n')
        def updateDate():
            # Replace the selected row, adding `newQty` to its quantity:
            # deletes the old row, then re-inserts with the summed quantity.
            # NOTE(review): the listbox display uses Qty.get(), not the
            # summed quantity — looks inconsistent with what is stored;
            # confirm intended behavior before changing.
            print('application:updateData methode called')
            if (len(PName.get()) != 0):
                objData.delete(result[0])
            if (len(PName.get()) != 0):
                num = int(result[3])
                Qtysum = num + int(newQty.get())
                objData.insert(PName.get(),Colour.get(),Qtysum,Size.get(),
                               CostPrice.get(),Expense.get(),SellPrice.get())
                productList.delete(0,END)
                productList.insert(END,(PName.get(),Colour.get(),Qty.get(),
                                        Size.get(),CostPrice.get(),Expense.get(),SellPrice.get()))
            print('application:updateData methode finished\n')
        '''Creating different Frames GUI'''
        #-------------------------------------------------------------------------------
        mainFrame = Frame(self.root,bg='red')
        mainFrame.grid()
        headFrame = Frame(mainFrame,bg='white',bd=1,padx=45,pady=10,
                          relief=RIDGE)
        headFrame.pack(side=TOP)
        self.labelTitle = Label(headFrame,font=('arial',40,'bold'),fg='red',
                                text='AMAZING COLLECTIONS STOCK INVENTORY',bg='white')
        self.labelTitle.grid()
        operationFrame = Frame(mainFrame,bg='white',bd=2,width=1300,height=45,
                               padx=45,pady=10,relief=RIDGE)
        operationFrame.pack(side=BOTTOM)
        bodyFrame = Frame(mainFrame,bg='white',bd=2,width=1280,height=500,
                          padx=30,pady=20,relief=RIDGE)
        bodyFrame.pack(side=BOTTOM)
        '''creating frames containing label 'LabelFrame'''
        leftbodyFrame = LabelFrame(bodyFrame,bg='yellow',bd=2,width=600,height=280,
                                   padx=20,pady=10,relief=RIDGE,
                                   font=('arial',15,'bold'),text='Stock Items:')
        leftbodyFrame.pack(side=LEFT)
        rightbodyFrame = LabelFrame(bodyFrame,bg='yellow',bd=2,width=450,height=400,
                                    padx=20,pady=10,relief=RIDGE,
                                    font=('arial',15,'bold'),text='Stock Items Details:')
        rightbodyFrame.pack(side=RIGHT)
        ''' adding widget or coponents with their label'''
        #------------------------------------------------------------------------------
        #self.Pidlabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
        #font=('arial',15,'bold'),text='Product id:')
        #self.Pidlabel.grid(row=0,column=0,sticky=W)
        #self.entryPid = Entry(leftbodyFrame,bg='white',width=35,
        #font=('arial',20,'bold'),textvariable=Pid)
        #self.entryPid.grid(row=0,column=1,sticky=W)
        self.PNamelabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                                font=('arial',15,'bold'),text='Product Name:')
        self.PNamelabel.grid(row=1,column=0,sticky=W)
        self.entryPName = Entry(leftbodyFrame,bg='white',width=35,
                                font=('arial',20,'bold'),textvariable=PName)
        self.entryPName.grid(row=1,column=1,sticky=W)
        self.Colourlabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                                 font=('arial',15,'bold'),text='Product Colour:')
        self.Colourlabel.grid(row=2,column=0,sticky=W)
        self.entryColour = Entry(leftbodyFrame,bg='white',width=35,
                                 font=('arial',20,'bold'),textvariable=Colour)
        self.entryColour.grid(row=2,column=1,sticky=W)
        self.Qtylabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                              font=('arial',15,'bold'),text='Product Quantity:')
        self.Qtylabel.grid(row=3,column=0,sticky=W)
        self.entryQty = Entry(leftbodyFrame,bg='white',width=35,
                              font=('arial',20,'bold'),textvariable=Qty)
        self.entryQty.grid(row=3,column=1,sticky=W)
        self.Sizelabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                               font=('arial',15,'bold'),text='Product Size:')
        self.Sizelabel.grid(row=4,column=0,sticky=W)
        self.entrySize = Entry(leftbodyFrame,bg='white',width=35,
                               font=('arial',20,'bold'),textvariable=Size)
        self.entrySize.grid(row=4,column=1,sticky=W)
        self.CostPricelabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                                    font=('arial',15,'bold'),text='Cost Price:')
        self.CostPricelabel.grid(row=5,column=0,sticky=W)
        self.entryCostPrice = Entry(leftbodyFrame,bg='white',width=35,
                                    font=('arial',20,'bold'),textvariable=CostPrice)
        self.entryCostPrice.grid(row=5,column=1,sticky=W)
        self.Expenselabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                                  font=('arial',15,'bold'),text='Expense:')
        self.Expenselabel.grid(row=6,column=0,sticky=W)
        self.entryExpense = Entry(leftbodyFrame,bg='white',width=35,
                                  font=('arial',20,'bold'),textvariable=Expense)
        self.entryExpense.grid(row=6,column=1,sticky=W)
        self.SellPricelabel = Label(leftbodyFrame,bg='white',padx=2,fg='blue',
                                    font=('arial',15,'bold'),text='Selling Price:')
        self.SellPricelabel.grid(row=7,column=0,sticky=W)
        self.entrySellPrice = Entry(leftbodyFrame,bg='white',width=35,
                                    font=('arial',20,'bold'),textvariable=SellPrice)
        self.entrySellPrice.grid(row=7,column=1,sticky=W)
        # Blank "dommy" labels used purely as vertical spacers.
        self.dommy2 = Label(leftbodyFrame,padx=2,bg='yellow')
        self.dommy2.grid(row=8,column=0,sticky=W)
        self.dommy2 = Label(leftbodyFrame,padx=2,bg='yellow')
        self.dommy2.grid(row=9,column=0,sticky=W)
        self.dommy2 = Label(leftbodyFrame,padx=2,bg='yellow')
        self.dommy2.grid(row=10,column=0,sticky=W)
        self.dommy2 = Label(rightbodyFrame,padx=2,bg='yellow')
        self.dommy2.grid(row=8,column=0,sticky=W)
        self.dommy2 = Label(rightbodyFrame,padx=2,bg='yellow')
        self.dommy2.grid(row=9,column=0,sticky=W)
        self.dommy2 = Label(rightbodyFrame,padx=2,bg='yellow')
        self.dommy2.grid(row=10,column=0,sticky=W)
        self.Pidlabel = Label(leftbodyFrame,bg='white',padx=2,fg='red',
                              font=('arial',15,'bold'),text='Product id:')
        self.Pidlabel.grid(row=11,column=0,sticky=W)
        self.entryPid = Entry(leftbodyFrame,bg='white',width=35,
                              font=('arial',20,'bold'),textvariable=Pid)
        self.entryPid.grid(row=11,column=1,sticky=W)
        self.Qty2label = Label(leftbodyFrame,bg='white',padx=2,fg='red',
                               font=('arial',15,'bold'),text='Additional Quantity:')
        self.Qty2label.grid(row=12,column=0,sticky=W)
        self.entryQty2 = Entry(leftbodyFrame,bg='white',width=35,
                               font=('arial',20,'bold'),textvariable=newQty)
        self.entryQty2.grid(row=12,column=1,sticky=W)
        #-------------------------------------------------------------------------------
        '''Adding Scroll Bar To listbox widget'''
        scroll = Scrollbar(rightbodyFrame)
        scroll.grid(row=0,column=1,sticky=NS)
        '''Creating a listbox and Adding listbox to scrollbar '''
        productList = Listbox(rightbodyFrame,width=40,height=16,
                              font=('arial',12,'bold'),yscrollcommand=scroll.set)
        productList.grid(row=0,column=0)
        '''setting the scrollbar command to vertical view of the listbox'''
        scroll.config(command=productList.yview)
        '''Binding cursor Event and Handler methode Selection to listbox/productlist'''
        productList.bind('<<ListboxSelect>>',showSelection)
        #-------------------------------------------------------------------------------#
        '''Adding Buttons to the Operation Frame'''
        self.btSave = Button(operationFrame,text='Save', font=('arial',15,'bold'),
                             height=1,width=10,bd=4,command=insertData)
        self.btSave.grid(row=0,column=0)
        self.btShowdata = Button(operationFrame,text='Show Data', font=('arial',15,'bold'),
                                 height=1,width=10,bd=4,command=showStock)
        self.btShowdata.grid(row=0,column=1)
        #self.btClear = Button(operationFrame,text='Clear', font=('arial',15,'bold'),
        #height=1,width=10,bd=4,command=clear)
        #self.btClear.grid(row=0,column=2)
        self.btDelete = Button(operationFrame,text='Delete', font=('arial',15,'bold'),
                               height=1,width=10,bd=4,command=deleteData)
        self.btDelete.grid(row=0,column=3)
        self.btSearch = Button(operationFrame,text='Search', font=('arial',15,'bold'),
                               height=1,width=10,bd=4,command=searchData)
        self.btSearch.grid(row=0,column=4)
        self.btUpdate = Button(operationFrame,text='Update', font=('arial',15,'bold'),
                               height=1,width=10,bd=4,command=updateDate)
        self.btUpdate.grid(row=0,column=5)
        self.btClose = Button(operationFrame,text='Close', font=('arial',15,'bold'),
                              height=1,width=10,bd=4,command=close)
        self.btClose.grid(row=0,column=6)
        self.btReset = Button(operationFrame,text='Reset', font=('arial',15,'bold'),
                              height=1,width=10,bd=4,command=reset)
        self.btReset.grid(row=0,column=7)
        '''End of Front End Graphic User interface section'''
#--------------------------------------------------------------------------------------#
#Back End Database Operations
#creating or connecting to the database
#creating cursor
#creating table
#updating database
class database():
    """SQLite persistence layer for the stock-inventory GUI.

    Each public method opens a fresh connection to ``UPDATEINVENTORY.db``,
    executes a single statement and closes the connection again, so no
    handle is kept open between calls.  The shared boilerplate
    (connect / execute / commit / close) lives in :meth:`_execute`.
    """

    # Database file; created in the current working directory on first use.
    DB_NAME = 'UPDATEINVENTORY.db'

    def _execute(self, query, params=(), fetch=False):
        """Run *query* with *params*; return the fetched rows when *fetch* is True."""
        conn = sqlite3.connect(self.DB_NAME)
        try:
            coso = conn.cursor()
            coso.execute(query, params)
            rows = coso.fetchall() if fetch else None
            conn.commit()
        finally:
            # Close even when execute() raises, so the db file is not left locked.
            conn.close()
        return rows

    def connecting(self):
        """Create the ``mystockData`` table if it does not exist yet."""
        print('Database: connection methode called')
        query = ('CREATE TABLE IF NOT EXISTS mystockData(pid INTEGER PRIMARY KEY,name TEXT,'
                 'colour TEXT,quantity TEXT,size TEXT,costprice TEXT,expense TEXT,sellprice TEXT)')
        self._execute(query)
        print('Database: connection methode finished \n')

    def insert(self, name, colour, quantity, size, costprice, expense, sellprice):
        """Insert one product row; the primary key ``pid`` is auto-assigned."""
        print('Database: insert methode called')
        query = 'INSERT INTO mystockData VALUES(NULL,?,?,?,?,?,?,?)'
        self._execute(query, (name, colour, quantity, size, costprice, expense, sellprice))
        print('Database: insert methode finished \n')

    def show(self):
        """Return every row of ``mystockData`` as a list of tuples."""
        print('Database: show methode called')
        rows = self._execute('SELECT * FROM mystockData', fetch=True)
        print('Database: show methode finished \n')
        return rows

    def delete(self, pid):
        """Delete the row whose primary key equals *pid*."""
        print('Database: delete methode called')
        self._execute('DELETE FROM mystockData WHERE pid=?', [pid])
        print('Database: delete methode finished \n')

    def search(self, name='', colour='', quantity='', size='', costprice='',
               expense='', sellprice=''):
        """Return rows where any column equals the given value (OR search).

        NOTE: empty-string defaults also match rows whose columns are empty.
        """
        print('Database: search methode called')
        query = ('SELECT * FROM mystockData WHERE name=? or colour=?'
                 ' or quantity=? or size=? or costprice=? or expense=? or sellprice=?')
        rows = self._execute(query, (name, colour, quantity, size, costprice,
                                     expense, sellprice), fetch=True)
        print('Database: search methode finished \n')
        return rows

    def update(self, pid, name='', colour='', quantity='', size='', costprice='',
               expense='', sellprice=''):
        """Overwrite every column of the row *pid* with the given values."""
        print('Database: update methode called')
        query = ('UPDATE mystockData SET name=?,colour=?,quantity=?,size=?,'
                 'costprice=?,expense=?,sellprice=? WHERE pid=?')
        self._execute(query, (name, colour, quantity, size, costprice,
                              expense, sellprice, pid))
        print('Database: update methode finished \n')
# Launch the GUI only when this file is executed as a script.
if __name__ == '__main__':
    main_window = Tk()
    app = product(main_window)
    main_window.mainloop()
| StarcoderdataPython |
# Copyright 2017 Rice University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from program_helper.ast.ops import DAPIInvoke
from synthesis.ops.candidate_ast import SYMTAB_MOD, TYPE_NODE, API_NODE, VAR_NODE, OP_NODE, METHOD_NODE, CLSTYPE_NODE, \
VAR_DECL_NODE
class AstReverseMapper:
    """Accumulates vectorized AST arrays and decodes them back to readable tokens.

    The mapper keeps flat, parallel lists (nodes, edges, targets, ...) that
    grow batch-by-batch via :meth:`add_data`; :meth:`decode_ast_paths`
    pretty-prints one element using the vocabulary's lookup tables.
    """

    def __init__(self, vocab):
        # vocab is expected to expose chars_concept / chars_var / chars_type /
        # chars_api / chars_op / chars_method lookup tables -- TODO confirm.
        self.vocab = vocab
        # All list fields are initialized in reset() to keep the two in sync.
        self.reset()

    def add_data(self, nodes, edges, targets,
                 var_decl_ids,
                 node_type_number,
                 type_helper_val, expr_type_val, ret_type_val):
        """Append one batch of parallel AST arrays to the stored data."""
        self.nodes.extend(nodes)
        self.edges.extend(edges)
        self.targets.extend(targets)
        self.var_decl_ids.extend(var_decl_ids)
        self.node_type_numbers.extend(node_type_number)
        self.type_helper_val.extend(type_helper_val)
        self.expr_type_val.extend(expr_type_val)
        self.ret_type_val.extend(ret_type_val)
        self.num_data += len(nodes)

    def get_element(self, id):
        """Return the id-th entry of every parallel list as a single tuple."""
        return self.nodes[id], self.edges[id], self.targets[id], \
            self.var_decl_ids[id], \
            self.node_type_numbers[id], \
            self.type_helper_val[id], self.expr_type_val[id], self.ret_type_val[id]

    def decode_ast_paths(self, ast_element, partial=True):
        """Print a human-readable rendering of one AST element.

        Targets are decoded with the vocabulary that matches their node type.
        When ``partial`` is False, the symbol-table arrays (declared variable
        ids and type information) are printed as well.
        """
        nodes, edges, targets, \
            var_decl_ids, \
            node_type_numbers, \
            type_helper_vals, expr_type_vals, ret_type_vals = ast_element

        for node in nodes:
            print(self.vocab.chars_concept[node], end=',')
        print()

        for edge in edges:
            print(edge, end=',')
        print()

        # Loop variables are renamed (node_type instead of node_type_numbers)
        # so they no longer shadow the lists unpacked above.
        for _, _, target, _, node_type, _, _, _ in zip(*ast_element):
            if node_type == SYMTAB_MOD:
                print('--symtab--', end=',')
            elif node_type == VAR_NODE:
                print(self.vocab.chars_var[target], end=',')
            elif node_type == VAR_DECL_NODE:
                print(self.vocab.chars_var[target], end=',')
            elif node_type == TYPE_NODE:
                print(self.vocab.chars_type[target], end=',')
            elif node_type == CLSTYPE_NODE:
                print(self.vocab.chars_type[target], end=',')
            elif node_type == API_NODE:
                # API entries store "name<delimiter>signature"; print the name only.
                api = self.vocab.chars_api[target]
                api = api.split(DAPIInvoke.delimiter())[0]
                print(api, end=',')
            elif node_type == OP_NODE:
                print(self.vocab.chars_op[target], end=',')
            elif node_type == METHOD_NODE:
                print(self.vocab.chars_method[target], end=',')
            else:
                print(self.vocab.chars_concept[target], end=',')
        print()

        if not partial:
            for var_decl_id in var_decl_ids:
                print(var_decl_id, end=',')
            print()
            for type_helper_val in type_helper_vals:
                print(self.vocab.chars_type[type_helper_val], end=',')
            print()
            for expr_type_val in expr_type_vals:
                print(self.vocab.chars_type[expr_type_val], end=',')
            print()
            for ret_type_val in ret_type_vals:
                print(self.vocab.chars_type[ret_type_val], end=',')
            print()
        print()

    def reset(self):
        """Drop all accumulated data and reset the element counter."""
        self.nodes, self.edges, self.targets = [], [], []
        self.var_decl_ids = []
        self.node_type_numbers = []
        self.type_helper_val, self.expr_type_val, self.ret_type_val = [], [], []
        self.num_data = 0
# Import ParaMol modules
from ParaMol.System.system import *
from ParaMol.Force_field.force_field import *
from ParaMol.Parameter_space.parameter_space import *
import numpy as np
class TestParameterSpace:
    """Unit tests for ParaMol's ParameterSpace on an AMBER aniline system.

    NOTE(review): the long reference arrays below pin exact numerical output
    produced from ``ParaMol/Tests/aniline.prmtop``; do not reformat or round
    them.
    """
    # Kwargs dictionary for AMBER topology system. These are shared between all instances.
    kwargs_dict = {"topology_format": "AMBER",
                   "crd_format": "AMBER",
                   "top_file": "ParaMol/Tests/aniline.prmtop",
                   "crd_file": "ParaMol/Tests/aniline.inpcrd"}

    def test_get_optimizable_parameters(self):
        """
        Test the function that obtains the optimizable parameters.
        """
        # Build the engine / system / force field chain with every parameter
        # type marked optimizable (bonds, angles, torsions, charges, LJ, 1-4 scaling).
        openmm_engine = OpenMMEngine(True, **self.kwargs_dict)
        assert type(openmm_engine) is OpenMMEngine
        system = ParaMolSystem(name="aniline", engine=openmm_engine, n_atoms=14)
        assert type(system.force_field) is ForceField
        system.force_field.create_force_field(opt_bonds=True, opt_angles=True, opt_torsions=True, opt_charges=True, opt_lj=True, opt_sc=True, ff_file=None)
        parameter_space = ParameterSpace()
        assert type(parameter_space) is ParameterSpace
        optimizable_parameters, optimizable_parameters_values = parameter_space.get_optimizable_parameters([system])
        # Aniline with all parameter types optimizable yields exactly 232 values.
        assert len(optimizable_parameters_values) == 232
        # Assert force groups
        optimizable_parameters_values_to_compare = np.asarray([0.10860000000000002, 289365.44000000006, 0.10860000000000002, 289365.44000000006, 0.10860000000000002, 289365.44000000006, 0.10120000000000001,
                                                               338569.28, 0.10120000000000001, 338569.28, 0.10860000000000002, 289365.44000000006, 0.10860000000000002, 289365.44000000006, 0.1398,
                                                               385848.48000000004, 0.1398, 385848.48000000004, 0.1398, 385848.48000000004, 0.1398, 385848.48000000004, 0.1386, 349698.72000000003, 0.1398,
                                                               385848.48000000004, 0.1398, 385848.48000000004, 2.0923016, 403.33760000000007, 2.0923016, 403.33760000000007, 2.0923016, 403.33760000000007,
                                                               2.0923016, 403.33760000000007, 2.0923016, 403.33760000000007, 2.0923016, 403.33760000000007, 2.0923016, 403.33760000000007, 2.02580453,
                                                               405.01120000000003, 2.02580453, 405.01120000000003, 2.0923016, 403.33760000000007, 2.0092239, 335.5568, 2.0923016, 403.33760000000007,
                                                               2.0923016, 403.33760000000007, 2.09474507, 557.3088, 2.09474507, 557.3088, 2.09474507, 557.3088, 2.09474507, 557.3088, 2.11097664, 571.5344,
                                                               2.09474507, 557.3088, 2.09474507, 557.3088, 2.11097664, 571.5344, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167,
                                                               3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 4.3932, 3.141594,
                                                               4.3932, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 4.3932, 3.141594, 4.3932, 3.141594,
                                                               15.167, 3.141594, 4.6024, 3.141594, 4.6024, 3.141594, 4.6024, 3.141594, 4.6024, 3.141594, 4.6024, 3.141594, 4.6024, 3.141594, 15.167, 3.141594,
                                                               15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 15.167, 3.141594, 4.6024, 0.131,
                                                               0.2599642458735085, 0.06276000026869928, -0.173, 0.33996695079448314, 0.35982400053705343, -0.093, 0.33996695079448314, 0.35982400053705343,
                                                               0.129, 0.2599642458735085, 0.06276000026869928, -0.191, 0.33996695079448314, 0.35982400053705343, 0.12999999999999998, 0.2599642458735085,
                                                               0.06276000026869928, 0.13659999999999997, 0.33996695079448314, 0.35982400053705343, -0.8182000021951126, 0.3249998524031036, 0.7112799996555186,
                                                               0.3868, 0.10690784617205229, 0.06568880001765333, 0.3868, 0.10690784617205229, 0.06568880001765333, -0.191, 0.33996695079448314, 0.35982400053705343,
                                                               0.12999999999999998, 0.2599642458735085, 0.06276000026869928, -0.093, 0.33996695079448314, 0.35982400053705343, 0.129, 0.2599642458735085,
                                                               0.06276000026869928, 0.8333333333333334, 0.5, 0.8333333333333333, 0.5, 0.8333333333333333, 0.5, 0.8333333333333334, 0.5, 0.8333333333333333, 0.5,
                                                               0.8333333333333333, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5,
                                                               0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5,
                                                               0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5, 0.8333333333333335, 0.5, 0.8333333333333334, 0.5, 0.8333333333333334, 0.5,
                                                               0.8333333333333334, 0.5])
        optimizable_parameters_values = np.asarray(optimizable_parameters_values)
        np.testing.assert_almost_equal(optimizable_parameters_values_to_compare, optimizable_parameters_values)

    def test_calculate_parameters_magnitudes(self):
        """
        Test the function that calculates the magnitudes of the parameters.
        """
        openmm_engine = OpenMMEngine(True, **self.kwargs_dict)
        assert type(openmm_engine) is OpenMMEngine
        system = ParaMolSystem(name="aniline", engine=openmm_engine, n_atoms=14)
        assert type(system.force_field) is ForceField
        system.force_field.create_force_field(opt_bonds=True, opt_angles=True, opt_torsions=True, opt_charges=True, opt_lj=True, opt_sc=True, ff_file=None)
        parameter_space = ParameterSpace()
        assert type(parameter_space) is ParameterSpace
        _, _ = parameter_space.get_optimizable_parameters([system])
        # Geometric
        # Expected per-parameter-type magnitudes using the geometric mean.
        parameters_magnitudes_dict, prior_widths = parameter_space.calculate_parameters_magnitudes(method="geometric")
        dict_geometric = {'bond_eq': 0.05,
                          'bond_k': 339330.6478320703,
                          'angle_eq': 2.08431266115259,
                          'angle_k': 453.49623302587844,
                          'torsion_phase': 3.141594,
                          'torsion_k': 10.370921339254062,
                          'charge': 0.5,
                          'lj_sigma': 0.3,
                          'lj_eps': 0.2,
                          'scee': 0.8333333333333334,
                          'scnb': 0.5}
        for param_type in dict_geometric.keys():
            assert abs((dict_geometric[param_type]-parameters_magnitudes_dict[param_type])) < 1e-8
        # Arithmetic
        # Expected per-parameter-type magnitudes using the arithmetic mean.
        parameters_magnitudes_dict, prior_widths = parameter_space.calculate_parameters_magnitudes(method="arithmetic")
        dict_arithmetic = {'bond_eq': 0.12305714285714285,
                           'bond_k': 342053.95428571437,
                           'angle_eq': 2.084489174285714,
                           'angle_k': 460.2798476190477,
                           'torsion_phase': 3.1415939999999987,
                           'torsion_k': 11.822788571428562,
                           'charge': 0.22274285729965088,
                           'lj_sigma': 0.27703131992011787,
                           'lj_eps': 0.23681440030404585,
                           'scee': 0.8333333333333333,
                           'scnb': 0.5}
        for param_type in dict_arithmetic.keys():
            assert abs((dict_arithmetic[param_type]-parameters_magnitudes_dict[param_type])) < 1e-8

    def test_jacobi_preconditioning(self):
        """
        Test the jacobi preconditioning function.
        """
        openmm_engine = OpenMMEngine(True, **self.kwargs_dict)
        assert type(openmm_engine) is OpenMMEngine
        system = ParaMolSystem(name="aniline", engine=openmm_engine, n_atoms=14)
        assert type(system.force_field) is ForceField
        system.force_field.create_force_field(opt_bonds=True, opt_angles=True, opt_torsions=True, opt_charges=True, opt_lj=True, opt_sc=True, ff_file=None)
        parameter_space = ParameterSpace()
        assert type(parameter_space) is ParameterSpace
        _, _ = parameter_space.get_optimizable_parameters([system])
        # Perform jacobi preconditioning
        # Scaling constants come from the arithmetic magnitudes computed above.
        parameter_space.calculate_scaling_constants("arithmetic")
        assert parameter_space.preconditioned is False
        parameter_space.jacobi_preconditioning()
        assert parameter_space.preconditioned is True
        optimizable_parameters_values_scaled_to_compare = np.asarray([ 0.88251683,  0.84596432,  0.88251683,  0.84596432,  0.88251683,
            0.84596432,  0.82238217,  0.9898125 ,  0.82238217,  0.9898125 ,
            0.88251683,  0.84596432,  0.88251683,  0.84596432,  1.13605758,
            1.12803397,  1.13605758,  1.12803397,  1.13605758,  1.12803397,
            1.13605758,  1.12803397,  1.12630601,  1.02234959,  1.13605758,
            1.12803397,  1.13605758,  1.12803397,  1.00374789,  0.87628777,
            1.00374789,  0.87628777,  1.00374789,  0.87628777,  1.00374789,
            0.87628777,  1.00374789,  0.87628777,  1.00374789,  0.87628777,
            1.00374789,  0.87628777,  0.97184699,  0.87992382,  0.97184699,
            0.87992382,  1.00374789,  0.87628777,  0.9638927 ,  0.72902779,
            1.00374789,  0.87628777,  1.00374789,  0.87628777,  1.0049201 ,
            1.21080426,  1.0049201 ,  1.21080426,  1.0049201 ,  1.21080426,
            1.0049201 ,  1.21080426,  1.01270693,  1.24171067,  1.0049201 ,
            1.21080426,  1.0049201 ,  1.21080426,  1.01270693,  1.24171067,
            1.        ,  1.28286148,  1.        ,  1.28286148,  1.        ,
            1.28286148,  1.        ,  1.28286148,  1.        ,  1.28286148,
            1.        ,  1.28286148,  1.        ,  1.28286148,  1.        ,
            1.28286148,  1.        ,  1.28286148,  1.        ,  1.28286148,
            1.        ,  0.37158746,  1.        ,  0.37158746,  1.        ,
            1.28286148,  1.        ,  1.28286148,  1.        ,  1.28286148,
            1.        ,  1.28286148,  1.        ,  1.28286148,  1.        ,
            0.37158746,  1.        ,  0.37158746,  1.        ,  1.28286148,
            1.        ,  0.3892821 ,  1.        ,  0.3892821 ,  1.        ,
            0.3892821 ,  1.        ,  0.3892821 ,  1.        ,  0.3892821 ,
            1.        ,  0.3892821 ,  1.        ,  1.28286148,  1.        ,
            1.28286148,  1.        ,  1.28286148,  1.        ,  1.28286148,
            1.        ,  1.28286148,  1.        ,  1.28286148,  1.        ,
            1.28286148,  1.        ,  1.28286148,  1.        ,  0.3892821 ,
            0.58812211,  0.93839298,  0.26501767, -0.77668035,  1.22717876,
            1.51943463, -0.41752181,  1.22717876,  1.51943463,  0.57914315,
            0.93839298,  0.26501767, -0.85749102,  1.22717876,  1.51943463,
            0.58363263,  0.93839298,  0.26501767,  0.61326321,  1.22717876,
            1.51943463, -3.673294  ,  1.17315202,  3.00353356,  1.73653155,
            0.38590527,  0.27738516,  1.73653155,  0.38590527,  0.27738516,
           -0.85749102,  1.22717876,  1.51943463,  0.58363263,  0.93839298,
            0.26501767, -0.41752181,  1.22717876,  1.51943463,  0.57914315,
            0.93839298,  0.26501767,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ,  1.        ,  1.        ,  1.        ,
            1.        ,  1.        ])
        np.testing.assert_almost_equal(optimizable_parameters_values_scaled_to_compare, parameter_space.optimizable_parameters_values_scaled)

    def test_update_systems(self):
        """
        Test the update systems function.
        """
        openmm_engine = OpenMMEngine(True, **self.kwargs_dict)
        assert type(openmm_engine) is OpenMMEngine
        system = ParaMolSystem(name="aniline", engine=openmm_engine, n_atoms=14)
        assert type(system.force_field) is ForceField
        system.force_field.create_force_field(opt_bonds=True, opt_angles=True, opt_torsions=True, opt_charges=True, opt_lj=True, opt_sc=True, ff_file=None)
        parameter_space = ParameterSpace()
        assert type(parameter_space) is ParameterSpace
        _, old_values = parameter_space.get_optimizable_parameters([system])
        # Write a vector of ones into the system and read it back; the
        # round-trip must be lossless.
        parameters = np.ones(len(old_values))
        parameter_space.update_systems([system], parameters)
        _, new_param = parameter_space.get_optimizable_parameters([system])
        np.testing.assert_almost_equal(parameters, new_param)
| StarcoderdataPython |
3200731 | <gh_stars>1-10
import argparse
from collections import defaultdict
from os import remove
from random import randrange
import genanki
from pycasia import CASIA
from hsk import HSK
from models import get_model
EXAMPLE_COUNT = 50
def create_deck(name, character_list=None, example_count=30):
    """
    Create an Anki deck (.apkg) of handwritten-character example images.

    :param name: The deck name; also used as the output filename stem.
    :param character_list: A list of characters to select. If not given, all characters in the dataset will be used.
    :param example_count: How many examples per character to include. Default is 30.
    :return: Nothing. Writes "<name>.apkg" to the working directory.
    """
    # Must be unique. See genanki details for more.
    deck_id = randrange(1 << 30, 1 << 31)
    print("Creating deck %s" % name)
    # Create deck
    deck = genanki.Deck(deck_id, name)
    # Initialize data collection
    # deck_data maps character -> list of saved example image filenames.
    data = CASIA.CASIA()
    deck_data = defaultdict(list)
    media = []
    # Get data and create media
    # NOTE(review): characters_loaded counts saved *images*, not distinct
    # characters, despite its name.
    characters_loaded = 0
    for image, character in data.load_character_images():
        # Only include requested characters
        if character_list is None or character in character_list:
            # Only include as many examples as requested
            count = len(deck_data[character])
            if count < example_count:
                # Example images are written to the cwd and deleted at the end.
                filename = "%s_%s.jpg" % (character, len(deck_data[character]) + 1)
                image.save(filename)
                deck_data[character].append(filename)
                media.append(filename)
                characters_loaded = characters_loaded + 1
        # Early stop if you have enough examples
        # (every character seen so far has reached example_count images).
        if character_list is None or characters_loaded >= len(character_list) * example_count:
            if len([character for character in deck_data if len(deck_data[character]) < example_count]) == 0:
                break
    # Create notes
    # One note per character: field 0 is the character, then one <img> per example.
    print("Creating notes")
    for character in deck_data:
        note_fields = [character]
        examples = ["<img src=\"%s\">" % image for image in deck_data[character]]
        assert len(examples) == example_count, "Wrong number of examples for %s" % character
        note_fields.extend(examples)
        my_note = genanki.Note(model=get_model(example_count=example_count), fields=note_fields)
        deck.add_note(my_note)
    # Create the package and output
    print("Creating final output")
    package = genanki.Package(deck)
    package.media_files = media
    filename = '%s.apkg' % name
    package.write_to_file(filename)
    print("Created deck %s" % filename)
    # Delete all intermediate files
    print("Cleaning up")
    for path in media:
        remove(path)
def make_hsk_decks():
    """Create one Anki deck per HSK level using the module-wide example count."""
    for level in ('HSK1', 'HSK2', 'HSK3', 'HSK4', 'HSK5', 'HSK6'):
        create_deck(level, character_list=HSK[level], example_count=EXAMPLE_COUNT)
def main():
    """Command-line entry point: parse arguments and build the requested deck."""
    parser = argparse.ArgumentParser(description='Create Anki decks based on characters .')
    parser.add_argument('name', nargs=1, type=str, help='What do we call the deck?')
    parser.add_argument('--count', nargs=1, type=int, help="How many examples to create", required=False)
    parser.add_argument('characters', nargs='*', type=str, help="Which characters should we use?")
    args = parser.parse_args()

    # Only forward example_count when --count was given, so create_deck's
    # own default applies otherwise.
    deck_kwargs = {'character_list': args.characters}
    if args.count is not None:
        deck_kwargs['example_count'] = args.count[0]
    create_deck(args.name[0], **deck_kwargs)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
112117 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
import numpy as np
import paddle
from paddle import nn
from paddle.nn import functional as F
class Stretch2D(nn.Layer):
    """Stretch an image-like tensor by integer factors along H and W.

    Parameters
    ----------
    w_scale : int
        Scalar of width.
    h_scale : int
        Scalar of the height.
    mode : str, optional
        Interpolation mode passed to ``paddle.nn.functional.interpolate``,
        by default "nearest". Supported modes include "nearest", "bilinear",
        "trilinear", "bicubic", "linear" and "area".
    """

    def __init__(self, w_scale: int, h_scale: int, mode: str="nearest"):
        super().__init__()
        self.w_scale = w_scale
        self.h_scale = h_scale
        self.mode = mode

    def forward(self, x):
        """Stretch input of shape (N, C, H, W) to (N, C, h_scale*H, w_scale*W)."""
        scale = (self.h_scale, self.w_scale)
        return F.interpolate(x, scale_factor=scale, mode=self.mode)
class UpsampleNet(nn.Layer):
    """A Layer to upsample spectrogram by applying consecutive stretch and
    convolutions.

    Parameters
    ----------
    upsample_scales : List[int]
        Upsampling factors for each stretch.
    nonlinear_activation : Optional[str], optional
        Activation after each convolution, by default None
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters passed to construct the activation, by default {}
    interpolate_mode : str, optional
        Interpolation mode of the stretch, by default "nearest"
    freq_axis_kernel_size : int, optional
        Convolution kernel size along the frequency axis, by default 1
    use_causal_conv : bool, optional
        Whether to use causal padding before convolution, by default False
        If True, causal padding is used along the time axis (padding amount
        is ``receptive field - 1`` before and 0 after); if False, "same"
        padding is used along the time axis.
    """

    def __init__(self,
                 upsample_scales: List[int],
                 nonlinear_activation: Optional[str]=None,
                 nonlinear_activation_params: Dict[str, Any]={},
                 interpolate_mode: str="nearest",
                 freq_axis_kernel_size: int=1,
                 use_causal_conv: bool=False):
        super().__init__()
        self.use_causal_conv = use_causal_conv
        self.up_layers = nn.LayerList()
        for scale in upsample_scales:
            stretch = Stretch2D(scale, 1, interpolate_mode)
            # The frequency-axis kernel must be odd so "same" padding works.
            assert freq_axis_kernel_size % 2 == 1
            freq_axis_padding = (freq_axis_kernel_size - 1) // 2
            kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
            if use_causal_conv:
                # Pad the full receptive field; the excess is cropped in forward().
                padding = (freq_axis_padding, scale * 2)
            else:
                padding = (freq_axis_padding, scale)
            conv = nn.Conv2D(
                1, 1, kernel_size, padding=padding, bias_attr=False)
            self.up_layers.extend([stretch, conv])
            if nonlinear_activation is not None:
                nonlinear = getattr(
                    nn, nonlinear_activation)(**nonlinear_activation_params)
                self.up_layers.append(nonlinear)

    def forward(self, c):
        """
        Parameters
        ----------
        c : Tensor
            Shape (N, F, T), spectrogram

        Returns
        -------
        Tensor
            Shape (N, F, T'), where ``T' = upsample_factor * T``, upsampled
            spectrogram
        """
        c = c.unsqueeze(1)
        for f in self.up_layers:
            if self.use_causal_conv and isinstance(f, nn.Conv2D):
                # Causal crop: keep only the first T time steps (the
                # pre-convolution length of ``c``). The original code indexed
                # a single column (``[..., c.shape[-1]]``), which dropped a
                # dimension; the slice ``[..., :c.shape[-1]]`` is the fix.
                c = f(c)[:, :, :, :c.shape[-1]]
            else:
                c = f(c)
        return c.squeeze(1)
class ConvInUpsampleNet(nn.Layer):
    """Spectrogram upsampler: a context-gathering Conv1D followed by an
    UpsampleNet.

    Parameters
    ----------
    upsample_scales : List[int]
        Upsampling factors for each stretch.
    nonlinear_activation : Optional[str], optional
        Activation after each convolution, by default None
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters passed to construct the activation, by default {}
    interpolate_mode : str, optional
        Interpolation mode of the stretch, by default "nearest"
    freq_axis_kernel_size : int, optional
        Convolution kernel size along the frequency axis, by default 1
    aux_channels : int, optional
        Feature size of the input, by default 80
    aux_context_window : int, optional
        Context window of the first 1D convolution applied to the input,
        by default 0. With causal convolution the kernel size is
        ``window + 1``, otherwise ``2 * window + 1``.
    use_causal_conv : bool, optional
        Whether to use causal padding, by default False. If True, only past
        context is used along the time axis.
    """

    def __init__(self,
                 upsample_scales: List[int],
                 nonlinear_activation: Optional[str]=None,
                 nonlinear_activation_params: Dict[str, Any]={},
                 interpolate_mode: str="nearest",
                 freq_axis_kernel_size: int=1,
                 aux_channels: int=80,
                 aux_context_window: int=0,
                 use_causal_conv: bool=False):
        super().__init__()
        self.aux_context_window = aux_context_window
        # Causal cropping only matters when there is context to crop.
        self.use_causal_conv = use_causal_conv and aux_context_window > 0
        if use_causal_conv:
            kernel_size = aux_context_window + 1
        else:
            kernel_size = 2 * aux_context_window + 1
        self.conv_in = nn.Conv1D(
            aux_channels,
            aux_channels,
            kernel_size=kernel_size,
            bias_attr=False)
        self.upsample = UpsampleNet(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            use_causal_conv=use_causal_conv)

    def forward(self, c):
        """Upsample spectrogram ``c`` of shape (N, F, T) to
        (N, F, upsample_factor * T)."""
        conv_out = self.conv_in(c)
        if self.use_causal_conv:
            # Drop the trailing context so only past frames are used.
            conv_out = conv_out[:, :, :-self.aux_context_window]
        return self.upsample(conv_out)
class ResidualBlock(nn.Layer):
    """A gated activation unit composed of an 1D convolution, a gated tanh
    unit and parametric residual and skip connections. For more details,
    refer to `WaveNet: A Generative Model for Raw Audio <https://arxiv.org/abs/1609.03499>`_.

    Parameters
    ----------
    kernel_size : int, optional
        Kernel size of the 1D convolution, by default 3
    residual_channels : int, optional
        Feature size of the residual output (and also the input), by default 64
    gate_channels : int, optional
        Output feature size of the 1D convolution, by default 128
    skip_channels : int, optional
        Feature size of the skip output, by default 64
    aux_channels : int, optional
        Feature size of the auxiliary input (e.g. spectrogram), by default 80
    dropout : float, optional
        Probability of the dropout before the 1D convolution, by default 0.
    dilation : int, optional
        Dilation of the 1D convolution, by default 1
    bias : bool, optional
        Whether to use bias in the 1D convolution, by default True
    use_causal_conv : bool, optional
        Whether to use causal padding for the 1D convolution, by default False
    """

    def __init__(self,
                 kernel_size: int=3,
                 residual_channels: int=64,
                 gate_channels: int=128,
                 skip_channels: int=64,
                 aux_channels: int=80,
                 dropout: float=0.,
                 dilation: int=1,
                 bias: bool=True,
                 use_causal_conv: bool=False):
        super().__init__()
        self.dropout = dropout
        if use_causal_conv:
            # Pad the full receptive field; the excess is cropped in forward().
            padding = (kernel_size - 1) * dilation
        else:
            assert kernel_size % 2 == 1
            padding = (kernel_size - 1) // 2 * dilation
        self.use_causal_conv = use_causal_conv

        self.conv = nn.Conv1D(
            residual_channels,
            gate_channels,
            kernel_size,
            padding=padding,
            dilation=dilation,
            bias_attr=bias)
        if aux_channels is not None:
            self.conv1x1_aux = nn.Conv1D(
                aux_channels, gate_channels, kernel_size=1, bias_attr=False)
        else:
            self.conv1x1_aux = None

        gate_out_channels = gate_channels // 2
        self.conv1x1_out = nn.Conv1D(
            gate_out_channels, residual_channels, kernel_size=1, bias_attr=bias)
        self.conv1x1_skip = nn.Conv1D(
            gate_out_channels, skip_channels, kernel_size=1, bias_attr=bias)

    def forward(self, x, c):
        """
        Parameters
        ----------
        x : Tensor
            Shape (N, C_res, T), the input features.
        c : Tensor
            Shape (N, C_aux, T), the auxiliary input.

        Returns
        -------
        res : Tensor
            Shape (N, C_res, T), the residual output, which is used as the
            input of the next ResidualBlock in a stack of ResidualBlocks.
        skip : Tensor
            Shape (N, C_skip, T), the skip output, which is collected among
            each layer in a stack of ResidualBlocks.
        """
        x_input = x
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.conv(x)
        # Causal crop: keep only the first T time steps. The original code
        # indexed a single column (``x[:, :, x_input.shape[-1]]``), which
        # dropped the time dimension; the slice ``:x_input.shape[-1]`` is
        # the fix.
        x = x[:, :, :x_input.shape[-1]] if self.use_causal_conv else x
        if c is not None:
            c = self.conv1x1_aux(c)
            x += c

        # Gated tanh unit: split channels into filter (a) and gate (b).
        a, b = paddle.chunk(x, 2, axis=1)
        x = paddle.tanh(a) * F.sigmoid(b)

        skip = self.conv1x1_skip(x)
        # Scale the residual sum to keep the variance roughly constant.
        res = (self.conv1x1_out(x) + x_input) * math.sqrt(0.5)
        return res, skip
class PWGGenerator(nn.Layer):
    """Wave Generator for Parallel WaveGAN
    Parameters
    ----------
    in_channels : int, optional
        Number of channels of the input waveform, by default 1
    out_channels : int, optional
        Number of channels of the output waveform, by default 1
    kernel_size : int, optional
        Kernel size of the residual blocks inside, by default 3
    layers : int, optional
        Number of residual blocks inside, by default 30
    stacks : int, optional
        The number of groups to split the residual blocks into, by default 3
        Within each group, the dilation of the residual block grows
        exponentially.
    residual_channels : int, optional
        Residual channel of the residual blocks, by default 64
    gate_channels : int, optional
        Gate channel of the residual blocks, by default 128
    skip_channels : int, optional
        Skip channel of the residual blocks, by default 64
    aux_channels : int, optional
        Auxiliary channel of the residual blocks, by default 80
    aux_context_window : int, optional
        The context window size of the first convolution applied to the
        auxiliary input, by default 2
    dropout : float, optional
        Dropout of the residual blocks, by default 0.
    bias : bool, optional
        Whether to use bias in residual blocks, by default True
    use_weight_norm : bool, optional
        Whether to use weight norm in all convolutions, by default True
    use_causal_conv : bool, optional
        Whether to use causal padding in the upsample network and residual
        blocks, by default False
    upsample_scales : List[int], optional
        Upsample scales of the upsample network, by default [4, 4, 4, 4]
    nonlinear_activation : Optional[str], optional
        Non linear activation in upsample network, by default None
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters passed to the linear activation in the upsample network,
        by default {}
    interpolate_mode : str, optional
        Interpolation mode of the upsample network, by default "nearest"
    freq_axis_kernel_size : int, optional
        Kernel size along the frequency axis of the upsample network, by default 1
    """
    def __init__(self,
                 in_channels: int=1,
                 out_channels: int=1,
                 kernel_size: int=3,
                 layers: int=30,
                 stacks: int=3,
                 residual_channels: int=64,
                 gate_channels: int=128,
                 skip_channels: int=64,
                 aux_channels: int=80,
                 aux_context_window: int=2,
                 dropout: float=0.,
                 bias: bool=True,
                 use_weight_norm: bool=True,
                 use_causal_conv: bool=False,
                 upsample_scales: List[int]=[4, 4, 4, 4],
                 nonlinear_activation: Optional[str]=None,
                 nonlinear_activation_params: Dict[str, Any]={},
                 interpolate_mode: str="nearest",
                 freq_axis_kernel_size: int=1):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.aux_channels = aux_channels
        self.aux_context_window = aux_context_window
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size
        # Residual blocks are split into ``stacks`` equal-sized groups; the
        # dilation pattern restarts at the beginning of each group.
        assert layers % stacks == 0
        layers_per_stack = layers // stacks
        # 1x1 conv lifting the raw waveform to ``residual_channels``.
        self.first_conv = nn.Conv1D(
            in_channels, residual_channels, 1, bias_attr=True)
        self.upsample_net = ConvInUpsampleNet(
            upsample_scales=upsample_scales,
            nonlinear_activation=nonlinear_activation,
            nonlinear_activation_params=nonlinear_activation_params,
            interpolate_mode=interpolate_mode,
            freq_axis_kernel_size=freq_axis_kernel_size,
            aux_channels=aux_channels,
            aux_context_window=aux_context_window,
            use_causal_conv=use_causal_conv)
        # Total factor by which the auxiliary features are upsampled in time.
        self.upsample_factor = np.prod(upsample_scales)
        self.conv_layers = nn.LayerList()
        for layer in range(layers):
            # Dilation grows exponentially (1, 2, 4, ...) within each stack.
            dilation = 2**(layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                residual_channels=residual_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=aux_channels,
                dilation=dilation,
                dropout=dropout,
                bias=bias,
                use_causal_conv=use_causal_conv)
            self.conv_layers.append(conv)
        # Output head mapping the summed skip connections to the waveform.
        self.last_conv_layers = nn.Sequential(nn.ReLU(),
                                              nn.Conv1D(
                                                  skip_channels,
                                                  skip_channels,
                                                  1,
                                                  bias_attr=True),
                                              nn.ReLU(),
                                              nn.Conv1D(
                                                  skip_channels,
                                                  out_channels,
                                                  1,
                                                  bias_attr=True))
        if use_weight_norm:
            self.apply_weight_norm()
    def forward(self, x, c):
        """Generate waveform.
        Parameters
        ----------
        x : Tensor
            Shape (N, C_in, T), The input waveform.
        c : Tensor
            Shape (N, C_aux, T'). The auxiliary input (e.g. spectrogram). It
            is upsampled to match the time resolution of the input.
        Returns
        -------
        Tensor
            Shape (N, C_out, T), the generated waveform.
        """
        c = self.upsample_net(c)
        assert c.shape[-1] == x.shape[-1]
        x = self.first_conv(x)
        # Accumulate skip connections from all residual blocks.
        skips = 0
        for f in self.conv_layers:
            x, s = f(x, c)
            skips += s
        # Rescale so the variance of the sum is independent of block count.
        skips *= math.sqrt(1.0 / len(self.conv_layers))
        x = self.last_conv_layers(skips)
        return x
    def apply_weight_norm(self):
        """Recursively apply weight normalization to all the Convolution layers
        in the sublayers.
        """
        def _apply_weight_norm(layer):
            if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
                nn.utils.weight_norm(layer)
        self.apply(_apply_weight_norm)
    def remove_weight_norm(self):
        """Recursively remove weight normalization from all the Convolution
        layers in the sublayers.
        """
        def _remove_weight_norm(layer):
            try:
                nn.utils.remove_weight_norm(layer)
            except ValueError:
                # layer had no weight norm applied; skip it
                pass
        self.apply(_remove_weight_norm)
    def inference(self, c=None):
        """Waveform generation. This function is used for single instance
        inference.
        The driving noise waveform is always drawn internally from a
        standard gaussian, sized to match the upsampled auxiliary input.
        Parameters
        ----------
        c : Tensor, optional
            Shape (T', C_aux), the auxiliary input, by default None
            NOTE(review): the None default cannot actually be used --
            ``paddle.shape(c)`` below would fail; confirm callers always
            pass a conditioning tensor.
        Returns
        -------
        Tensor
            Shape (T, C_out), the generated waveform
        """
        x = paddle.randn(
            [1, self.in_channels, paddle.shape(c)[0] * self.upsample_factor])
        c = paddle.transpose(c, [1, 0]).unsqueeze(0)  # pseudo batch
        # Pad both ends by the aux context window with edge replication so the
        # first convolution over c sees valid frames at the boundaries.
        c = nn.Pad1D(self.aux_context_window, mode='replicate')(c)
        out = self(x, c).squeeze(0).transpose([1, 0])
        return out
class PWGDiscriminator(nn.Layer):
    """A convolutional discriminator for audio.
    Parameters
    ----------
    in_channels : int, optional
        Number of channels of the input audio, by default 1
    out_channels : int, optional
        Output feature size, by default 1
    kernel_size : int, optional
        Kernel size of convolutional sublayers, by default 3
    layers : int, optional
        Number of layers, by default 10
    conv_channels : int, optional
        Feature size of the convolutional sublayers, by default 64
    dilation_factor : int, optional
        The factor with which dilation of each convolutional sublayers grows
        exponentially if it is greater than 1, else the dilation of each
        convolutional sublayers grows linearly, by default 1
    nonlinear_activation : str, optional
        The activation after each convolutional sublayer, by default "LeakyReLU"
    nonlinear_activation_params : Dict[str, Any], optional
        The parameters passed to the activation's initializer, by default
        {"negative_slope": 0.2}
    bias : bool, optional
        Whether to use bias in convolutional sublayers, by default True
    use_weight_norm : bool, optional
        Whether to use weight normalization at all convolutional sublayers,
        by default True
    """
    def __init__(
            self,
            in_channels: int=1,
            out_channels: int=1,
            kernel_size: int=3,
            layers: int=10,
            conv_channels: int=64,
            dilation_factor: int=1,
            nonlinear_activation: str="LeakyReLU",
            nonlinear_activation_params: Dict[str, Any]={"negative_slope": 0.2},
            bias: bool=True,
            use_weight_norm: bool=True):
        super().__init__()
        # Odd kernel size keeps "same" padding symmetric.
        assert kernel_size % 2 == 1
        assert dilation_factor > 0
        conv_layers = []
        conv_in_channels = in_channels
        for i in range(layers - 1):
            if i == 0:
                dilation = 1
            else:
                # Dilation grows linearly when dilation_factor == 1, otherwise
                # exponentially; all layers after the first take conv_channels
                # as input.
                dilation = i if dilation_factor == 1 else dilation_factor**i
                conv_in_channels = conv_channels
            # "Same" padding for the dilated convolution.
            padding = (kernel_size - 1) // 2 * dilation
            conv_layer = nn.Conv1D(
                conv_in_channels,
                conv_channels,
                kernel_size,
                padding=padding,
                dilation=dilation,
                bias_attr=bias)
            nonlinear = getattr(
                nn, nonlinear_activation)(**nonlinear_activation_params)
            conv_layers.append(conv_layer)
            conv_layers.append(nonlinear)
        # Final projection to the output feature size (no activation after).
        padding = (kernel_size - 1) // 2
        last_conv = nn.Conv1D(
            conv_in_channels,
            out_channels,
            kernel_size,
            padding=padding,
            bias_attr=bias)
        conv_layers.append(last_conv)
        self.conv_layers = nn.Sequential(*conv_layers)
        if use_weight_norm:
            self.apply_weight_norm()
    def forward(self, x):
        """
        Parameters
        ----------
        x : Tensor
            Shape (N, in_channels, num_samples), the input audio.
        Returns
        -------
        Tensor
            Shape (N, out_channels, num_samples), the predicted logits.
        """
        return self.conv_layers(x)
    def apply_weight_norm(self):
        """Recursively apply weight normalization to every Conv1D/Conv2D
        sublayer.
        """
        def _apply_weight_norm(layer):
            if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
                nn.utils.weight_norm(layer)
        self.apply(_apply_weight_norm)
    def remove_weight_norm(self):
        """Recursively remove weight normalization, ignoring sublayers that
        have none.
        """
        def _remove_weight_norm(layer):
            try:
                nn.utils.remove_weight_norm(layer)
            except ValueError:
                # layer had no weight norm applied; skip it
                pass
        self.apply(_remove_weight_norm)
class ResidualPWGDiscriminator(nn.Layer):
    """A wavenet-style discriminator for audio.
    Parameters
    ----------
    in_channels : int, optional
        Number of channels of the input audio, by default 1
    out_channels : int, optional
        Output feature size, by default 1
    kernel_size : int, optional
        Kernel size of residual blocks, by default 3
    layers : int, optional
        Number of residual blocks, by default 30
    stacks : int, optional
        Number of groups of residual blocks, within which the dilation
        of each residual blocks grows exponentially, by default 3
    residual_channels : int, optional
        Residual channels of residual blocks, by default 64
    gate_channels : int, optional
        Gate channels of residual blocks, by default 128
    skip_channels : int, optional
        Skip channels of residual blocks, by default 64
    dropout : float, optional
        Dropout probability of residual blocks, by default 0.
    bias : bool, optional
        Whether to use bias in residual blocks, by default True
    use_weight_norm : bool, optional
        Whether to use weight normalization in all convolutional layers,
        by default True
    use_causal_conv : bool, optional
        Whether to use causal convolution in residual blocks, by default False
    nonlinear_activation : str, optional
        Activation after convolutions other than those in residual blocks,
        by default "LeakyReLU"
    nonlinear_activation_params : Dict[str, Any], optional
        Parameters to pass to the activation, by default {"negative_slope": 0.2}
    """
    def __init__(self,
                 in_channels: int=1,
                 out_channels: int=1,
                 kernel_size: int=3,
                 layers: int=30,
                 stacks: int=3,
                 residual_channels: int=64,
                 gate_channels: int=128,
                 skip_channels: int=64,
                 dropout: float=0.,
                 bias: bool=True,
                 use_weight_norm: bool=True,
                 use_causal_conv: bool=False,
                 nonlinear_activation: str="LeakyReLU",
                 nonlinear_activation_params: Dict[
                     str, Any]={"negative_slope": 0.2}):
        super().__init__()
        # Odd kernel size keeps "same" padding symmetric.
        assert kernel_size % 2 == 1
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.layers = layers
        self.stacks = stacks
        self.kernel_size = kernel_size
        # Residual blocks split into ``stacks`` equal groups; the dilation
        # pattern restarts at the beginning of each group.
        assert layers % stacks == 0
        layers_per_stack = layers // stacks
        self.first_conv = nn.Sequential(
            nn.Conv1D(in_channels, residual_channels, 1, bias_attr=True),
            getattr(nn, nonlinear_activation)(**nonlinear_activation_params))
        self.conv_layers = nn.LayerList()
        for layer in range(layers):
            # Dilation grows exponentially (1, 2, 4, ...) within each stack.
            dilation = 2**(layer % layers_per_stack)
            conv = ResidualBlock(
                kernel_size=kernel_size,
                residual_channels=residual_channels,
                gate_channels=gate_channels,
                skip_channels=skip_channels,
                aux_channels=None,  # no auxiliary input
                dropout=dropout,
                dilation=dilation,
                bias=bias,
                use_causal_conv=use_causal_conv)
            self.conv_layers.append(conv)
        # Output head over the summed skip connections.
        self.last_conv_layers = nn.Sequential(
            getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
            nn.Conv1D(skip_channels, skip_channels, 1, bias_attr=True),
            getattr(nn, nonlinear_activation)(**nonlinear_activation_params),
            nn.Conv1D(skip_channels, out_channels, 1, bias_attr=True))
        if use_weight_norm:
            self.apply_weight_norm()
    def forward(self, x):
        """
        Parameters
        ----------
        x : Tensor
            Shape (N, in_channels, num_samples), the input audio.
        Returns
        -------
        Tensor
            Shape (N, out_channels, num_samples), the predicted logits.
        """
        x = self.first_conv(x)
        # Accumulate skip connections; blocks get no conditioning (None).
        skip = 0
        for f in self.conv_layers:
            x, h = f(x, None)
            skip += h
        # Rescale so the variance of the sum is independent of block count.
        skip *= math.sqrt(1 / len(self.conv_layers))
        x = skip
        x = self.last_conv_layers(x)
        return x
    def apply_weight_norm(self):
        """Recursively apply weight normalization to every Conv1D/Conv2D
        sublayer.
        """
        def _apply_weight_norm(layer):
            if isinstance(layer, (nn.Conv1D, nn.Conv2D)):
                nn.utils.weight_norm(layer)
        self.apply(_apply_weight_norm)
    def remove_weight_norm(self):
        """Recursively remove weight normalization, ignoring sublayers that
        have none.
        """
        def _remove_weight_norm(layer):
            try:
                nn.utils.remove_weight_norm(layer)
            except ValueError:
                # layer had no weight norm applied; skip it
                pass
        self.apply(_remove_weight_norm)
class PWGInference(nn.Layer):
    """End-to-end inference wrapper: normalize a log-mel spectrogram, then
    synthesize the waveform with a Parallel WaveGAN generator.
    """
    def __init__(self, normalizer, pwg_generator):
        """Store the feature normalizer and the trained generator."""
        super().__init__()
        self.normalizer = normalizer
        self.pwg_generator = pwg_generator
    def forward(self, logmel):
        """Return the waveform generated from the normalized *logmel*."""
        return self.pwg_generator.inference(self.normalizer(logmel))
| StarcoderdataPython |
'''
Initial conditions for the script "int_sis_1.py"
'''
import sympy as sym
from sympy.utilities.lambdify import lambdify
import numpy as np
import math
from scipy.constants import c as c_luz #metros/segundos
c_luz_km = c_luz/1000;
import os
import git
path_git = git.Repo('.', search_parent_directories=True).working_tree_dir
path_datos_global = os.path.dirname(path_git)
#os.chdir(path_git)
#os.sys.path.append('./Software/utils/')
#from cambio_parametros import params_fisicos_to_modelo_HS
def z_condicion_inicial(params_fisicos, eps=10**(-10)):
    '''Initial redshift z_i for the Odintsov system of equations.

    Equivalent closed form:
        x0 = -(1/3) * np.log((-omega_l/omega_m)*((np.log(eps)/beta)+2))
        z0 = np.exp(-x0)-1

    Parameters
    ----------
    params_fisicos : sequence of 3 floats
        (omega_m, b, _); only the first two entries are used here.
    eps : float, optional
        Tolerance below which the exponential term is considered negligible.
    '''
    omega_m, b, _ = params_fisicos
    beta = 2 / b
    omega_l = 1 - omega_m
    # Invert a(z): the cube of (1 + z_i) equals the bracketed ratio below.
    cubo = 2 * omega_l * (-np.log(eps) - 2 * beta) / (beta * omega_m)
    return cubo**(1 / 3) - 1
def condiciones_iniciales(params_fisicos, zi = 30, model = 'HS', CI_aprox=True):
    '''
    Compute the initial conditions for the system of differential equations
    for the Hu-Sawicki model and for Starobinsky n=1.
    IMPORTANT REMARK: Lamb and R_HS are rescaled by a factor H0**2 and
    H is rescaled by a factor H0. This removes the dependence of the
    initial conditions on H0. Moreover, since the outputs are dimensionless
    we can take c=1 (checked in the paper).

    Returns [E0, tildeR_i] for the 'EXP'/'Odintsov' models and
    [xi, yi, vi, wi, ri] for the 'HS' model.
    '''
    [omega_m, b, _] = params_fisicos
    z = sym.Symbol('z')
    # Dimensionless Hubble parameter E(z) = H(z)/H0 for LCDM.
    E = (omega_m*(1+z)**3 + (1-omega_m))**(0.5)
    if (model=='EXP' or model=='Odintsov'):
        omega_l = 1-omega_m
        tildeR = 2 + (omega_m/(2*(1 - omega_m))) * (1+z)**3
        tildeR_ci = sym.lambdify(z,tildeR)
        E_ci = sym.lambdify(z,E)
        tildeR_i = tildeR_ci(zi)
        E0 = E_ci(zi)  # already normalized by H0!
        return[E0, tildeR_i]
    elif model=='HS':
        R = sym.Symbol('R')
        Lamb = 3 * (1-omega_m)
        #c1,c2 = params_fisicos_to_modelo_HS(omega_m,b)
        #R_HS = 2 * Lamb * c2/c1
        R_HS = 6 * c_luz_km**2 * omega_m / (7800 * (8315)**2)
        # In Augusto's code:
        # R_0 = 3 * (1-omega_m) * b (does not add up)
        R_0 = R_HS  # not to be confused with R_i, which is R at the IC!
        # Hu-Sawicki f(R): F = R - 2*Lamb*(1 - 1/(1 + R/(b*Lamb))).
        F = R - 2 * Lamb * (1 - 1/ (1 + (R/(b*Lamb))) )
        # Derivatives of F with respect to R.
        F_R = sym.diff(F,R)  # sym.simplify removed so the exp model worked at
        # the time; it could be added back now
        F_2R = sym.diff(F_R,R)
        E_z = sym.simplify(sym.diff(E,z))
        # The final results are independent of H0, so define H=E so that the
        # units come out right in the final expressions.
        H = E
        H_z = E_z
        # Ricci scalar and its cosmic-time derivative (dt = -dz/(H*(1+z))).
        Ricci = (12*H**2 + 6*H_z*(-H*(1+z)))
        Ricci_t = sym.simplify(sym.diff(Ricci,z)*(-H*(1+z)))
        Ricci_ci = sym.lambdify(z,Ricci)
        Ricci_t_ci = sym.lambdify(z,Ricci_t)
        H_ci = sym.lambdify(z,H)
        H_z_ci = sym.lambdify(z,H_z)
        F_ci = sym.lambdify(R,F)
        F_R_ci = sym.lambdify(R,F_R)
        F_2R_ci = sym.lambdify(R,F_2R)
        R_i = Ricci_ci(zi)
        #H_ci(zi)  # checked that it matches Basilakos
        #H_z_ci(zi)  # checked that it matches Basilakos
        if CI_aprox == True:  # hybrid initial conditions
            xi = Ricci_t_ci(zi) * F_2R_ci(R_i) / (H_ci(zi) * F_R_ci(R_i))
            yi = F_ci(R_i) / (6 * (H_ci(zi)**2) * F_R_ci(R_i))
            vi = R_i / (6 * H_ci(zi)**2)
            wi = 1 + xi + yi - vi
            ri = R_i / R_0
        else:  # LCDM initial conditions
            xi = 0
            yi = (R_i - 2 * Lamb) / (6 * H_ci(zi)**2)
            vi = R_i / (6 * H_ci(zi)**2)
            wi = 1 + xi + yi - vi
            ri = R_i / R_0
        return[xi,yi,vi,wi,ri]
#%%
if __name__ == '__main__':
    # Quick sanity checks / exploration of the initial-condition helpers.
    omega_m = 0.2
    b = 0.6
    params_fisicos = [omega_m, b, 0]
    print(z_condicion_inicial(params_fisicos, eps=10**(-10)))
    #%%
    H0 = 73.48
    zi = 30
    cond_iniciales = condiciones_iniciales(params_fisicos, zi=zi, model='HS')
    print(cond_iniciales)
    #%%
    # Sweep (b, omega) and tabulate the rescaled tildeR of the EXP model.
    bs = np.arange(0.2,1.1,0.1)
    omegas = np.arange(0.2,0.51,0.01)
    output = np.zeros((len(bs),len(omegas)))
    for i, b_sweep in enumerate(bs):
        for j, omega in enumerate(omegas):
            # BUG FIX: the original passed the fixed ``omega_m`` instead of
            # the loop variable ``omega``, so the omega sweep had no effect.
            # A loop-local list is used so ``params_fisicos`` keeps its value
            # for the calls below.
            params_sweep = [omega, b_sweep, 0]
            # NOTE(review): zi=3 here vs zi=30 above -- confirm intended.
            cond_iniciales = condiciones_iniciales(params_sweep, zi=3,
                                                   model='EXP')
            output[i,j] = 2 * cond_iniciales[1]/b_sweep  # convert to r for comparison
    #np.savetxt('2darray.csv', output, delimiter=',', fmt='%1.2f')
    output
    #%%
    cond_iniciales_hibrid = condiciones_iniciales(params_fisicos, zi=zi, model='HS', CI_aprox=True)
    cond_iniciales_LCDM = condiciones_iniciales(params_fisicos, zi=zi, model='HS', CI_aprox=False)
    print(cond_iniciales_hibrid)
    print(cond_iniciales_LCDM)
| StarcoderdataPython |
#
# -*- coding: utf-8 -*-
# flake8: noqa F401
"""This simply imports certain things for backwards compatibility."""
from pkg_resources import get_distribution, DistributionNotFound
# Expose the installed distribution's version; when running from a source
# checkout that has not been installed, leave __version__ undefined.
try:
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    pass
from .thgcmd import ThgCmd, Statement, EmptyStatement, categorize
from .thgcmd import (
with_argument_list,
with_argparser,
with_argparser_and_unknown_args,
with_category,
)
from .pyscript_bridge import CommandResult
| StarcoderdataPython |
1665873 | <gh_stars>0
# rest_framework
from rest_framework.generics import ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
# api serializers
from core.api.serializers import (
ContentSerializer,
IssueSerializer,
)
# models
from core.cooggerapp.models import (
Content,
Issue,
)
# views
from core.cooggerapp.views.utils import model_filter
class ListContent(ListCreateAPIView):
    """List/create API view over ``Content``, ordered by creation time.
    NOTE(review): ``get_object`` returns a DRF ``Response`` built from the
    whole queryset rather than a single model instance, which deviates from
    the usual ``get_object`` contract -- confirm this is intentional.
    """
    model = Content
    serializer_class = ContentSerializer
    # Empty list: no permission checks on this endpoint.
    permission_classes = []
    def get_object(self):
        # Serializes the full queryset instead of looking up one object.
        queryset = self.get_queryset()
        serializer = self.serializer_class(queryset, many=True)
        return Response(serializer.data)
    def filter_queryset(self, queryset):
        # Delegates filtering to the project helper; ``model_filter`` is
        # expected to return a dict holding the filtered 'queryset'.
        return model_filter(
            self.request.query_params.items(),
            self.get_queryset()
        ).get("queryset")
    def get_queryset(self):
        # Oldest-first ordering by the ``created`` timestamp.
        return self.model.objects.all().order_by("created")
class ListIssue(ListContent):
    """Same list/create behavior as ``ListContent`` but serving ``Issue``
    records with ``IssueSerializer``.
    """
    # Fix: a dataset separator artifact (" | StarcoderdataPython |") was
    # fused onto the last line, breaking the syntax; it has been removed.
    model = Issue
    serializer_class = IssueSerializer
3287031 | <filename>py/closest_pair_test.py
import unittest
from py.closest_pair import ClosestPair
class TestClosestPair(unittest.TestCase):
    """Exercises ClosestPair against the checked-in fixture files.
    NOTE(review): fixture paths are relative ("../src/closest_pair/...");
    running the tests from a different working directory will fail.
    """
    def test_one_dimensional(self):
        # NOTE(review): no assertion here -- the result is only printed, so
        # this test cannot fail on a wrong answer; consider asserting.
        with open("../src/closest_pair/test_data/one_dimensional_points.txt") as values:
            pair_util = ClosestPair([float(value) for value in values])
            print(pair_util.one_dimensional())
    def test_two_dimensional(self):
        with open("../src/closest_pair/test_data/points.txt") as values:
            # Each fixture line is "x,y"; parse into (float, float) tuples.
            string_pairs = [value.split(',') for value in values]
            pair_util = ClosestPair([(float(pair[0]), float(pair[1])) for pair in string_pairs])
            pair_util.two_dimensional()
            self.assertAlmostEqual(pair_util.shortest_distance, 0.068070, 6)
            closest_points = pair_util.closest_points
            self.assertEqual(closest_points[0], (38523.977990, -33081.309257))
            self.assertEqual(closest_points[1], (38524.043694, -33081.291468))
| StarcoderdataPython |
""" Contains classes and methods to obtain various regression-based metrics to evaluate models."""
from sklearn import metrics
import numpy as np
import pandas as pd
import math
import sys
sys.path.append("../config")
class MetricsEval:
    """Collection of evaluation helpers for model performance.

    Provides per-KCC regression metrics (MAE, MSE, RMSE, R^2), binary
    classification metrics, cloud-of-point (COP) regression metrics and
    metrics for a heteroscedastic (aleatoric) model.
    """

    def metrics_eval_base(self, predicted_y, test_y, logs_path, run_id=0):
        """Per-KCC regression metrics: MAE, MSE, RMSE and R^2.

        :param predicted_y: predicted values, numpy.array [test_samples*kccs]
        :param test_y: actual values, numpy.array [test_samples*kccs]
        :param logs_path: logs path (kept for interface compatibility; the
            CSV dump is done by the caller)
        :returns: (dict of metrics, pandas.DataFrame indexed by KCC_ID)
        """
        import kcc_config as kcc_config
        kcc_struct = kcc_config.kcc_struct
        # IDs of the real-valued KCCs (kcc_type == 1) label the dataframe rows.
        # NOTE(review): DataFrame construction requires len(kcc_id) to equal
        # test_y.shape[1] -- confirm kcc_config matches the data.
        kcc_id = []
        for kcc in kcc_struct:
            if kcc['kcc_type'] == 1:
                kcc_id.append(kcc['kcc_id'])
        # Fix vs. original: sklearn metric signatures are (y_true, y_pred);
        # the arguments were previously swapped.  MAE/MSE are symmetric, but
        # R^2 is direction-sensitive, so test_y must come first.
        mae_KCCs = metrics.mean_absolute_error(test_y, predicted_y, multioutput='raw_values')
        mse_KCCs = metrics.mean_squared_error(test_y, predicted_y, multioutput='raw_values')
        r2_KCCs = metrics.r2_score(test_y, predicted_y, multioutput='raw_values')
        rmse_KCCs = np.sqrt(mse_KCCs)
        eval_metrics = {
            "KCC_ID": kcc_id,
            "Mean Absolute Error": mae_KCCs,
            "Mean Squared Error": mse_KCCs,
            "Root Mean Squared Error": rmse_KCCs,
            "R Squared": r2_KCCs
        }
        accuracy_metrics_df = pd.DataFrame.from_dict(eval_metrics)
        accuracy_metrics_df = accuracy_metrics_df.set_index('KCC_ID')
        # accuracy_metrics_df.to_csv(logs_path+'/metrics.csv')  # moved to caller
        return eval_metrics, accuracy_metrics_df

    def metrics_eval_classification(self, y_pred, y_true, logs_path, run_id=0):
        """Per-KCC binary classification metrics.

        :param y_pred: predicted probabilities in [0, 1],
            numpy.array [test_samples*kccs]
        :param y_true: ground-truth binary labels,
            numpy.array [test_samples*kccs]
        :param logs_path: logs path (kept for interface compatibility)
        :returns: (dict, pandas.DataFrame) with accuracy, F1, precision,
            recall, ROC-AUC and Cohen's kappa per KCC
        """
        import kcc_config as kcc_config
        kcc_struct = kcc_config.get_kcc_struct()
        kcc_id = []
        for kcc in kcc_struct:
            if kcc['kcc_type'] == 1:
                kcc_id.append(kcc['kcc_id'])
        from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                                     recall_score, roc_auc_score,
                                     cohen_kappa_score)
        acc_kccs = []
        f1_kccs = []
        pre_kccs = []
        recall_kccs = []
        roc_auc_kccs = []
        kappa_kccs = []
        for i in range(y_true.shape[1]):
            # Threshold predicted probabilities at 0.5 for the label metrics.
            y_pred_bin = np.where(y_pred[:, i] > 0.5, 1, 0)
            acc_kccs.append(accuracy_score(y_true[:, i], y_pred_bin))
            f1_kccs.append(f1_score(y_true[:, i], y_pred_bin))
            pre_kccs.append(precision_score(y_true[:, i], y_pred_bin))
            recall_kccs.append(recall_score(y_true[:, i], y_pred_bin))
            kappa_kccs.append(cohen_kappa_score(y_true[:, i], y_pred_bin))
            # ROC-AUC scores the raw probabilities, not the thresholded labels.
            roc_auc_kccs.append(roc_auc_score(y_true[:, i], y_pred[:, i]))
        eval_metrics = {
            "KCC_ID": kcc_id,
            "Accuracy": acc_kccs,
            "F1": f1_kccs,
            "Precision": pre_kccs,
            "Recall": recall_kccs,
            "ROC_AUC": roc_auc_kccs,
            "Kappa": kappa_kccs
        }
        accuracy_metrics_df = pd.DataFrame.from_dict(eval_metrics)
        accuracy_metrics_df = accuracy_metrics_df.set_index('KCC_ID')
        # accuracy_metrics_df.to_csv(logs_path+'/metrics.csv')  # moved to caller
        return eval_metrics, accuracy_metrics_df

    def metrics_eval_cop(self, predicted_y, test_y, logs_path, run_id=0):
        """Per-column regression metrics for cloud-of-point predictions,
        plus an "adjusted" R^2 computed on a filtered subset of nodes.

        :returns: (dict of metrics, pandas.DataFrame)
        """
        kcc_dim = test_y.shape[1]
        # Fix vs. original: (y_true, y_pred) argument order (R^2 direction).
        mae_KCCs = metrics.mean_absolute_error(test_y, predicted_y, multioutput='raw_values')
        mse_KCCs = metrics.mean_squared_error(test_y, predicted_y, multioutput='raw_values')
        r2_KCCs = metrics.r2_score(test_y, predicted_y, multioutput='raw_values')
        rmse_KCCs = np.sqrt(mse_KCCs)
        r2_adjusted = np.zeros(kcc_dim)
        from tqdm import tqdm
        for i in tqdm(range(kcc_dim)):
            y_cop_test_flat = test_y[:, i]
            y_cop_pred_flat = predicted_y[:, i]
            combined_array = np.stack([y_cop_test_flat, y_cop_pred_flat], axis=1)
            # NOTE(review): abs(x) >= 0 is always true, so this filter is a
            # no-op and R2_Adjusted equals R2.  It was probably meant to drop
            # inactive nodes (e.g. abs(...) > 0); kept as-is pending
            # confirmation of the intended threshold.
            filtered_array = combined_array[np.where(abs(combined_array[:, 0]) >= 0)]
            y_cop_test_vector = filtered_array[:, 0:1]
            y_cop_pred_vector = filtered_array[:, 1:2]
            r2_adjusted[i] = metrics.r2_score(
                y_cop_test_vector, y_cop_pred_vector, multioutput='raw_values')[0]
        eval_metrics = {
            "Mean Absolute Error": mae_KCCs,
            "Mean Squared Error": mse_KCCs,
            "Root Mean Squared Error": rmse_KCCs,
            "R Squared": r2_KCCs,
            "R Squared Adjusted": r2_adjusted
        }
        accuracy_metrics_df = pd.DataFrame(
            {'MAE': mae_KCCs, 'MSE': mse_KCCs, 'RMSE': rmse_KCCs,
             'R2': r2_KCCs, "R2_Adjusted": r2_adjusted},
            columns=['MAE', 'MSE', 'RMSE', 'R2', "R2_Adjusted"])
        # accuracy_metrics_df.to_csv(logs_path+'/metrics.csv')  # moved to caller
        return eval_metrics, accuracy_metrics_df

    def metrics_eval_aleatoric_model(self, predicted_y, test_y, logs_path):
        """Regression metrics for a model that also predicts aleatoric
        (data) uncertainty.

        ``predicted_y`` has one extra column: columns 0..kcc_dim-1 are the
        KCC predictions and column kcc_dim is the predicted log-variance.

        Bug fixes vs. the original: ``y_pred`` and bare ``sqrt`` were
        undefined names (NameError), and the KCC slice ``0:(kcc_dim-1)``
        dropped the last KCC column.
        """
        kcc_dim = test_y.shape[1]
        log_variance = predicted_y[:, kcc_dim]
        variance = np.exp(log_variance)
        predicted_y_sub = predicted_y[:, 0:kcc_dim]
        standard_deviation = np.sqrt(variance)
        avg_aleatoric_SD = np.mean(standard_deviation)
        kcc_id = ["KCC_" + str(i + 1) for i in range(kcc_dim)]
        mae_KCCs = metrics.mean_absolute_error(test_y, predicted_y_sub, multioutput='raw_values')
        mse_KCCs = metrics.mean_squared_error(test_y, predicted_y_sub, multioutput='raw_values')
        r2_KCCs = metrics.r2_score(test_y, predicted_y_sub, multioutput='raw_values')
        rmse_KCCs = np.sqrt(mse_KCCs)
        eval_metrics = {
            "Mean Absolute Error": mae_KCCs,
            "Mean Squared Error": mse_KCCs,
            "Root Mean Squared Error": rmse_KCCs,
            "R Squared": r2_KCCs,
            "Aleatoric Standard Deviation": avg_aleatoric_SD
        }
        accuracy_metrics_df = pd.DataFrame(
            {'KCC_ID': kcc_id, 'MAE': mae_KCCs, 'MSE': mse_KCCs,
             'RMSE': rmse_KCCs, 'R2': r2_KCCs})
        accuracy_metrics_df.columns = ['KCC_ID', 'MAE', 'MSE', 'RMSE', 'R2']
        accuracy_metrics_df.to_csv(logs_path + '/metrics.csv')
        return eval_metrics
3312455 | # -*- encoding: utf-8 -*-
"""
Created by <NAME> at 22/09/2021 at 23:08:17
Project: py_dss_tools [set, 2021]
"""
class CNData:
    """Container for the parameters of an OpenDSS ``CNData`` (concentric
    neutral cable data) element.

    Every entry in :attr:`columns` except ``like`` is exposed as a plain
    read/write property backed by a name-mangled ``_CNData__<name>``
    attribute that ``__init__`` sets to ``None``.  Instead of writing the
    twenty-one identical property blocks out by hand, they are generated
    programmatically below the class body.
    """
    name = "CNData"
    name_plural = "CNData"
    # NOTE(review): 'like' is listed in ``columns`` but, as in the original
    # hand-written version, has no backing attribute or property -- confirm
    # that omission is intentional.
    columns = ['capradius', 'diacable', 'diains', 'diam', 'diastrand', 'emergamps', 'epsr', 'gmrac', 'gmrstrand',
               'gmrunits', 'inslayer', 'k', 'like', 'normamps', 'rac', 'radius', 'radunits', 'ratings', 'rdc',
               'rstrand', 'runits', 'seasons']

    @staticmethod
    def _property_names():
        """Column names that are backed by a generated property."""
        return [col for col in CNData.columns if col != 'like']

    def __init__(self):
        # Initialise every backing attribute (name-mangled exactly like the
        # original ``self.__<field>`` assignments) to None.
        for field in CNData._property_names():
            setattr(self, '_CNData__' + field, None)


def _cndata_make_property(field):
    """Build a property reading/writing the mangled ``_CNData__<field>``."""
    attr = '_CNData__' + field

    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set)


# Attach one property per data field, replacing the hand-written blocks.
for _cndata_field in CNData._property_names():
    setattr(CNData, _cndata_field, _cndata_make_property(_cndata_field))
del _cndata_field
56574 | <filename>wealthbot/ria/forms/__init__.py
from .riskQuestions import *
from .riaSearchClients import *
from .inviteProspect import *
from .suggestedPortfolio import *
from .riaClientAccount import *
| StarcoderdataPython |
1742792 | <gh_stars>1-10
#!/usr/bin/env python3
import MySQLdb
import sys
import os
from importlib import import_module
USING_DB = 'default'
if __name__ == '__main__':
    # Import a netflow dump into the database: move the dump into a private
    # working file, export the ip->account map, then pipe both through the
    # native djing_flow converter into mysql.
    if len(sys.argv) < 2:
        print("File name of netflow required")
        exit(1)
    FNAME = sys.argv[1]
    cur_dir = os.path.dirname(os.path.abspath(__file__))
    tmp_dir = '/tmp/djing_flow'
    tmp_ipuser_file = '/tmp/ipuser.txt'
    tmp_dump_file = '%s/djing_flow_dump.tmp' % tmp_dir
    os.chdir(cur_dir)
    if not os.path.isdir(tmp_dir):
        os.mkdir(tmp_dir)
    os.rename('/tmp/djing_flow/%s' % FNAME, tmp_dump_file)
    sys.path.append('../../')
    local_settings = import_module('djing.local_settings')
    usedb = local_settings.DATABASES.get(USING_DB)
    db = MySQLdb.connect(
        host=usedb['HOST'],
        user=usedb['USER'],
        # Fix: this line was mangled by dataset anonymization
        # ("passwd=<PASSWORD>['PASSWORD']"); reconstructed from the
        # settings-dict keys used elsewhere in this script.
        passwd=usedb['PASSWORD'],
        db=usedb['NAME'],
        charset='utf8'
    )
    cursor = db.cursor()
    # Map every employed IP (as a 32-bit integer) to its account id.
    sql = (
        'SELECT INET_ATON(emps.ip) as uip, acc.id FROM abonent '
        'LEFT JOIN base_accounts AS acc ON (acc.id = abonent.baseaccount_ptr_id) '
        'LEFT JOIN abonent_ip_addresses AS ips ON (acc.id = ips.abon_id) '
        'LEFT JOIN ip_pool_employed_ip AS emps ON (ips.ipleasemodel_id = emps.id) '
        'WHERE INET_ATON(emps.ip) != 0;'
    )
    ln = cursor.execute(sql)
    with open(tmp_ipuser_file, 'w') as f:
        # First line is the row count, then one "ip-account" pair per line.
        f.write("count: %d\n" % ln)
        while True:
            row = cursor.fetchone()
            if row is None:
                break
            f.write("%d-%d\n" % row)
    db.close()
    # SECURITY NOTE: the DB password is interpolated into a shell command and
    # is therefore visible in the process list; consider a mysql option file.
    # Fix: the --password value was mangled by anonymization
    # ("--password=%(DB_<PASSWORD>"); restored to the DB_PASSW mapping key.
    os.system(
        '/bin/bash -c "export LD_LIBRARY_PATH=. && '
        '%(CUR_DIR)s/djing_flow %(TMP_IPUSER_FILE)s < %(TMP_DUMP)s | '
        '/usr/bin/mysql -u%(DB_USER)s -h %(HOST)s -p %(DB_NAME)s --password=%(DB_PASSW)s"' % {
            'CUR_DIR': cur_dir,
            'TMP_IPUSER_FILE': tmp_ipuser_file,
            'TMP_DUMP': tmp_dump_file,
            'DB_USER': usedb['USER'],
            'HOST': usedb['HOST'],
            'DB_NAME': usedb['NAME'],
            'DB_PASSW': usedb['PASSWORD']
        })
    os.remove(tmp_dump_file)
    os.remove(tmp_ipuser_file)
| StarcoderdataPython |
# Fix: a dataset artifact ("11376 | ") was fused onto the import line,
# breaking the syntax; it has been removed.
from openpyxl import Workbook

wb = Workbook()
ws = wb.active
# Header row followed by (fruit, quantity) records.
data = [
    ["Fruit", "Quantity"],
    ["Kiwi", 3],
    ["Grape", 15],
    ["Apple", 3],
    ["Peach", 3],
    ["Pomegranate", 3],
    ["Pear", 3],
    ["Tangerine", 3],
    ["Blueberry", 3],
    ["Mango", 3],
    ["Watermelon", 3],
    ["Blackberry", 3],
    ["Orange", 3],
    ["Raspberry", 3],
    ["Banana", 3]
]
for r in data:
    ws.append(r)
# Attach an auto-filter over the populated range, keep only three fruits in
# column 0 and sort by the quantity column.  NOTE: openpyxl only records the
# filter/sort definitions; rows are hidden/sorted by the spreadsheet
# application when the file is opened.
ws.auto_filter.ref = "A1:B15"
ws.auto_filter.add_filter_column(0, ["Kiwi", "Apple", "Mango"])
ws.auto_filter.add_sort_condition("B2:B15")
wb.save("filtered.xlsx")
| StarcoderdataPython |
1768455 | <filename>seed/setting.py
import os
from dynaconf import Dynaconf
# Directory containing this file; the settings paths below are resolved
# relative to it.
_root_path: str = os.path.dirname(os.path.abspath(__file__))
# Layered Dynaconf settings object: defaults first, then testing overrides.
# The active environment comes from $ENV (default "development"), and
# environment variables prefixed with SEED_ override file values.
setting: Dynaconf = Dynaconf(
    env=os.environ.get('ENV', 'development').lower(),
    envvar_prefix='SEED',
    environments=True,
    settings_files=[
        os.path.join(_root_path, './settings/secrets/.secrets.default.toml'),
        os.path.join(_root_path, './settings/setting.default.toml'),
        os.path.join(_root_path, './settings/secrets/.secrets.testing.toml'),
        os.path.join(_root_path, './settings/setting.testing.toml')
    ]
)
| StarcoderdataPython |
1672942 | <filename>neutron/plugins/ml2/drivers/ovn/mech_driver/ovsdb/ovn_client.py
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import netaddr
from neutron_lib.api.definitions import l3
from neutron_lib.api.definitions import port_security as psec
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import provider_net as pnet
from neutron_lib.api.definitions import segment as segment_def
from neutron_lib import constants as const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from neutron_lib.utils import helpers
from neutron_lib.utils import net as n_net
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from ovsdbapp.backend.ovs_idl import idlutils
from neutron.common.ovn import acl as ovn_acl
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_revision_numbers_db as db_rev
from neutron.db import segments_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.extensions \
import qos as qos_extension
from neutron.scheduler import l3_ovn_scheduler
LOG = log.getLogger(__name__)
# Aggregate of everything _get_port_options() derives from a Neutron port
# before it is written to the OVN NB Logical_Switch_Port table.
OvnPortInfo = collections.namedtuple(
    'OvnPortInfo', ['type', 'options', 'addresses', 'port_security',
                    'parent_name', 'tag', 'dhcpv4_options', 'dhcpv6_options',
                    'cidrs', 'device_owner', 'security_group_ids'])
# Per-external-fixed-ip gateway data collected by _get_gw_info().
GW_INFO = collections.namedtuple('GatewayInfo', ['network_id', 'subnet_id',
                                 'router_ip', 'gateway_ip',
                                 'ip_version', 'ip_prefix'])
class OVNClient(object):
def __init__(self, nb_idl, sb_idl):
self._nb_idl = nb_idl
self._sb_idl = sb_idl
self._plugin_property = None
self._l3_plugin_property = None
# TODO(ralonsoh): handle the OVN client extensions with an ext. manager
self._qos_driver = qos_extension.OVNClientQosExtension(self)
self._ovn_scheduler = l3_ovn_scheduler.get_scheduler()
@property
def _plugin(self):
if self._plugin_property is None:
self._plugin_property = directory.get_plugin()
return self._plugin_property
@property
def _l3_plugin(self):
if self._l3_plugin_property is None:
self._l3_plugin_property = directory.get_plugin(
plugin_constants.L3)
return self._l3_plugin_property
def _transaction(self, commands, txn=None):
"""Create a new transaction or add the commands to an existing one."""
if txn is None:
with self._nb_idl.transaction(check_error=True) as new_txn:
for cmd in commands:
new_txn.add(cmd)
else:
for cmd in commands:
txn.add(cmd)
def _is_virtual_port_supported(self):
# TODO(lucasagomes): Remove this method in the future. The
# "virtual" port type was added in the version 2.12 of OVN
return self._sb_idl.is_col_present('Port_Binding', 'virtual_parent')
def is_external_ports_supported(self):
return self._nb_idl.is_col_present(
'Logical_Switch_Port', 'ha_chassis_group')
def _get_allowed_addresses_from_port(self, port):
if not port.get(psec.PORTSECURITY):
return [], []
if utils.is_lsp_trusted(port):
return [], []
allowed_addresses = set()
new_macs = set()
addresses = port['mac_address']
for ip in port.get('fixed_ips', []):
addresses += ' ' + ip['ip_address']
for allowed_address in port.get('allowed_address_pairs', []):
# If allowed address pair has same mac as the port mac,
# append the allowed ip address to the 'addresses'.
# Else we will have multiple entries for the same mac in
# 'Logical_Switch_Port.port_security'.
if allowed_address['mac_address'] == port['mac_address']:
addresses += ' ' + allowed_address['ip_address']
else:
allowed_addresses.add(allowed_address['mac_address'] + ' ' +
allowed_address['ip_address'])
new_macs.add(allowed_address['mac_address'])
allowed_addresses.add(addresses)
return list(allowed_addresses), list(new_macs)
def _get_subnet_dhcp_options_for_port(self, port, ip_version):
"""Returns the subnet dhcp options for the port.
Return the first found DHCP options belong for the port.
"""
subnets = [
fixed_ip['subnet_id']
for fixed_ip in port['fixed_ips']
if netaddr.IPAddress(fixed_ip['ip_address']).version == ip_version]
get_opts = self._nb_idl.get_subnets_dhcp_options(subnets)
if get_opts:
if ip_version == const.IP_VERSION_6:
# Always try to find a dhcpv6 stateful v6 subnet to return.
# This ensures port can get one stateful v6 address when port
# has multiple dhcpv6 stateful and stateless subnets.
for opts in get_opts:
# We are setting ovn_const.DHCPV6_STATELESS_OPT to "true"
# in _get_ovn_dhcpv6_opts, so entries in DHCP_Options table
# should have unicode type 'true' if they were defined as
# dhcpv6 stateless.
if opts['options'].get(
ovn_const.DHCPV6_STATELESS_OPT) != 'true':
return opts
return get_opts[0]
    def _get_port_dhcp_options(self, port, ip_version):
        """Return DHCP options for the port for the given IP version.

        Returns None when the port is DHCP-disabled or all of its IP
        addresses belong to DHCP-disabled subnets.  Otherwise returns a
        dict:

        - with the content of an existing DHCP_Options row for the subnet,
          if the port has no extra DHCP options;
        - with only one item ('cmd', AddDHCPOptionsCommand(..)) if the
          port has extra DHCP options.  The command should be processed in
          the same transaction as the port create/update command to avoid
          leaving an orphan DHCP_Options row behind.
        """
        lsp_dhcp_disabled, lsp_dhcp_opts = utils.get_lsp_dhcp_opts(
            port, ip_version)
        if lsp_dhcp_disabled:
            # DHCP explicitly disabled on the port itself.
            return
        subnet_dhcp_options = self._get_subnet_dhcp_options_for_port(
            port, ip_version)
        if not subnet_dhcp_options:
            # NOTE(lizk): It's possible for Neutron to configure a port with IP
            # address belongs to subnet disabled dhcp. And no DHCP_Options row
            # will be inserted for such a subnet. So in that case, the subnet
            # dhcp options here will be None.
            return
        if not lsp_dhcp_opts:
            # No per-port extras: reuse the subnet's DHCP_Options row as-is.
            return subnet_dhcp_options
        # This port has extra DHCP options defined, so we will create a new
        # row in DHCP_Options table for it.
        subnet_dhcp_options['options'].update(lsp_dhcp_opts)
        subnet_dhcp_options['external_ids'].update(
            {'port_id': port['id']})
        subnet_id = subnet_dhcp_options['external_ids']['subnet_id']
        add_dhcp_opts_cmd = self._nb_idl.add_dhcp_options(
            subnet_id, port_id=port['id'],
            cidr=subnet_dhcp_options['cidr'],
            options=subnet_dhcp_options['options'],
            external_ids=subnet_dhcp_options['external_ids'])
        return {'cmd': add_dhcp_opts_cmd}
def get_virtual_port_parents(self, virtual_ip, port):
ls = self._nb_idl.ls_get(utils.ovn_name(port['network_id'])).execute(
check_error=True)
return [lsp.name for lsp in ls.ports
if lsp.name != port['id'] and
virtual_ip in utils.get_ovn_port_addresses(lsp)]
    def _get_port_options(self, port):
        """Derive all OVN Logical_Switch_Port attributes for *port*.

        Returns an OvnPortInfo namedtuple with the LSP type, options,
        addresses, port-security entries, DHCP options, CIDRs and related
        metadata computed from the Neutron port and its binding profile.
        """
        context = n_context.get_admin_context()
        binding_prof = utils.validate_and_get_data_from_binding_profile(port)
        vtep_physical_switch = binding_prof.get('vtep-physical-switch')
        port_type = ''
        cidrs = ''
        if vtep_physical_switch:
            # Hardware VTEP gateway port: addresses are "unknown" to OVN.
            vtep_logical_switch = binding_prof.get('vtep-logical-switch')
            port_type = 'vtep'
            options = {'vtep-physical-switch': vtep_physical_switch,
                       'vtep-logical-switch': vtep_logical_switch}
            addresses = [ovn_const.UNKNOWN_ADDR]
            parent_name = []
            tag = []
            port_security = []
        else:
            options = {}
            parent_name = binding_prof.get('parent_name', [])
            tag = binding_prof.get('tag', [])
            # Accumulate "<mac> <ip1> <ip2> ..." plus the CIDR list; skip
            # fixed IPs whose subnet has vanished meanwhile.
            address = port['mac_address']
            for ip in port.get('fixed_ips', []):
                try:
                    subnet = self._plugin.get_subnet(context, ip['subnet_id'])
                except n_exc.SubnetNotFound:
                    continue
                ip_addr = ip['ip_address']
                address += ' ' + ip_addr
                cidrs += ' {}/{}'.format(ip['ip_address'],
                                         subnet['cidr'].split('/')[1])
                # Check if the port being created is a virtual port
                if (self._is_virtual_port_supported() and
                        not port['device_owner']):
                    parents = self.get_virtual_port_parents(ip_addr, port)
                    if parents:
                        port_type = ovn_const.LSP_TYPE_VIRTUAL
                        options[ovn_const.LSP_OPTIONS_VIRTUAL_IP_KEY] = ip_addr
                        options[ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY] = (
                            ','.join(parents))
            port_security, new_macs = (
                self._get_allowed_addresses_from_port(port))
            addresses = [address]
            addresses.extend(new_macs)
            # Only adjust the OVN type if the port is not owned by Neutron
            # DHCP agents.
            # TODO(mjozefcz): Remove const.DEVICE_OWNER_DHCP
            # from get_ports in W-release.
            if (port['device_owner'] in [
                    const.DEVICE_OWNER_DISTRIBUTED,
                    const.DEVICE_OWNER_DHCP] and
                    not utils.is_neutron_dhcp_agent_port(port)):
                port_type = 'localport'
            capabilities = utils.get_port_capabilities(port)
            vnic_type = port.get(portbindings.VNIC_TYPE,
                                 portbindings.VNIC_NORMAL)
            if (vnic_type in ovn_const.EXTERNAL_PORT_TYPES and
                    ovn_const.PORT_CAP_SWITCHDEV not in capabilities):
                if self.is_external_ports_supported():
                    port_type = ovn_const.LSP_TYPE_EXTERNAL
                else:
                    LOG.warning('The version of OVN used does not support '
                                'the "external ports" feature used for '
                                'SR-IOV ports with OVN native DHCP')
        # The "unknown" address should only be set for the normal LSP
        # ports (the ones which type is empty)
        if not port_security and not port_type:
            # Port security is disabled for this port.
            # So this port can send traffic with any mac address.
            # OVN allows any mac address from a port if "unknown"
            # is added to the Logical_Switch_Port.addresses column.
            # So add it.
            addresses.append(ovn_const.UNKNOWN_ADDR)
        dhcpv4_options = self._get_port_dhcp_options(port, const.IP_VERSION_4)
        dhcpv6_options = self._get_port_dhcp_options(port, const.IP_VERSION_6)
        # HA Chassis Group will bind the port to the highest
        # priority Chassis
        if port_type != ovn_const.LSP_TYPE_EXTERNAL:
            options.update({'requested-chassis':
                            port.get(portbindings.HOST_ID, '')})
        device_owner = port.get('device_owner', '')
        sg_ids = ' '.join(utils.get_lsp_security_groups(port))
        return OvnPortInfo(port_type, options, addresses, port_security,
                           parent_name, tag, dhcpv4_options, dhcpv6_options,
                           cidrs.strip(), device_owner, sg_ids)
def _get_default_ha_chassis_group(self):
return self._nb_idl.ha_chassis_group_get(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME).execute(
check_error=True).uuid
    def create_port(self, context, port):
        """Create the OVN Logical_Switch_Port for a new Neutron port.

        Runs one NB transaction that creates the LSP (plus any per-port
        DHCP_Options row), wires up port groups / DNS / QoS, and finally
        bumps the port's revision number.
        """
        if utils.is_lsp_ignored(port):
            return
        port_info = self._get_port_options(port)
        external_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port['name'],
                        ovn_const.OVN_DEVID_EXT_ID_KEY: port['device_id'],
                        ovn_const.OVN_PROJID_EXT_ID_KEY: port['project_id'],
                        ovn_const.OVN_CIDRS_EXT_ID_KEY: port_info.cidrs,
                        ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
                        port_info.device_owner,
                        ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY:
                        utils.ovn_name(port['network_id']),
                        ovn_const.OVN_SG_IDS_EXT_ID_KEY:
                        port_info.security_group_ids,
                        ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                            utils.get_revision_number(
                                port, ovn_const.TYPE_PORTS))}
        lswitch_name = utils.ovn_name(port['network_id'])
        # It's possible to have a network created on one controller and then a
        # port created on a different controller quickly enough that the second
        # controller does not yet see that network in its local cache of the
        # OVN northbound database. Check if the logical switch is present
        # or not in the idl's local copy of the database before creating
        # the lswitch port.
        self._nb_idl.check_for_row_by_value_and_retry(
            'Logical_Switch', 'name', lswitch_name)
        with self._nb_idl.transaction(check_error=True) as txn:
            # A 'cmd' entry means a new DHCP_Options row must be created in
            # this same transaction; otherwise reuse the existing row UUID.
            if not port_info.dhcpv4_options:
                dhcpv4_options = []
            elif 'cmd' in port_info.dhcpv4_options:
                dhcpv4_options = txn.add(port_info.dhcpv4_options['cmd'])
            else:
                dhcpv4_options = [port_info.dhcpv4_options['uuid']]
            if not port_info.dhcpv6_options:
                dhcpv6_options = []
            elif 'cmd' in port_info.dhcpv6_options:
                dhcpv6_options = txn.add(port_info.dhcpv6_options['cmd'])
            else:
                dhcpv6_options = [port_info.dhcpv6_options['uuid']]
            # The lport_name *must* be neutron port['id']. It must match the
            # iface-id set in the Interfaces table of the Open_vSwitch
            # database which nova sets to be the port ID.
            kwargs = {
                'lport_name': port['id'],
                'lswitch_name': lswitch_name,
                'addresses': port_info.addresses,
                'external_ids': external_ids,
                'parent_name': port_info.parent_name,
                'tag': port_info.tag,
                'enabled': port.get('admin_state_up'),
                'options': port_info.options,
                'type': port_info.type,
                'port_security': port_info.port_security,
                'dhcpv4_options': dhcpv4_options,
                'dhcpv6_options': dhcpv6_options
            }
            if (self.is_external_ports_supported() and
                    port_info.type == ovn_const.LSP_TYPE_EXTERNAL):
                kwargs['ha_chassis_group'] = (
                    self._get_default_ha_chassis_group())
            # NOTE(mjozefcz): Do not set addresses if the port is not
            # bound, has no device_owner and it is OVN LB VIP port.
            # For more details check related bug #1789686.
            if (port.get('name').startswith(ovn_const.LB_VIP_PORT_PREFIX) and
                    not port.get('device_owner') and
                    port.get(portbindings.VIF_TYPE) ==
                    portbindings.VIF_TYPE_UNBOUND):
                kwargs['addresses'] = []
            # Check if the parent port was created with the
            # allowed_address_pairs already set
            allowed_address_pairs = port.get('allowed_address_pairs', [])
            if (self._is_virtual_port_supported() and
                    allowed_address_pairs and
                    port_info.type != ovn_const.LSP_TYPE_VIRTUAL):
                addrs = [addr['ip_address'] for addr in allowed_address_pairs]
                self._set_unset_virtual_port_type(context, txn, port, addrs)
            port_cmd = txn.add(self._nb_idl.create_lswitch_port(
                **kwargs))
            sg_ids = utils.get_lsp_security_groups(port)
            # If this is not a trusted port or port security is enabled,
            # add it to the default drop Port Group so that all traffic
            # is dropped by default.
            if not utils.is_lsp_trusted(port) or port_info.port_security:
                self._add_port_to_drop_port_group(port_cmd, txn)
            # Just add the port to its Port Group.
            for sg in sg_ids:
                txn.add(self._nb_idl.pg_add_ports(
                    utils.ovn_port_group_name(sg), port_cmd))
            if self.is_dns_required_for_port(port):
                self.add_txns_to_sync_port_dns_records(txn, port)
            self._qos_driver.create_port(txn, port)
        db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS)
def _set_unset_virtual_port_type(self, context, txn, parent_port,
addresses, unset=False):
cmd = self._nb_idl.set_lswitch_port_to_virtual_type
if unset:
cmd = self._nb_idl.unset_lswitch_port_to_virtual_type
for addr in addresses:
virt_port = self._plugin.get_ports(context, filters={
portbindings.VIF_TYPE: portbindings.VIF_TYPE_UNBOUND,
'network_id': [parent_port['network_id']],
'fixed_ips': {'ip_address': [addr]}})
if not virt_port:
continue
virt_port = virt_port[0]
args = {'lport_name': virt_port['id'],
'virtual_parent': parent_port['id'],
'if_exists': True}
LOG.debug("Parent port %(virtual_parent)s found for "
"virtual port %(lport_name)s", args)
if not unset:
args['vip'] = addr
txn.add(cmd(**args))
# TODO(lucasagomes): The ``port_object`` parameter was added to
# keep things backward compatible. Remove it in the Rocky release.
    def update_port(self, context, port, port_object=None):
        """Sync an updated Neutron port to its OVN Logical_Switch_Port.

        The whole update runs in one NB transaction guarded by a revision
        check so stale updates do not clobber newer state; the revision is
        bumped only if the transaction actually committed.
        """
        if utils.is_lsp_ignored(port):
            return
        port_info = self._get_port_options(port)
        external_ids = {ovn_const.OVN_PORT_NAME_EXT_ID_KEY: port['name'],
                        ovn_const.OVN_DEVID_EXT_ID_KEY: port['device_id'],
                        ovn_const.OVN_PROJID_EXT_ID_KEY: port['project_id'],
                        ovn_const.OVN_CIDRS_EXT_ID_KEY: port_info.cidrs,
                        ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
                        port_info.device_owner,
                        ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY:
                        utils.ovn_name(port['network_id']),
                        ovn_const.OVN_SG_IDS_EXT_ID_KEY:
                        port_info.security_group_ids,
                        ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
                            utils.get_revision_number(
                                port, ovn_const.TYPE_PORTS))}
        check_rev_cmd = self._nb_idl.check_revision_number(
            port['id'], port, ovn_const.TYPE_PORTS)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            columns_dict = {}
            if utils.is_lsp_router_port(port):
                # Router ports keep their OVN-managed options; type and
                # addresses are left untouched.
                port_info.options.update(
                    self._nb_idl.get_router_port_options(port['id']))
            else:
                columns_dict['type'] = port_info.type
                columns_dict['addresses'] = port_info.addresses
            # A 'cmd' entry means a new DHCP_Options row must be created in
            # this same transaction; otherwise reuse the existing row UUID.
            if not port_info.dhcpv4_options:
                dhcpv4_options = []
            elif 'cmd' in port_info.dhcpv4_options:
                dhcpv4_options = txn.add(port_info.dhcpv4_options['cmd'])
            else:
                dhcpv4_options = [port_info.dhcpv4_options['uuid']]
            if not port_info.dhcpv6_options:
                dhcpv6_options = []
            elif 'cmd' in port_info.dhcpv6_options:
                dhcpv6_options = txn.add(port_info.dhcpv6_options['cmd'])
            else:
                dhcpv6_options = [port_info.dhcpv6_options['uuid']]
            # NOTE(mjozefcz): Do not set addresses if the port is not
            # bound, has no device_owner and it is OVN LB VIP port.
            # For more details check related bug #1789686.
            if (port.get('name').startswith(ovn_const.LB_VIP_PORT_PREFIX) and
                    not port.get('device_owner') and
                    port.get(portbindings.VIF_TYPE) ==
                    portbindings.VIF_TYPE_UNBOUND):
                columns_dict['addresses'] = []
            if self.is_external_ports_supported():
                if port_info.type == ovn_const.LSP_TYPE_EXTERNAL:
                    columns_dict['ha_chassis_group'] = (
                        self._get_default_ha_chassis_group())
                else:
                    # Clear the ha_chassis_group field
                    columns_dict['ha_chassis_group'] = []
            ovn_port = self._nb_idl.lookup('Logical_Switch_Port', port['id'])
            addr_pairs_diff = utils.compute_address_pairs_diff(ovn_port, port)
            if (self._is_virtual_port_supported() and
                    port_info.type != ovn_const.LSP_TYPE_VIRTUAL):
                self._set_unset_virtual_port_type(
                    context, txn, port, addr_pairs_diff.added)
                self._set_unset_virtual_port_type(
                    context, txn, port, addr_pairs_diff.removed,
                    unset=True)
            # Keep key value pairs that were in the original external ids
            # of the ovn port and we did not touch.
            for k, v in ovn_port.external_ids.items():
                external_ids.setdefault(k, v)
            # NOTE(lizk): Fail port updating if port doesn't exist. This
            # prevents any new inserted resources to be orphan, such as port
            # dhcp options or ACL rules for port, e.g. a port was created
            # without extra dhcp options and security group, while updating
            # includes the new attributes setting to port.
            txn.add(self._nb_idl.set_lswitch_port(
                lport_name=port['id'],
                external_ids=external_ids,
                parent_name=port_info.parent_name,
                tag=port_info.tag,
                options=port_info.options,
                enabled=port['admin_state_up'],
                port_security=port_info.port_security,
                dhcpv4_options=dhcpv4_options,
                dhcpv6_options=dhcpv6_options,
                if_exists=False,
                **columns_dict))
            # Determine if security groups or fixed IPs are updated.
            old_sg_ids = set(utils.get_ovn_port_security_groups(ovn_port))
            new_sg_ids = set(utils.get_lsp_security_groups(port))
            detached_sg_ids = old_sg_ids - new_sg_ids
            attached_sg_ids = new_sg_ids - old_sg_ids
            for sg in detached_sg_ids:
                txn.add(self._nb_idl.pg_del_ports(
                    utils.ovn_port_group_name(sg), port['id']))
            for sg in attached_sg_ids:
                txn.add(self._nb_idl.pg_add_ports(
                    utils.ovn_port_group_name(sg), port['id']))
            if (not utils.is_lsp_trusted(port) and
                    utils.is_port_security_enabled(port)):
                self._add_port_to_drop_port_group(port['id'], txn)
            # If the port doesn't belong to any security group and
            # port_security is disabled, or it's a trusted port, then
            # allow all traffic.
            elif ((not new_sg_ids and
                   not utils.is_port_security_enabled(port)) or
                  utils.is_lsp_trusted(port)):
                self._del_port_from_drop_port_group(port['id'], txn)
            self._qos_driver.update_port(txn, port, port_object)
            if self.is_dns_required_for_port(port):
                self.add_txns_to_sync_port_dns_records(
                    txn, port, original_port=port_object)
            elif port_object and self.is_dns_required_for_port(port_object):
                # We need to remove the old entries
                self.add_txns_to_remove_port_dns_records(txn, port_object)
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(context, port, ovn_const.TYPE_PORTS)
    def _delete_port(self, port_id, port_object=None):
        """Remove the Logical_Switch_Port backing *port_id* from OVN NB.

        No-op when the LSP no longer exists.  Also cleans up the port's
        QoS and DNS entries and, when virtual ports are supported, unlinks
        any virtual ports that listed this port as a virtual parent.
        """
        ovn_port = self._nb_idl.lookup(
            'Logical_Switch_Port', port_id, default=None)
        if ovn_port is None:
            # Already gone from the NB database: nothing to clean up.
            return
        ovn_network_name = ovn_port.external_ids.get(
            ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY)
        # The OVN switch name was built from the Neutron network id via
        # utils.ovn_name() at create time; strip the 'neutron-' prefix to
        # recover the network id.
        network_id = ovn_network_name.replace('neutron-', '')
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.delete_lswitch_port(
                port_id, ovn_network_name))
            # Minimal stand-in dict when the Neutron port object is gone.
            p_object = ({'id': port_id, 'network_id': network_id}
                        if not port_object else port_object)
            self._qos_driver.delete_port(txn, p_object)
            if port_object and self.is_dns_required_for_port(port_object):
                self.add_txns_to_remove_port_dns_records(txn, port_object)
            # Check if the port being deleted is a virtual parent
            if (ovn_port.type != ovn_const.LSP_TYPE_VIRTUAL and
                    self._is_virtual_port_supported()):
                ls = self._nb_idl.ls_get(ovn_network_name).execute(
                    check_error=True)
                cmd = self._nb_idl.unset_lswitch_port_to_virtual_type
                for lsp in ls.ports:
                    if lsp.type != ovn_const.LSP_TYPE_VIRTUAL:
                        continue
                    if port_id in lsp.options.get(
                            ovn_const.LSP_OPTIONS_VIRTUAL_PARENTS_KEY, ''):
                        txn.add(cmd(lsp.name, port_id, if_exists=True))
# TODO(lucasagomes): The ``port_object`` parameter was added to
# keep things backward compatible. Remove it in the Rocky release.
def delete_port(self, context, port_id, port_object=None):
try:
self._delete_port(port_id, port_object=port_object)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error('Failed to delete port %(port)s. Error: '
'%(error)s', {'port': port_id, 'error': e})
db_rev.delete_revision(context, port_id, ovn_const.TYPE_PORTS)
    def _create_or_update_floatingip(self, floatingip, txn=None):
        """Add/refresh the dnat_and_snat NAT rule backing *floatingip*.

        Skips FIPs with no router and FIPs without a fixed address
        (port-forwarding FIPs are handled by OVNPortForwarding).  Also
        annotates the private LSP with the FIP address for the OVN Octavia
        driver and centralizes LB-member FIPs when DVR is enabled.
        """
        router_id = floatingip.get('router_id')
        if not router_id:
            return
        # FIPs used for port forwarding have no fixed address
        # configured. Also, OVN handler for port forwarding
        # is delegated to OVNPortForwarding. Nothing further
        # to do here.
        if floatingip['fixed_ip_address'] is None:
            LOG.debug("Skipping NAT for floating ip %(id)s, external ip "
                      "%(fip_ip)s on router %(rtr_id)s: no logical_ip",
                      {'id': floatingip['id'],
                       'fip_ip': floatingip['floating_ip_address'],
                       'rtr_id': router_id})
            return
        commands = []
        admin_context = n_context.get_admin_context()
        fip_db = self._l3_plugin._get_floatingip(
            admin_context, floatingip['id'])
        port_db = self._plugin.get_port(
            admin_context, fip_db['floating_port_id'])
        gw_lrouter_name = utils.ovn_name(router_id)
        ext_ids = {
            ovn_const.OVN_FIP_EXT_ID_KEY: floatingip['id'],
            ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number(
                floatingip, ovn_const.TYPE_FLOATINGIPS)),
            ovn_const.OVN_FIP_PORT_EXT_ID_KEY: floatingip['port_id'],
            ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY: gw_lrouter_name,
            ovn_const.OVN_FIP_EXT_MAC_KEY: port_db['mac_address'],
            ovn_const.OVN_FIP_NET_ID: floatingip['floating_network_id']}
        columns = {'type': 'dnat_and_snat',
                   'logical_ip': floatingip['fixed_ip_address'],
                   'external_ip': floatingip['floating_ip_address'],
                   'logical_port': floatingip['port_id'],
                   'external_ids': ext_ids}
        # DVR: only distribute the FIP when the LSP is actually up.
        if ovn_conf.is_ovn_distributed_floating_ip():
            if self._nb_idl.lsp_get_up(floatingip['port_id']).execute():
                columns['external_mac'] = port_db['mac_address']
        # TODO(mjozefcz): Remove this workaround when OVN LB
        # will support both decentralized FIPs on LB and member.
        lb_member_fip = self._is_lb_member_fip(admin_context, floatingip)
        if (ovn_conf.is_ovn_distributed_floating_ip() and
                lb_member_fip):
            LOG.warning("Port %s is configured as a member "
                        "of one of OVN Load_Balancers and "
                        "Load_Balancer has FIP assigned. "
                        "In order to make traffic work member "
                        "FIP needs to be centralized, even if "
                        "this environment is configured as DVR. "
                        "Removing logical_port and external_mac from "
                        "NAT entry.", floatingip['port_id'])
            columns.pop('logical_port', None)
            columns.pop('external_mac', None)
        commands.append(self._nb_idl.add_nat_rule_in_lrouter(gw_lrouter_name,
                                                             **columns))
        # Get the logical port (of the private network) and set the field
        # external_ids:fip=<FIP>. This will be used by the ovn octavia driver
        # to add the floating ip as vip in the Load_Balancer.vips column.
        private_lsp = self._nb_idl.get_lswitch_port(floatingip['port_id'])
        if private_lsp:
            port_fip = {
                ovn_const.OVN_PORT_FIP_EXT_ID_KEY:
                    floatingip['floating_ip_address']}
            commands.append(
                self._nb_idl.db_set('Logical_Switch_Port', private_lsp.uuid,
                                    ('external_ids', port_fip))
            )
            if not lb_member_fip:
                commands.extend(
                    self._handle_lb_fip_cmds(
                        admin_context, private_lsp,
                        action=ovn_const.FIP_ACTION_ASSOCIATE))
        else:
            LOG.warning("LSP for floatingip %s, has not been found! "
                        "Cannot set FIP on VIP.",
                        floatingip['id'])
        self._transaction(commands, txn=txn)
def _is_lb_member_fip(self, context, fip):
port = self._plugin.get_port(
context, fip['port_id'])
member_subnet = [ip['subnet_id'] for ip in port['fixed_ips']
if ip['ip_address'] == fip['fixed_ip_address']]
if not member_subnet:
return False
member_subnet = member_subnet[0]
ls = self._nb_idl.lookup(
'Logical_Switch', utils.ovn_name(port['network_id']))
for lb in ls.load_balancer:
for ext_id in lb.external_ids.keys():
if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
members = lb.external_ids[ext_id]
if not members:
continue
for member in members.split(','):
if ('%s:' % fip['fixed_ip_address'] in member and
'_%s' % member_subnet in member):
return True
return False
def _handle_lb_fip_cmds(self, context, lb_lsp,
action=ovn_const.FIP_ACTION_ASSOCIATE):
commands = []
if not ovn_conf.is_ovn_distributed_floating_ip():
return commands
lb_lsp_fip_port = lb_lsp.external_ids.get(
ovn_const.OVN_PORT_NAME_EXT_ID_KEY, '')
if not lb_lsp_fip_port.startswith(ovn_const.LB_VIP_PORT_PREFIX):
return commands
# This is a FIP on LB VIP.
# Loop over members and delete FIP external_mac/logical_port enteries.
# Find all LBs with this LSP as VIP.
lbs = self._nb_idl.db_find_rows(
'Load_Balancer',
('external_ids', '=', {
ovn_const.LB_EXT_IDS_VIP_PORT_ID_KEY: lb_lsp.name})
).execute(check_error=True)
for lb in lbs:
# GET all LS where given LB is linked.
ls_linked = [
item
for item in self._nb_idl.db_find_rows(
'Logical_Switch').execute(check_error=True)
if lb in item.load_balancer]
if not ls_linked:
return
# Find out IP addresses and subnets of configured members.
members_to_verify = []
for ext_id in lb.external_ids.keys():
if ext_id.startswith(ovn_const.LB_EXT_IDS_POOL_PREFIX):
members = lb.external_ids[ext_id]
if not members:
continue
for member in members.split(','):
# NOTE(mjozefcz): Remove this workaround in W release.
# Last argument of member info is a subnet_id from
# from which member comes from.
# member_`id`_`ip`:`port`_`subnet_ip`
member_info = member.split('_')
if len(member_info) >= 4:
m = {}
m['id'] = member_info[1]
m['ip'] = member_info[2].split(':')[0]
m['subnet_id'] = member_info[3]
try:
subnet = self._plugin.get_subnet(
context, m['subnet_id'])
m['network_id'] = subnet['network_id']
members_to_verify.append(m)
except n_exc.SubnetNotFound:
LOG.debug("Cannot find subnet details "
"for OVN LB member "
"%s.", m['id'])
# Find a member LSPs from all linked LS to this LB.
for member in members_to_verify:
ls = self._nb_idl.lookup(
'Logical_Switch', utils.ovn_name(member['network_id']))
for lsp in ls.ports:
if not lsp.addresses:
continue
if member['ip'] in utils.remove_macs_from_lsp_addresses(
lsp.addresses):
member['lsp'] = lsp
nats = self._nb_idl.db_find_rows(
'NAT',
('external_ids', '=', {
ovn_const.OVN_FIP_PORT_EXT_ID_KEY: lsp.name})
).execute(check_error=True)
for nat in nats:
if action == ovn_const.FIP_ACTION_ASSOCIATE:
# NOTE(mjozefcz): We should delete logical_port
# and external_mac entries from member NAT in
# order to make traffic work.
LOG.warning(
"Port %s is configured as a member "
"of one of OVN Load_Balancers and "
"Load_Balancer has FIP assigned. "
"In order to make traffic work member "
"FIP needs to be centralized, even if "
"this environment is configured as "
"DVR. Removing logical_port and "
"external_mac from NAT entry.",
lsp.name)
commands.extend([
self._nb_idl.db_clear(
'NAT', nat.uuid, 'external_mac'),
self._nb_idl.db_clear(
'NAT', nat.uuid, 'logical_port')])
else:
# NOTE(mjozefcz): The FIP from LB VIP is
# dissassociated now. We can decentralize
# member FIPs now.
LOG.warning(
"Port %s is configured as a member "
"of one of OVN Load_Balancers and "
"Load_Balancer has FIP disassociated. "
"DVR for this port can be enabled back.",
lsp.name)
commands.append(self._nb_idl.db_set(
'NAT', nat.uuid,
('logical_port', lsp.name)))
port = self._plugin.get_port(context, lsp.name)
if port['status'] == const.PORT_STATUS_ACTIVE:
commands.append(
self._nb_idl.db_set(
'NAT', nat.uuid,
('external_mac',
port['mac_address'])))
return commands
def _delete_floatingip(self, fip, lrouter, txn=None):
commands = [self._nb_idl.delete_nat_rule_in_lrouter(
lrouter, type='dnat_and_snat',
logical_ip=fip['logical_ip'],
external_ip=fip['external_ip'])]
try:
port_id = (
fip['external_ids'].get(ovn_const.OVN_FIP_PORT_EXT_ID_KEY))
if port_id:
private_lsp = self._nb_idl.get_lswitch_port(port_id)
if private_lsp:
commands.append(
self._nb_idl.db_remove(
'Logical_Switch_Port', private_lsp.uuid,
'external_ids',
(ovn_const.OVN_PORT_FIP_EXT_ID_KEY)))
commands.extend(
self._handle_lb_fip_cmds(
n_context.get_admin_context(),
private_lsp,
action=ovn_const.FIP_ACTION_DISASSOCIATE))
except KeyError:
LOG.debug("FIP %s doesn't have external_ids.", fip)
self._transaction(commands, txn=txn)
def update_floatingip_status(self, context, floatingip):
# NOTE(lucasagomes): OVN doesn't care about the floating ip
# status, this method just bumps the revision number
check_rev_cmd = self._nb_idl.check_revision_number(
floatingip['id'], floatingip, ovn_const.TYPE_FLOATINGIPS)
with self._nb_idl.transaction(check_error=True) as txn:
txn.add(check_rev_cmd)
if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
db_rev.bump_revision(
context, floatingip, ovn_const.TYPE_FLOATINGIPS)
def create_floatingip(self, context, floatingip):
try:
with self._nb_idl.transaction(check_error=True) as txn:
self._create_or_update_floatingip(floatingip, txn=txn)
self._qos_driver.create_floatingip(txn, floatingip)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error('Unable to create floating ip in gateway '
'router. Error: %s', e)
db_rev.bump_revision(context, floatingip, ovn_const.TYPE_FLOATINGIPS)
# NOTE(lucasagomes): Revise the expected status
# of floating ips, setting it to ACTIVE here doesn't
# see consistent with other drivers (ODL here), see:
# https://bugs.launchpad.net/networking-ovn/+bug/1657693
if floatingip.get('router_id'):
self._l3_plugin.update_floatingip_status(
n_context.get_admin_context(), floatingip['id'],
const.FLOATINGIP_STATUS_ACTIVE)
    def update_floatingip(self, context, floatingip):
        """Recreate the OVN NAT rule for a floating IP after an update.

        Deletes the existing NAT entry (if any) and, when the FIP is still
        associated to a port, re-adds it; then syncs QoS, bumps the
        revision when the transaction committed, and reports the new
        status (DOWN/ACTIVE) to the L3 plugin.
        """
        fip_status = None
        router_id = None
        ovn_fip = self._nb_idl.get_floatingip(floatingip['id'])
        check_rev_cmd = self._nb_idl.check_revision_number(
            floatingip['id'], floatingip, ovn_const.TYPE_FLOATINGIPS)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            if ovn_fip:
                # NOTE(review): router_id is still None at this point, so
                # the fallback argument is utils.ovn_name(None); the
                # router name stored in external_ids is expected to be
                # present -- confirm.
                lrouter = ovn_fip['external_ids'].get(
                    ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY,
                    utils.ovn_name(router_id))
                self._delete_floatingip(ovn_fip, lrouter, txn=txn)
                fip_status = const.FLOATINGIP_STATUS_DOWN
            if floatingip.get('port_id'):
                self._create_or_update_floatingip(floatingip, txn=txn)
                fip_status = const.FLOATINGIP_STATUS_ACTIVE
            self._qos_driver.update_floatingip(txn, floatingip)
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(
                context, floatingip, ovn_const.TYPE_FLOATINGIPS)
        if fip_status:
            self._l3_plugin.update_floatingip_status(
                context, floatingip['id'], fip_status)
    def delete_floatingip(self, context, fip_id):
        """Delete the OVN NAT rule for *fip_id* and its revision record.

        Errors from the OVN transaction are logged and re-raised; the
        revision row is always removed afterwards.
        """
        router_id = None
        ovn_fip = self._nb_idl.get_floatingip(fip_id)
        if ovn_fip:
            # NOTE(review): router_id is always None here, so the fallback
            # argument is utils.ovn_name(None); the router name stored in
            # external_ids is expected to be present -- confirm.
            lrouter = ovn_fip['external_ids'].get(
                ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY,
                utils.ovn_name(router_id))
            fip_net_id = ovn_fip['external_ids'].get(ovn_const.OVN_FIP_NET_ID)
            fip_dict = {'floating_network_id': fip_net_id, 'id': fip_id}
            try:
                with self._nb_idl.transaction(check_error=True) as txn:
                    self._delete_floatingip(ovn_fip, lrouter, txn=txn)
                    self._qos_driver.delete_floatingip(txn, fip_dict)
            except Exception as e:
                with excutils.save_and_reraise_exception():
                    LOG.error('Unable to delete floating ip in gateway '
                              'router. Error: %s', e)
        db_rev.delete_revision(context, fip_id, ovn_const.TYPE_FLOATINGIPS)
def disassociate_floatingip(self, floatingip, router_id):
lrouter = utils.ovn_name(router_id)
try:
with self._nb_idl.transaction(check_error=True) as txn:
self._delete_floatingip(floatingip, lrouter, txn=txn)
self._qos_driver.delete_floatingip(txn, floatingip)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error('Unable to disassociate floating ip in gateway '
'router. Error: %s', e)
def _get_gw_info(self, context, router):
gateways_info = []
ext_gw_info = router.get(l3.EXTERNAL_GW_INFO, {})
network_id = ext_gw_info.get('network_id', '')
for ext_fixed_ip in ext_gw_info.get('external_fixed_ips', []):
subnet_id = ext_fixed_ip['subnet_id']
subnet = self._plugin.get_subnet(context, subnet_id)
gateways_info.append(GW_INFO(
network_id, subnet_id, ext_fixed_ip['ip_address'],
subnet.get('gateway_ip'), subnet['ip_version'],
const.IPv4_ANY if subnet['ip_version'] == const.IP_VERSION_4
else const.IPv6_ANY))
return gateways_info
    def _delete_router_ext_gw(self, router, networks, txn):
        """Queue removal of a router's external gateway from OVN.

        Removes the SNAT rules for the given tenant ``networks`` (only for
        IPv4 gateway IPs), the default static route added per gateway IP,
        and finally the gateway logical router port itself.

        :param router: router dict; must contain 'id' and 'gw_port_id'.
        :param networks: tenant CIDRs whose SNAT rules should be removed;
                         may be None or empty.
        :param txn: OVN NB transaction the commands are added to.
        """
        context = n_context.get_admin_context()
        if not networks:
            networks = []
        router_id = router['id']
        gw_port_id = router['gw_port_id']
        gw_lrouter_name = utils.ovn_name(router_id)
        gateways = self._get_gw_info(context, router)
        for gw_info in gateways:
            # SNAT rules only exist for IPv4 gateway addresses.
            if gw_info.ip_version == const.IP_VERSION_4:
                for network in networks:
                    txn.add(self._nb_idl.delete_nat_rule_in_lrouter(
                        gw_lrouter_name, type='snat', logical_ip=network,
                        external_ip=gw_info.router_ip))
            txn.add(self._nb_idl.delete_static_route(
                gw_lrouter_name, ip_prefix=gw_info.ip_prefix,
                nexthop=gw_info.gateway_ip))
        txn.add(self._nb_idl.delete_lrouter_port(
            utils.ovn_lrouter_port_name(gw_port_id),
            gw_lrouter_name))
def _get_nets_and_ipv6_ra_confs_for_router_port(
self, context, port_fixed_ips):
networks = set()
ipv6_ra_configs = {}
ipv6_ra_configs_supported = self._nb_idl.is_col_present(
'Logical_Router_Port', 'ipv6_ra_configs')
for fixed_ip in port_fixed_ips:
subnet_id = fixed_ip['subnet_id']
subnet = self._plugin.get_subnet(context, subnet_id)
cidr = netaddr.IPNetwork(subnet['cidr'])
networks.add("%s/%s" % (fixed_ip['ip_address'],
str(cidr.prefixlen)))
if subnet.get('ipv6_address_mode') and not ipv6_ra_configs and (
ipv6_ra_configs_supported):
ipv6_ra_configs['address_mode'] = (
utils.get_ovn_ipv6_address_mode(
subnet['ipv6_address_mode']))
ipv6_ra_configs['send_periodic'] = 'true'
net = self._plugin.get_network(context, subnet['network_id'])
ipv6_ra_configs['mtu'] = str(net['mtu'])
return list(networks), ipv6_ra_configs
    def _add_router_ext_gw(self, router, networks, txn):
        """Wire up a router's external gateway in OVN.

        Creates the gateway logical router port, adds one default static
        route per gateway IP, and — when SNAT is enabled on the router —
        the SNAT rules for the given tenant ``networks``.

        :returns: the Neutron gateway port dict.
        """
        context = n_context.get_admin_context()
        # 1. Add the external gateway router port.
        gateways = self._get_gw_info(context, router)
        gw_port_id = router['gw_port_id']
        port = self._plugin.get_port(context, gw_port_id)
        self._create_lrouter_port(context, router, port, txn=txn)
        # 2. Add default route with nexthop as gateway ip
        lrouter_name = utils.ovn_name(router['id'])
        for gw_info in gateways:
            # Tag the route with the subnet so later updates can detect
            # gateway changes (see _check_external_ips_changed).
            columns = {'external_ids': {
                ovn_const.OVN_ROUTER_IS_EXT_GW: 'true',
                ovn_const.OVN_SUBNET_EXT_ID_KEY: gw_info.subnet_id}}
            txn.add(self._nb_idl.add_static_route(
                lrouter_name, ip_prefix=gw_info.ip_prefix,
                nexthop=gw_info.gateway_ip, **columns))
        # 3. Add snat rules for tenant networks in lrouter if snat is enabled
        if utils.is_snat_enabled(router) and networks:
            self.update_nat_rules(router, networks, enable_snat=True, txn=txn)
        return port
def _check_external_ips_changed(self, ovn_snats,
ovn_static_routes, router):
context = n_context.get_admin_context()
gateways = self._get_gw_info(context, router)
ovn_gw_subnets = None
if self._nb_idl.is_col_present('Logical_Router_Static_Route',
'external_ids'):
ovn_gw_subnets = [
getattr(route, 'external_ids', {}).get(
ovn_const.OVN_SUBNET_EXT_ID_KEY) for route in
ovn_static_routes]
for gw_info in gateways:
if ovn_gw_subnets and gw_info.subnet_id not in ovn_gw_subnets:
return True
if gw_info.ip_version == 6:
continue
for snat in ovn_snats:
if snat.external_ip != gw_info.router_ip:
return True
return False
def update_router_routes(self, context, router_id, add, remove,
txn=None):
if not any([add, remove]):
return
lrouter_name = utils.ovn_name(router_id)
commands = []
for route in add:
commands.append(
self._nb_idl.add_static_route(
lrouter_name, ip_prefix=route['destination'],
nexthop=route['nexthop']))
for route in remove:
commands.append(
self._nb_idl.delete_static_route(
lrouter_name, ip_prefix=route['destination'],
nexthop=route['nexthop']))
self._transaction(commands, txn=txn)
def _get_router_ports(self, context, router_id, get_gw_port=False):
router_db = self._l3_plugin._get_router(context, router_id)
if get_gw_port:
return [p.port for p in router_db.attached_ports]
else:
# When the existing deployment is migrated to OVN
# we may need to consider other port types - DVR_INTERFACE/HA_INTF.
return [p.port for p in router_db.attached_ports
if p.port_type in [const.DEVICE_OWNER_ROUTER_INTF,
const.DEVICE_OWNER_DVR_INTERFACE,
const.DEVICE_OWNER_HA_REPLICATED_INT,
const.DEVICE_OWNER_ROUTER_HA_INTF]]
def _get_v4_network_for_router_port(self, context, port):
cidr = None
for fixed_ip in port['fixed_ips']:
subnet_id = fixed_ip['subnet_id']
subnet = self._plugin.get_subnet(context, subnet_id)
if subnet['ip_version'] != 4:
continue
cidr = subnet['cidr']
return cidr
def _get_v4_network_of_all_router_ports(self, context, router_id,
ports=None):
networks = []
ports = ports or self._get_router_ports(context, router_id)
for port in ports:
network = self._get_v4_network_for_router_port(context, port)
if network:
networks.append(network)
return networks
def _gen_router_ext_ids(self, router):
return {
ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY:
router.get('name', 'no_router_name'),
ovn_const.OVN_GW_PORT_EXT_ID_KEY:
router.get('gw_port_id') or '',
ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number(
router, ovn_const.TYPE_ROUTERS)),
ovn_const.OVN_ROUTER_AZ_HINTS_EXT_ID_KEY:
','.join(utils.get_az_hints(router))}
    def create_router(self, context, router, add_external_gateway=True):
        """Create a logical router.

        Creates the OVN Logical_Router for the Neutron router and, unless
        ``add_external_gateway`` is False, also wires up the external
        gateway (port, default routes, SNAT).  Revision numbers for the
        router (and the gateway port, when created) are bumped afterwards.
        """
        external_ids = self._gen_router_ext_ids(router)
        enabled = router.get('admin_state_up')
        lrouter_name = utils.ovn_name(router['id'])
        added_gw_port = None
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.create_lrouter(lrouter_name,
                                                external_ids=external_ids,
                                                enabled=enabled,
                                                options={}))
            # TODO(lucasagomes): add_external_gateway is being only used
            # by the ovn_db_sync.py script, remove it after the database
            # synchronization work
            if add_external_gateway:
                networks = self._get_v4_network_of_all_router_ports(
                    context, router['id'])
                if router.get(l3.EXTERNAL_GW_INFO) and networks is not None:
                    added_gw_port = self._add_router_ext_gw(
                        router, networks, txn)
        if added_gw_port:
            db_rev.bump_revision(context, added_gw_port,
                                 ovn_const.TYPE_ROUTER_PORTS)
        db_rev.bump_revision(context, router, ovn_const.TYPE_ROUTERS)
    # TODO(lucasagomes): The ``router_object`` parameter was added to
    # keep things backward compatible with old routers created prior to
    # the database sync work. Remove it in the Rocky release.
    def update_router(self, context, new_router, router_object=None):
        """Update a logical router.

        Reconciles the external gateway (added / removed / changed),
        SNAT state, enabled flag, external_ids and static routes of the
        OVN Logical_Router with ``new_router``.  Revision numbers are
        bumped/deleted only when the guarded transaction commits.
        """
        router_id = new_router['id']
        router_name = utils.ovn_name(router_id)
        ovn_router = self._nb_idl.get_lrouter(router_name)
        gateway_new = new_router.get(l3.EXTERNAL_GW_INFO)
        gateway_old = utils.get_lrouter_ext_gw_static_route(ovn_router)
        added_gw_port = None
        deleted_gw_port_id = None
        if router_object:
            # Legacy routers may predate the gateway static-route tagging;
            # fall back to the Neutron-side gateway info.
            gateway_old = gateway_old or router_object.get(l3.EXTERNAL_GW_INFO)
        ovn_snats = utils.get_lrouter_snats(ovn_router)
        networks = self._get_v4_network_of_all_router_ports(context, router_id)
        try:
            check_rev_cmd = self._nb_idl.check_revision_number(
                router_name, new_router, ovn_const.TYPE_ROUTERS)
            with self._nb_idl.transaction(check_error=True) as txn:
                txn.add(check_rev_cmd)
                if gateway_new and not gateway_old:
                    # Route gateway is set
                    added_gw_port = self._add_router_ext_gw(
                        new_router, networks, txn)
                elif gateway_old and not gateway_new:
                    # router gateway is removed
                    txn.add(self._nb_idl.delete_lrouter_ext_gw(router_name))
                    if router_object:
                        self._delete_router_ext_gw(
                            router_object, networks, txn)
                        deleted_gw_port_id = router_object['gw_port_id']
                elif gateway_new and gateway_old:
                    # Check if external gateway has changed, if yes, delete
                    # the old gateway and add the new gateway
                    if self._check_external_ips_changed(
                            ovn_snats, gateway_old, new_router):
                        txn.add(self._nb_idl.delete_lrouter_ext_gw(
                            router_name))
                        if router_object:
                            self._delete_router_ext_gw(
                                router_object, networks, txn)
                            deleted_gw_port_id = router_object['gw_port_id']
                        added_gw_port = self._add_router_ext_gw(
                            new_router, networks, txn)
                    else:
                        # Check if snat has been enabled/disabled and update
                        new_snat_state = gateway_new.get('enable_snat', True)
                        if bool(ovn_snats) != new_snat_state:
                            if utils.is_snat_enabled(new_router) and networks:
                                self.update_nat_rules(
                                    new_router, networks,
                                    enable_snat=new_snat_state, txn=txn)
                update = {'external_ids': self._gen_router_ext_ids(new_router)}
                update['enabled'] = new_router.get('admin_state_up') or False
                txn.add(self._nb_idl.update_lrouter(router_name, **update))
                # Check for route updates
                routes = new_router.get('routes', [])
                old_routes = utils.get_lrouter_non_gw_routes(ovn_router)
                added, removed = helpers.diff_list_of_dict(
                    old_routes, routes)
                self.update_router_routes(
                    context, router_id, added, removed, txn=txn)
            if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
                db_rev.bump_revision(context, new_router,
                                     ovn_const.TYPE_ROUTERS)
                if added_gw_port:
                    db_rev.bump_revision(context, added_gw_port,
                                         ovn_const.TYPE_ROUTER_PORTS)
                if deleted_gw_port_id:
                    db_rev.delete_revision(context, deleted_gw_port_id,
                                           ovn_const.TYPE_ROUTER_PORTS)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error('Unable to update router %(router)s. '
                          'Error: %(error)s', {'router': router_id,
                                               'error': e})
def delete_router(self, context, router_id):
"""Delete a logical router."""
lrouter_name = utils.ovn_name(router_id)
with self._nb_idl.transaction(check_error=True) as txn:
txn.add(self._nb_idl.delete_lrouter(lrouter_name))
db_rev.delete_revision(context, router_id, ovn_const.TYPE_ROUTERS)
    def get_candidates_for_scheduling(self, physnet, cms=None,
                                      chassis_physnets=None,
                                      availability_zone_hints=None):
        """Return chassis for scheduling gateway router.

        Criteria for selecting chassis as candidates:

        1) chassis from cms with proper bridge mappings
        2) if no chassis is available from 1) then,
           select chassis with proper bridge mappings
        3) Filter the available chassis accordingly to the routers
           availability zone hints (if present)

        :param physnet: physical network the gateway must be able to reach.
        :param cms: optional pre-fetched list of gateway chassis (from
                    ovn-cms-options); looked up in the SB DB when None.
        :param chassis_physnets: optional pre-fetched chassis -> physnets
                                 mapping; looked up in the SB DB when None.
        :param availability_zone_hints: optional AZ names used to filter
                                        the candidates.
        :returns: list of chassis names.
        """
        # TODO(lucasagomes): Simplify the logic here, the CMS option has
        # been introduced long ago and by now all gateway chassis should
        # include it. This will match the logic in the is_gateway_chassis()
        # (utils.py)
        cms = cms or self._sb_idl.get_gateway_chassis_from_cms_options()
        chassis_physnets = (chassis_physnets or
                            self._sb_idl.get_chassis_and_physnets())
        # Split the eligible chassis into CMS-declared gateways and the
        # rest; CMS-declared ones are preferred.
        cms_bmaps = []
        bmaps = []
        for chassis, physnets in chassis_physnets.items():
            if physnet and physnet in physnets:
                if chassis in cms:
                    cms_bmaps.append(chassis)
                else:
                    bmaps.append(chassis)
        candidates = cms_bmaps or bmaps
        # Filter for availability zones
        if availability_zone_hints:
            LOG.debug('Filtering Chassis candidates by availability zone '
                      'hints: %s', ', '.join(availability_zone_hints))
            candidates = [ch for ch in candidates
                          for az in availability_zone_hints
                          if az in utils.get_chassis_availability_zones(
                              self._sb_idl.lookup('Chassis', ch, None))]
        if not cms_bmaps:
            LOG.debug("No eligible chassis with external connectivity"
                      " through ovn-cms-options for %s", physnet)
        LOG.debug("Chassis candidates with external connectivity: %s",
                  candidates)
        return candidates
def _get_physnet(self, network):
if network.get(pnet.NETWORK_TYPE) in [const.TYPE_FLAT,
const.TYPE_VLAN]:
return network.get(pnet.PHYSICAL_NETWORK)
def _gen_router_port_ext_ids(self, port):
ext_ids = {
ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number(
port, ovn_const.TYPE_ROUTER_PORTS)),
ovn_const.OVN_SUBNET_EXT_IDS_KEY:
' '.join(utils.get_port_subnet_ids(port)),
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY:
utils.ovn_name(port['network_id'])}
router_id = port.get('device_id')
if router_id:
ext_ids[ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY] = router_id
return ext_ids
def _gen_router_port_options(self, port, network=None):
options = {}
if network is None:
network = self._plugin.get_network(n_context.get_admin_context(),
port['network_id'])
# For VLAN type networks we need to set the
# "reside-on-redirect-chassis" option so the routing for this
# logical router port is centralized in the chassis hosting the
# distributed gateway port.
# https://github.com/openvswitch/ovs/commit/85706c34d53d4810f54bec1de662392a3c06a996
if network.get(pnet.NETWORK_TYPE) == const.TYPE_VLAN:
options['reside-on-redirect-chassis'] = 'true'
is_gw_port = const.DEVICE_OWNER_ROUTER_GW == port.get(
'device_owner')
if is_gw_port and ovn_conf.is_ovn_emit_need_to_frag_enabled():
options[ovn_const.OVN_ROUTER_PORT_GW_MTU_OPTION] = str(
network['mtu'])
return options
    def _create_lrouter_port(self, context, router, port, txn=None):
        """Create a logical router port.

        Derives the LRP columns (networks, options, ipv6_ra_configs)
        from the Neutron ``port``, schedules gateway ports onto a
        chassis, creates the LRP and links it to its peer logical switch
        port.

        :param router: router dict the port belongs to.
        :param port: Neutron port dict backing the router port.
        :param txn: optional OVN NB transaction; self._transaction()
                    creates one when None.
        """
        lrouter = utils.ovn_name(router['id'])
        networks, ipv6_ra_configs = (
            self._get_nets_and_ipv6_ra_confs_for_router_port(
                context, port['fixed_ips']))
        lrouter_port_name = utils.ovn_lrouter_port_name(port['id'])
        is_gw_port = const.DEVICE_OWNER_ROUTER_GW == port.get(
            'device_owner')
        columns = {}
        columns['options'] = self._gen_router_port_options(port)
        if is_gw_port:
            # Gateway ports are pinned to chassis that can reach the
            # port's physical network and match the router's AZ hints.
            port_net = self._plugin.get_network(n_context.get_admin_context(),
                                                port['network_id'])
            physnet = self._get_physnet(port_net)
            candidates = self.get_candidates_for_scheduling(
                physnet, availability_zone_hints=utils.get_az_hints(router))
            selected_chassis = self._ovn_scheduler.select(
                self._nb_idl, self._sb_idl, lrouter_port_name,
                candidates=candidates)
            if selected_chassis:
                columns['gateway_chassis'] = selected_chassis
        lsp_address = ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER
        if ipv6_ra_configs:
            columns['ipv6_ra_configs'] = ipv6_ra_configs
        commands = [
            self._nb_idl.add_lrouter_port(
                name=lrouter_port_name,
                lrouter=lrouter,
                mac=port['mac_address'],
                networks=networks,
                may_exist=True,
                external_ids=self._gen_router_port_ext_ids(port),
                **columns),
            self._nb_idl.set_lrouter_port_in_lswitch_port(
                port['id'], lrouter_port_name, is_gw_port=is_gw_port,
                lsp_address=lsp_address)]
        self._transaction(commands, txn=txn)
    def create_router_port(self, context, router_id, router_interface):
        """Handle router-interface-add: create or update the OVN LRP.

        When a subnet is added to a port that already is a router
        interface, only the LRP 'networks' column is updated; otherwise a
        new LRP is created.  If the router has an external gateway and
        SNAT is enabled, an SNAT rule is added for the interface's IPv4
        CIDR.  The port's revision number is bumped afterwards.
        """
        port = self._plugin.get_port(context, router_interface['port_id'])
        router = self._l3_plugin.get_router(context, router_id)
        with self._nb_idl.transaction(check_error=True) as txn:
            multi_prefix = False
            if (len(router_interface.get('subnet_ids', [])) == 1 and
                    len(port['fixed_ips']) > 1):
                # NOTE(lizk) It's adding a subnet onto an already
                # existing router interface port, try to update lrouter port
                # 'networks' column.
                self._update_lrouter_port(context, port, txn=txn)
                multi_prefix = True
            else:
                self._create_lrouter_port(context, router, port, txn=txn)
            if router.get(l3.EXTERNAL_GW_INFO):
                cidr = None
                for fixed_ip in port['fixed_ips']:
                    subnet = self._plugin.get_subnet(context,
                                                     fixed_ip['subnet_id'])
                    if multi_prefix:
                        # Only the newly added subnet needs an SNAT rule.
                        if 'subnet_id' in router_interface:
                            if subnet['id'] != router_interface['subnet_id']:
                                continue
                    if subnet['ip_version'] == 4:
                        cidr = subnet['cidr']
                if utils.is_snat_enabled(router) and cidr:
                    self.update_nat_rules(router, networks=[cidr],
                                          enable_snat=True, txn=txn)
        db_rev.bump_revision(context, port, ovn_const.TYPE_ROUTER_PORTS)
def _update_lrouter_port(self, context, port, if_exists=False, txn=None):
"""Update a logical router port."""
networks, ipv6_ra_configs = (
self._get_nets_and_ipv6_ra_confs_for_router_port(
context, port['fixed_ips']))
lsp_address = ovn_const.DEFAULT_ADDR_FOR_LSP_WITH_PEER
lrp_name = utils.ovn_lrouter_port_name(port['id'])
update = {'networks': networks, 'ipv6_ra_configs': ipv6_ra_configs}
is_gw_port = const.DEVICE_OWNER_ROUTER_GW == port.get(
'device_owner')
commands = [
self._nb_idl.update_lrouter_port(
name=lrp_name,
external_ids=self._gen_router_port_ext_ids(port),
options=self._gen_router_port_options(port),
if_exists=if_exists,
**update),
self._nb_idl.set_lrouter_port_in_lswitch_port(
port['id'], lrp_name, is_gw_port=is_gw_port,
lsp_address=lsp_address)]
self._transaction(commands, txn=txn)
    def update_router_port(self, context, port, if_exists=False):
        """Update the OVN LRP for *port*, bumping its revision on commit.

        The update is guarded by a revision-number check so stale events
        do not overwrite newer state.
        """
        lrp_name = utils.ovn_lrouter_port_name(port['id'])
        check_rev_cmd = self._nb_idl.check_revision_number(
            lrp_name, port, ovn_const.TYPE_ROUTER_PORTS)
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            self._update_lrouter_port(context, port, if_exists=if_exists,
                                      txn=txn)
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(
                context, port, ovn_const.TYPE_ROUTER_PORTS)
def _delete_lrouter_port(self, context, port_id, router_id=None, txn=None):
"""Delete a logical router port."""
commands = [self._nb_idl.lrp_del(
utils.ovn_lrouter_port_name(port_id),
utils.ovn_name(router_id) if router_id else None,
if_exists=True)]
self._transaction(commands, txn=txn)
db_rev.delete_revision(context, port_id, ovn_const.TYPE_ROUTER_PORTS)
    def delete_router_port(self, context, port_id, router_id=None,
                           subnet_ids=None):
        """Handle router-interface-remove for *port_id*.

        Updates or deletes the OVN LRP depending on whether the Neutron
        port still exists, and removes the SNAT rule for the interface's
        IPv4 CIDR when the router has an external gateway.
        """
        try:
            ovn_port = self._nb_idl.lookup(
                'Logical_Router_Port', utils.ovn_lrouter_port_name(port_id))
        except idlutils.RowNotFound:
            # LRP already gone from OVN; nothing to clean up.
            return
        subnet_ids = subnet_ids or []
        port_removed = False
        with self._nb_idl.transaction(check_error=True) as txn:
            port = None
            try:
                port = self._plugin.get_port(context, port_id)
                # The router interface port still exists, call ovn to
                # update it
                self._update_lrouter_port(context, port, txn=txn)
            except n_exc.PortNotFound:
                # The router interface port doesn't exist any more,
                # we will call ovn to delete it once we remove the snat
                # rules in the router itself if we have to
                port_removed = True
            router_id = router_id or ovn_port.external_ids.get(
                ovn_const.OVN_ROUTER_NAME_EXT_ID_KEY)
            if not router_id:
                # NOTE(review): if the Neutron port was already deleted
                # AND the LRP carries no router external_id, ``port`` is
                # still None here and this raises AttributeError — confirm
                # whether that combination can occur in practice.
                router_id = port.get('device_id')
            router = None
            if router_id:
                router = self._l3_plugin.get_router(context, router_id)
            # NOTE(review): ``router`` stays None when no router_id could
            # be resolved, which would make the next line raise — same
            # caveat as above.
            if not router.get(l3.EXTERNAL_GW_INFO):
                if port_removed:
                    self._delete_lrouter_port(context, port_id, router_id,
                                              txn=txn)
                return
            if not subnet_ids:
                # external_ids stores the subnet IDs space-separated.
                subnet_ids = ovn_port.external_ids.get(
                    ovn_const.OVN_SUBNET_EXT_IDS_KEY, [])
                subnet_ids = subnet_ids.split()
            elif port:
                subnet_ids = utils.get_port_subnet_ids(port)
            cidr = None
            for sid in subnet_ids:
                subnet = self._plugin.get_subnet(context, sid)
                if subnet['ip_version'] == 4:
                    cidr = subnet['cidr']
                    break
            if router and utils.is_snat_enabled(router) and cidr:
                self.update_nat_rules(
                    router, networks=[cidr], enable_snat=False, txn=txn)
            # NOTE(mangelajo): If the port doesn't exist anymore, we
            # delete the router port as the last operation and update the
            # revision database to ensure consistency
            if port_removed:
                self._delete_lrouter_port(context, port_id, router_id, txn=txn)
            else:
                # otherwise, we just update the revision database
                db_rev.bump_revision(
                    context, port, ovn_const.TYPE_ROUTER_PORTS)
def update_nat_rules(self, router, networks, enable_snat, txn=None):
"""Update the NAT rules in a logical router."""
context = n_context.get_admin_context()
func = (self._nb_idl.add_nat_rule_in_lrouter if enable_snat else
self._nb_idl.delete_nat_rule_in_lrouter)
gw_lrouter_name = utils.ovn_name(router['id'])
gateways = self._get_gw_info(context, router)
# Update NAT rules only for IPv4 subnets
commands = [func(gw_lrouter_name, type='snat', logical_ip=network,
external_ip=gw_info.router_ip) for gw_info in gateways
if gw_info.ip_version != const.IP_VERSION_6
for network in networks]
self._transaction(commands, txn=txn)
def create_provnet_port(self, network_id, segment, txn=None):
tag = segment.get(segment_def.SEGMENTATION_ID, [])
physnet = segment.get(segment_def.PHYSICAL_NETWORK)
cmd = self._nb_idl.create_lswitch_port(
lport_name=utils.ovn_provnet_port_name(segment['id']),
lswitch_name=utils.ovn_name(network_id),
addresses=[ovn_const.UNKNOWN_ADDR],
external_ids={},
type=ovn_const.LSP_TYPE_LOCALNET,
tag=tag,
options={'network_name': physnet})
self._transaction([cmd], txn=txn)
def delete_provnet_port(self, network_id, segment):
port_to_del = utils.ovn_provnet_port_name(segment['id'])
legacy_port_name = utils.ovn_provnet_port_name(network_id)
physnet = segment.get(segment_def.PHYSICAL_NETWORK)
lswitch = self._nb_idl.get_lswitch(utils.ovn_name(network_id))
lports = [lp.name for lp in lswitch.ports]
# Cover the situation where localnet ports
# were named after network_id and not segment_id.
# TODO(mjozefcz): Remove this in w-release.
if (port_to_del not in lports and
legacy_port_name in lports):
for lport in lswitch.ports:
if (legacy_port_name == lport.name and
lport.options['network_name'] == physnet):
port_to_del = legacy_port_name
break
cmd = self._nb_idl.delete_lswitch_port(
lport_name=port_to_del,
lswitch_name=utils.ovn_name(network_id))
self._transaction([cmd])
def _gen_network_parameters(self, network):
params = {'external_ids': {
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY: network['name'],
ovn_const.OVN_NETWORK_MTU_EXT_ID_KEY: str(network['mtu']),
ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
utils.get_revision_number(network, ovn_const.TYPE_NETWORKS))}}
# Enable IGMP snooping if igmp_snooping_enable is enabled in Neutron
value = 'true' if ovn_conf.is_igmp_snooping_enabled() else 'false'
vlan_transparent = (
'true' if network.get('vlan_transparent') else 'false')
params['other_config'] = {ovn_const.MCAST_SNOOP: value,
ovn_const.MCAST_FLOOD_UNREGISTERED: 'false',
ovn_const.VLAN_PASSTHRU: vlan_transparent}
return params
    def create_network(self, context, network):
        """Create the OVN logical switch backing a Neutron network.

        Also creates a localnet port for every physical-network backed
        segment, bumps the network revision and creates the metadata
        port.  Returns the network dict unchanged.
        """
        # Create a logical switch with a name equal to the Neutron network
        # UUID. This provides an easy way to refer to the logical switch
        # without having to track what UUID OVN assigned to it.
        lswitch_params = self._gen_network_parameters(network)
        lswitch_name = utils.ovn_name(network['id'])
        # NOTE(mjozefcz): Remove this workaround when bug
        # 1869877 will be fixed.
        segments = segments_db.get_network_segments(
            context, network['id'])
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(self._nb_idl.ls_add(lswitch_name, **lswitch_params,
                                        may_exist=True))
            for segment in segments:
                if segment.get(segment_def.PHYSICAL_NETWORK):
                    self.create_provnet_port(network['id'], segment, txn=txn)
        db_rev.bump_revision(context, network, ovn_const.TYPE_NETWORKS)
        self.create_metadata_port(context, network)
        return network
def delete_network(self, context, network_id):
with self._nb_idl.transaction(check_error=True) as txn:
ls, ls_dns_record = self._nb_idl.get_ls_and_dns_record(
utils.ovn_name(network_id))
txn.add(self._nb_idl.ls_del(utils.ovn_name(network_id),
if_exists=True))
if ls_dns_record:
txn.add(self._nb_idl.dns_del(ls_dns_record.uuid))
db_rev.delete_revision(
context, network_id, ovn_const.TYPE_NETWORKS)
def set_gateway_mtu(self, context, prov_net, txn=None):
ports = self._plugin.get_ports(
context, filters=dict(network_id=[prov_net['id']],
device_owner=[const.DEVICE_OWNER_ROUTER_GW]))
commands = []
for port in ports:
lrp_name = utils.ovn_lrouter_port_name(port['id'])
options = self._gen_router_port_options(port, prov_net)
commands.append(self._nb_idl.lrp_set_options(lrp_name, **options))
self._transaction(commands, txn=txn)
    def update_network(self, context, network, original_network=None):
        """Update the OVN logical switch of a Neutron network.

        Writes the regenerated switch parameters, propagates MTU changes
        to the network's subnets and (for provider networks) gateway
        ports, and updates the QoS rules.  The revision number is bumped
        only when the guarded transaction commits.
        """
        lswitch_name = utils.ovn_name(network['id'])
        check_rev_cmd = self._nb_idl.check_revision_number(
            lswitch_name, network, ovn_const.TYPE_NETWORKS)
        # TODO(numans) - When a network's dns domain name is updated, we need
        # to update the DNS records for this network in DNS OVN NB DB table.
        # (https://bugs.launchpad.net/networking-ovn/+bug/1777978)
        # Eg. if the network n1's dns domain name was "test1" and if it has
        # 2 bound ports - p1 and p2, we would have created the below dns
        # records
        # ===========================
        # p1 = P1_IP
        # p1.test1 = P1_IP
        # p1.default_domain = P1_IP
        # p2 = P2_IP
        # p2.test1 = P2_IP
        # p2.default_domain = P2_IP
        # ===========================
        # if the network n1's dns domain name is updated to test2, then we need
        # to delete the below DNS records
        # ===========================
        # p1.test1 = P1_IP
        # p2.test1 = P2_IP
        # ===========================
        # and add the new ones
        # ===========================
        # p1.test2 = P1_IP
        # p2.test2 = P2_IP
        # ===========================
        # in the DNS row for this network.
        with self._nb_idl.transaction(check_error=True) as txn:
            txn.add(check_rev_cmd)
            lswitch_params = self._gen_network_parameters(network)
            lswitch = self._nb_idl.get_lswitch(lswitch_name)
            txn.add(self._nb_idl.db_set(
                'Logical_Switch', lswitch_name, *lswitch_params.items()))
            # Check if previous mtu is different than current one,
            # checking will help reduce number of operations
            if (not lswitch or
                    lswitch.external_ids.get(
                        ovn_const.OVN_NETWORK_MTU_EXT_ID_KEY) !=
                    str(network['mtu'])):
                subnets = self._plugin.get_subnets_by_network(
                    context, network['id'])
                for subnet in subnets:
                    self.update_subnet(context, subnet, network, txn)
                if utils.is_provider_network(network):
                    # make sure to use admin context as this is a providernet
                    self.set_gateway_mtu(n_context.get_admin_context(),
                                         network, txn)
            self._qos_driver.update_network(txn, network, original_network)
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(context, network, ovn_const.TYPE_NETWORKS)
def _add_subnet_dhcp_options(self, subnet, network,
ovn_dhcp_options=None):
if utils.is_dhcp_options_ignored(subnet):
return
if not ovn_dhcp_options:
ovn_dhcp_options = self._get_ovn_dhcp_options(subnet, network)
with self._nb_idl.transaction(check_error=True) as txn:
rev_num = {ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(
utils.get_revision_number(subnet, ovn_const.TYPE_SUBNETS))}
ovn_dhcp_options['external_ids'].update(rev_num)
txn.add(self._nb_idl.add_dhcp_options(subnet['id'],
**ovn_dhcp_options))
def _get_ovn_dhcp_options(self, subnet, network, server_mac=None):
external_ids = {
'subnet_id': subnet['id'],
ovn_const.OVN_REV_NUM_EXT_ID_KEY: str(utils.get_revision_number(
subnet, ovn_const.TYPE_SUBNETS))}
dhcp_options = {'cidr': subnet['cidr'], 'options': {},
'external_ids': external_ids}
if subnet['enable_dhcp']:
if subnet['ip_version'] == const.IP_VERSION_4:
dhcp_options['options'] = self._get_ovn_dhcpv4_opts(
subnet, network, server_mac=server_mac)
else:
dhcp_options['options'] = self._get_ovn_dhcpv6_opts(
subnet, server_id=server_mac)
return dhcp_options
def _process_global_dhcp_opts(self, options, ip_version):
if ip_version == 4:
global_options = ovn_conf.get_global_dhcpv4_opts()
else:
global_options = ovn_conf.get_global_dhcpv6_opts()
for option, value in global_options.items():
if option in ovn_const.GLOBAL_DHCP_OPTS_PROHIBIT_LIST[ip_version]:
# This option is not allowed to be set with a global setting
LOG.debug('DHCP option %s is not permitted to be set in '
'global options. This option will be ignored.',
option)
continue
# If the value is null (i.e. config ntp_server:), treat it as
# a request to remove the option
if value:
options[option] = value
else:
try:
del(options[option])
except KeyError:
# Option not present, job done
pass
    def _get_ovn_dhcpv4_opts(self, subnet, network, server_mac=None):
        """Build the DHCPv4 option map for *subnet*.

        Returns an empty dict when neither a gateway IP nor a metadata
        port IP is available to act as the DHCP server address.  Global
        operator-configured options are merged in last.
        """
        metadata_port_ip = self._find_metadata_port_ip(
            n_context.get_admin_context(), subnet)
        # TODO(dongj): Currently the metadata port is created only when
        # ovn_metadata_enabled is true, therefore this is a restriction for
        # supporting DHCP of subnet without gateway IP.
        # We will remove this restriction later.
        service_id = subnet['gateway_ip'] or metadata_port_ip
        if not service_id:
            return {}
        default_lease_time = str(ovn_conf.get_ovn_dhcp_default_lease_time())
        mtu = network['mtu']
        options = {
            'server_id': service_id,
            'lease_time': default_lease_time,
            'mtu': str(mtu),
        }
        if cfg.CONF.dns_domain and cfg.CONF.dns_domain != 'openstacklocal':
            # NOTE(mjozefcz): String field should be with quotes,
            # otherwise ovn will try to resolve it as variable.
            options['domain_name'] = '"%s"' % cfg.CONF.dns_domain
        if subnet['gateway_ip']:
            options['router'] = subnet['gateway_ip']
        if server_mac:
            options['server_mac'] = server_mac
        else:
            options['server_mac'] = n_net.get_random_mac(
                cfg.CONF.base_mac.split(':'))
        # Precedence: subnet-level nameservers, then configured defaults,
        # then the host's resolv.conf entries.
        dns_servers = (subnet.get('dns_nameservers') or
                       ovn_conf.get_dns_servers() or
                       utils.get_system_dns_resolvers())
        if dns_servers:
            options['dns_server'] = '{%s}' % ', '.join(dns_servers)
        else:
            LOG.warning("No relevant dns_servers defined for subnet %s. Check "
                        "the /etc/resolv.conf file",
                        subnet['id'])
        routes = []
        if metadata_port_ip:
            # Route the metadata CIDR via the metadata port.
            routes.append('%s,%s' % (
                const.METADATA_V4_CIDR, metadata_port_ip))
        # Add subnet host_routes to 'classless_static_route' dhcp option
        routes.extend(['%s,%s' % (route['destination'], route['nexthop'])
                      for route in subnet['host_routes']])
        if routes:
            # if there are static routes, then we need to add the
            # default route in this option. As per RFC 3442 dhcp clients
            # should ignore 'router' dhcp option (option 3)
            # if option 121 is present.
            if subnet['gateway_ip']:
                routes.append('0.0.0.0/0,%s' % subnet['gateway_ip'])
            options['classless_static_route'] = '{' + ', '.join(routes) + '}'
        self._process_global_dhcp_opts(options, ip_version=4)
        return options
def _get_ovn_dhcpv6_opts(self, subnet, server_id=None):
"""Returns the DHCPv6 options"""
dhcpv6_opts = {
'server_id': server_id or n_net.get_random_mac(
cfg.CONF.base_mac.split(':'))
}
if subnet['dns_nameservers']:
dns_servers = '{%s}' % ', '.join(subnet['dns_nameservers'])
dhcpv6_opts['dns_server'] = dns_servers
if subnet.get('ipv6_address_mode') == const.DHCPV6_STATELESS:
dhcpv6_opts[ovn_const.DHCPV6_STATELESS_OPT] = 'true'
self._process_global_dhcp_opts(dhcpv6_opts, ip_version=6)
return dhcpv6_opts
def _remove_subnet_dhcp_options(self, subnet_id, txn):
dhcp_options = self._nb_idl.get_subnet_dhcp_options(
subnet_id, with_ports=True)
if dhcp_options['subnet'] is not None:
txn.add(self._nb_idl.delete_dhcp_options(
dhcp_options['subnet']['uuid']))
# Remove subnet and port DHCP_Options rows, the DHCP options in
# lsp rows will be removed by related UUID
for opt in dhcp_options['ports']:
txn.add(self._nb_idl.delete_dhcp_options(opt['uuid']))
    def _enable_subnet_dhcp_options(self, subnet, network, txn):
        """Turn on DHCP for *subnet* in OVN.

        Creates the subnet-level DHCP_Options row, a port-level row for
        every port that overrides DHCP options, and points each logical
        switch port at the right row.
        """
        if utils.is_dhcp_options_ignored(subnet):
            return
        filters = {'fixed_ips': {'subnet_id': [subnet['id']]}}
        all_ports = self._plugin.get_ports(n_context.get_admin_context(),
                                           filters=filters)
        # Network-device ports (e.g. router interfaces) never get DHCP.
        ports = [p for p in all_ports if not utils.is_network_device_port(p)]
        dhcp_options = self._get_ovn_dhcp_options(subnet, network)
        subnet_dhcp_cmd = self._nb_idl.add_dhcp_options(subnet['id'],
                                                        **dhcp_options)
        subnet_dhcp_option = txn.add(subnet_dhcp_cmd)
        # Traverse ports to add port DHCP_Options rows
        for port in ports:
            lsp_dhcp_disabled, lsp_dhcp_opts = utils.get_lsp_dhcp_opts(
                port, subnet['ip_version'])
            if lsp_dhcp_disabled:
                continue
            if not lsp_dhcp_opts:
                # Port uses the subnet-level row as-is.
                lsp_dhcp_options = subnet_dhcp_option
            else:
                # Port has extra options: clone the subnet row and merge.
                port_dhcp_options = copy.deepcopy(dhcp_options)
                port_dhcp_options['options'].update(lsp_dhcp_opts)
                port_dhcp_options['external_ids'].update(
                    {'port_id': port['id']})
                lsp_dhcp_options = txn.add(self._nb_idl.add_dhcp_options(
                    subnet['id'], port_id=port['id'],
                    **port_dhcp_options))
            columns = ({'dhcpv6_options': lsp_dhcp_options} if
                       subnet['ip_version'] == const.IP_VERSION_6 else {
                       'dhcpv4_options': lsp_dhcp_options})
            # Set lsp DHCP options
            txn.add(self._nb_idl.set_lswitch_port(
                    lport_name=port['id'],
                    **columns))
    def _update_subnet_dhcp_options(self, subnet, network, txn):
        """Refresh the subnet- and port-level DHCP options in OVN.

        Recomputes the subnet's options (keeping the original server
        MAC/ID) and, when nothing changed, returns early.  Otherwise the
        subnet row is rewritten and every port-level row is rebuilt,
        propagating a changed dns_server to ports that did not override
        it.
        """
        if utils.is_dhcp_options_ignored(subnet):
            return
        original_options = self._nb_idl.get_subnet_dhcp_options(
            subnet['id'])['subnet']
        mac = None
        if original_options:
            # Preserve the previously generated server identifier so
            # clients keep talking to the same (virtual) DHCP server.
            if subnet['ip_version'] == const.IP_VERSION_6:
                mac = original_options['options'].get('server_id')
            else:
                mac = original_options['options'].get('server_mac')
        new_options = self._get_ovn_dhcp_options(subnet, network, mac)
        # Check whether DHCP changed
        if (original_options and
                original_options['cidr'] == new_options['cidr'] and
                original_options['options'] == new_options['options']):
            return
        txn.add(self._nb_idl.add_dhcp_options(subnet['id'], **new_options))
        dhcp_options = self._nb_idl.get_subnet_dhcp_options(
            subnet['id'], with_ports=True)
        # When a subnet dns_nameserver is updated, then we should update
        # the port dhcp options for ports (with no port specific dns_server
        # defined).
        # NOTE(review): ``'options' in original_options`` assumes
        # original_options is a dict; if no subnet row existed it is None
        # and this raises TypeError — confirm whether this path is
        # reachable with original_options=None.
        if 'options' in new_options and 'options' in original_options:
            orig_dns_server = original_options['options'].get('dns_server')
            new_dns_server = new_options['options'].get('dns_server')
            dns_server_changed = (orig_dns_server != new_dns_server)
        else:
            dns_server_changed = False
        for opt in dhcp_options['ports']:
            if not new_options.get('options'):
                continue
            options = dict(new_options['options'])
            p_dns_server = opt['options'].get('dns_server')
            if dns_server_changed and (orig_dns_server == p_dns_server):
                # If port has its own dns_server option defined, then
                # orig_dns_server and p_dns_server will not match.
                opt['options']['dns_server'] = new_dns_server
            options.update(opt['options'])
            port_id = opt['external_ids']['port_id']
            txn.add(self._nb_idl.add_dhcp_options(
                subnet['id'], port_id=port_id, options=options))
def create_subnet(self, context, subnet, network):
if subnet['enable_dhcp']:
if subnet['ip_version'] == 4:
self.update_metadata_port(context, network['id'],
subnet_id=subnet['id'])
self._add_subnet_dhcp_options(subnet, network)
db_rev.bump_revision(context, subnet, ovn_const.TYPE_SUBNETS)
def _modify_subnet_dhcp_options(self, subnet, ovn_subnet, network, txn):
if subnet['enable_dhcp'] and not ovn_subnet:
self._enable_subnet_dhcp_options(subnet, network, txn)
elif subnet['enable_dhcp'] and ovn_subnet:
self._update_subnet_dhcp_options(subnet, network, txn)
elif not subnet['enable_dhcp'] and ovn_subnet:
self._remove_subnet_dhcp_options(subnet['id'], txn)
    def update_subnet(self, context, subnet, network, txn=None):
        """Sync OVN DHCP state after a Neutron subnet update.

        Runs inside *txn* when one is supplied; otherwise opens its own
        transaction guarded by a revision-number check.  Note that
        check_rev_cmd is only added to the locally-created transaction,
        so when a caller passes *txn* the revision bump below is skipped
        and is left to the caller.
        """
        ovn_subnet = self._nb_idl.get_subnet_dhcp_options(
            subnet['id'])['subnet']
        # The metadata port may need an IP on this subnet whenever DHCP
        # is (or previously was) enabled on it.
        if subnet['enable_dhcp'] or ovn_subnet:
            self.update_metadata_port(context, network['id'],
                                      subnet_id=subnet['id'])
        check_rev_cmd = self._nb_idl.check_revision_number(
            subnet['id'], subnet, ovn_const.TYPE_SUBNETS)
        if not txn:
            with self._nb_idl.transaction(check_error=True) as txn_n:
                txn_n.add(check_rev_cmd)
                self._modify_subnet_dhcp_options(subnet, ovn_subnet, network,
                                                 txn_n)
        else:
            self._modify_subnet_dhcp_options(subnet, ovn_subnet, network, txn)
        # Only bump the revision if the guarded transaction committed.
        if check_rev_cmd.result == ovn_const.TXN_COMMITTED:
            db_rev.bump_revision(context, subnet, ovn_const.TYPE_SUBNETS)
def delete_subnet(self, context, subnet_id):
with self._nb_idl.transaction(check_error=True) as txn:
self._remove_subnet_dhcp_options(subnet_id, txn)
db_rev.delete_revision(
context, subnet_id, ovn_const.TYPE_SUBNETS)
def create_security_group(self, context, security_group):
with self._nb_idl.transaction(check_error=True) as txn:
ext_ids = {ovn_const.OVN_SG_EXT_ID_KEY: security_group['id']}
name = utils.ovn_port_group_name(security_group['id'])
txn.add(self._nb_idl.pg_add(
name=name, acls=[], external_ids=ext_ids))
# When a SG is created, it comes with some default rules,
# so we'll apply them to the Port Group.
ovn_acl.add_acls_for_sg_port_group(self._nb_idl,
security_group, txn)
db_rev.bump_revision(
context, security_group, ovn_const.TYPE_SECURITY_GROUPS)
def _add_port_to_drop_port_group(self, port, txn):
txn.add(self._nb_idl.pg_add_ports(ovn_const.OVN_DROP_PORT_GROUP_NAME,
port))
def _del_port_from_drop_port_group(self, port, txn):
pg_name = ovn_const.OVN_DROP_PORT_GROUP_NAME
if self._nb_idl.get_port_group(pg_name):
txn.add(self._nb_idl.pg_del_ports(pg_name, port))
def delete_security_group(self, context, security_group_id):
with self._nb_idl.transaction(check_error=True) as txn:
name = utils.ovn_port_group_name(security_group_id)
txn.add(self._nb_idl.pg_del(name=name, if_exists=True))
db_rev.delete_revision(context, security_group_id,
ovn_const.TYPE_SECURITY_GROUPS)
def _process_security_group_rule(self, rule, is_add_acl=True):
admin_context = n_context.get_admin_context()
ovn_acl.update_acls_for_security_group(
self._plugin, admin_context, self._nb_idl,
rule['security_group_id'], rule, is_add_acl=is_add_acl)
def create_security_group_rule(self, context, rule):
self._process_security_group_rule(rule)
db_rev.bump_revision(
context, rule, ovn_const.TYPE_SECURITY_GROUP_RULES)
def delete_security_group_rule(self, context, rule):
self._process_security_group_rule(rule, is_add_acl=False)
db_rev.delete_revision(
context, rule['id'], ovn_const.TYPE_SECURITY_GROUP_RULES)
def _find_metadata_port(self, context, network_id):
if not ovn_conf.is_ovn_metadata_enabled():
return
# TODO(mjozefcz): Remove const.DEVICE_OWNER_DHCP
# from get_ports in W-release.
ports = self._plugin.get_ports(context, filters=dict(
network_id=[network_id],
device_owner=[
const.DEVICE_OWNER_DHCP,
const.DEVICE_OWNER_DISTRIBUTED]))
# TODO(mjozefcz): Remove this compatibility code in W release.
# First look for const.DEVICE_OWNER_DISTRIBUTED and then for
# const.DEVICE_OWNER_DHCP.
for port in ports:
if port['device_owner'] == const.DEVICE_OWNER_DISTRIBUTED:
return port
# Metadata ports are DHCP ports not belonging to the Neutron
# DHCP agents
for port in ports:
if not utils.is_neutron_dhcp_agent_port(port):
return port
def _find_metadata_port_ip(self, context, subnet):
metadata_port = self._find_metadata_port(context, subnet['network_id'])
if metadata_port:
for fixed_ip in metadata_port['fixed_ips']:
if fixed_ip['subnet_id'] == subnet['id']:
return fixed_ip['ip_address']
def create_metadata_port(self, context, network):
if ovn_conf.is_ovn_metadata_enabled():
metadata_port = self._find_metadata_port(context, network['id'])
if not metadata_port:
# Create a neutron port for DHCP/metadata services
port = {'port':
{'network_id': network['id'],
'tenant_id': network['project_id'],
'device_owner': const.DEVICE_OWNER_DISTRIBUTED,
'device_id': 'ovnmeta-%s' % network['id']}}
# TODO(boden): rehome create_port into neutron-lib
p_utils.create_port(self._plugin, context, port)
    def update_metadata_port(self, context, network_id, subnet_id=None):
        """Update metadata port.
        This function will allocate an IP address for the metadata port of
        the given network in all its IPv4 subnets or the given subnet.
        """
        def update_metadata_port_fixed_ips(metadata_port, subnet_ids):
            # Keep the port's current fixed IPs and request one more
            # allocation on each subnet in subnet_ids (no ip_address key
            # means Neutron picks the address).
            wanted_fixed_ips = [
                {'subnet_id': fixed_ip['subnet_id'],
                 'ip_address': fixed_ip['ip_address']} for fixed_ip in
                metadata_port['fixed_ips']]
            wanted_fixed_ips.extend({'subnet_id': s_id} for s_id in subnet_ids)
            port = {'id': metadata_port['id'],
                    'port': {'network_id': network_id,
                             'fixed_ips': wanted_fixed_ips}}
            self._plugin.update_port(n_context.get_admin_context(),
                                     metadata_port['id'], port)
        if not ovn_conf.is_ovn_metadata_enabled():
            return
        # Retrieve the metadata port of this network
        metadata_port = self._find_metadata_port(context, network_id)
        if not metadata_port:
            LOG.error("Metadata port couldn't be found for network %s",
                      network_id)
            return
        # Subnets the metadata port already has an address on.
        port_subnet_ids = set(ip['subnet_id'] for ip in
                              metadata_port['fixed_ips'])
        # If this method is called from "create_subnet" or "update_subnet",
        # only the fixed IP address from this subnet should be updated in the
        # metadata port.
        if subnet_id:
            if subnet_id not in port_subnet_ids:
                update_metadata_port_fixed_ips(metadata_port, [subnet_id])
            return
        # Retrieve all subnets in this network
        subnets = self._plugin.get_subnets(context, filters=dict(
            network_id=[network_id], ip_version=[4]))
        subnet_ids = set(s['id'] for s in subnets)
        # Find all subnets where metadata port doesn't have an IP in and
        # allocate one.
        if subnet_ids != port_subnet_ids:
            update_metadata_port_fixed_ips(metadata_port,
                                           subnet_ids - port_subnet_ids)
def get_parent_port(self, port_id):
return self._nb_idl.get_parent_port(port_id)
def is_dns_required_for_port(self, port):
try:
if not all([port['dns_name'], port['dns_assignment'],
port['device_id']]):
return False
except KeyError:
# Possible that dns extension is not enabled.
return False
if not self._nb_idl.is_table_present('DNS'):
return False
return True
    def get_port_dns_records(self, port):
        """Build ``{name: "ip [ip ...]"}`` DNS records for *port*.

        Keys are the assignment hostname, the assignment FQDN and — when
        the network carries a dns_domain that differs from the
        assignment's own domain — the hostname qualified with the network
        domain.  Multiple addresses for the same name are joined into one
        space-separated string.
        """
        port_dns_records = {}
        net = port.get('network', {})
        net_dns_domain = net.get('dns_domain', '').rstrip('.')
        for dns_assignment in port.get('dns_assignment', []):
            hostname = dns_assignment['hostname']
            fqdn = dns_assignment['fqdn'].rstrip('.')
            net_dns_fqdn = hostname + '.' + net_dns_domain
            if hostname not in port_dns_records:
                # First address seen for this hostname.
                port_dns_records[hostname] = dns_assignment['ip_address']
                if net_dns_domain and net_dns_fqdn != fqdn:
                    port_dns_records[net_dns_fqdn] = (
                        dns_assignment['ip_address'])
            else:
                # Subsequent address: append to the space-separated list.
                port_dns_records[hostname] += " " + (
                    dns_assignment['ip_address'])
                if net_dns_domain and net_dns_fqdn != fqdn:
                    port_dns_records[hostname + '.' + net_dns_domain] += (
                        " " + dns_assignment['ip_address'])
            if fqdn not in port_dns_records:
                port_dns_records[fqdn] = dns_assignment['ip_address']
            else:
                port_dns_records[fqdn] += " " + dns_assignment['ip_address']
        return port_dns_records
    def add_txns_to_sync_port_dns_records(self, txn, port, original_port=None):
        """Queue commands on *txn* to sync *port*'s DNS records into OVN.

        When *original_port* is given, records whose value changed are
        removed before the current set is (re)added.
        """
        # NOTE(numans): - This implementation has certain known limitations
        # and that will be addressed in the future patches
        # https://bugs.launchpad.net/networking-ovn/+bug/1739257.
        # Please see the bug report for more information, but just to sum up
        # here
        #  - We will have issues if two ports have same dns name
        #  - If a port is deleted with dns name 'd1' and a new port is
        #    added with the same dns name 'd1'.
        records_to_add = self.get_port_dns_records(port)
        lswitch_name = utils.ovn_name(port['network_id'])
        ls, ls_dns_record = self._nb_idl.get_ls_and_dns_record(lswitch_name)
        # If ls_dns_record is None, then we need to create a DNS row for the
        # logical switch.
        if ls_dns_record is None:
            dns_add_txn = txn.add(self._nb_idl.dns_add(
                external_ids={'ls_name': ls.name}, records=records_to_add))
            txn.add(self._nb_idl.ls_set_dns_records(ls.uuid, dns_add_txn))
            return
        if original_port:
            # Drop stale records first; the current set is re-added below.
            old_records = self.get_port_dns_records(original_port)
            for old_hostname, old_ips in old_records.items():
                if records_to_add.get(old_hostname) != old_ips:
                    txn.add(self._nb_idl.dns_remove_record(
                        ls_dns_record.uuid, old_hostname, if_exists=True))
        # Only touch rows whose stored value actually differs.
        for hostname, ips in records_to_add.items():
            if ls_dns_record.records.get(hostname) != ips:
                txn.add(self._nb_idl.dns_add_record(
                        ls_dns_record.uuid, hostname, ips))
def add_txns_to_remove_port_dns_records(self, txn, port):
lswitch_name = utils.ovn_name(port['network_id'])
ls, ls_dns_record = self._nb_idl.get_ls_and_dns_record(lswitch_name)
if ls_dns_record is None:
return
net = port.get('network', {})
net_dns_domain = net.get('dns_domain', '').rstrip('.')
hostnames = []
for dns_assignment in port['dns_assignment']:
hostname = dns_assignment['hostname']
fqdn = dns_assignment['fqdn'].rstrip('.')
if hostname not in hostnames:
hostnames.append(hostname)
net_dns_fqdn = hostname + '.' + net_dns_domain
if net_dns_domain and net_dns_fqdn != fqdn:
hostnames.append(net_dns_fqdn)
if fqdn not in hostnames:
hostnames.append(fqdn)
for hostname in hostnames:
if ls_dns_record.records.get(hostname):
txn.add(self._nb_idl.dns_remove_record(
ls_dns_record.uuid, hostname, if_exists=True))
| StarcoderdataPython |
3227012 | import numpy as np
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold, StratifiedKFold
import multiprocessing as mp
from model_eval_mse import ae_eval, vae_binned_eval, vae_eval
# ============================================
# hyperparameter exploration
# ============================================
class HyperParaEvaluator():
    """Cross-validated hyperparameter search for AE / VAE reconstruction.

    For every train/test split and CV fold, each combination of
    (intermediate_dim, latent_dim, latent_fac) is evaluated for three
    models (AE, binned VAE, unbinned VAE) in worker processes.  Train and
    validation MSEs for the maze and ITI periods are gathered into
    ``mse_cv_summary``; :meth:`evaluate` then retrains each model with
    its best combination and reports test-set MSEs.
    """
    def __init__(self,
                 activity,  # original (unbinned) activity
                 X,  # binned activity, shape (trials, bins, neurons)
                 idx_trials, frame_trial, maze_position, choFrameOffsets,  # info to pick and align activity
                 n_models, n_split, cv_fold, n_process,  # exploration setting
                 intermediate_dim_list, latent_dim_list, latent_fac_list,  # hyperparameters to explore
                 epochs_train, epochs_test, batch_size):
        # data
        self.activity = activity
        self.X = X
        _, self.n_bin, self.n_neuron = X.shape
        # necessary trial information
        self.idx_trials = idx_trials
        self.frame_trial = frame_trial
        self.maze_position = maze_position
        self.choFrameOffsets = choFrameOffsets
        # evaluation setting
        self.n_models = n_models
        self.n_split = n_split
        self.cv_fold = cv_fold
        self.n_process = n_process
        self.intermediate_dim_list = intermediate_dim_list
        self.latent_dim_list = latent_dim_list
        self.latent_fac_list = latent_fac_list
        self.epochs_train = epochs_train
        self.epochs_test = epochs_test
        self.batch_size = batch_size

    def _split_cv_hyperpara(self, split_var, fold_var, intermediate_dim,
                            latent_dim, latent_fac):
        """Evaluate one hyperparameter combination on one fold of one split.

        :param split_var: int, idx of current split
        :param fold_var: int, idx of current fold
        :param intermediate_dim: int, hyperparameter
        :param latent_dim: int, hyperparameter
        :param latent_fac: int, hyperparameter
        :return: 12 ``(index_tuple, mse)`` pairs.  The leading tag of
            each index tuple addresses axis 0 of ``mse_cv_summary``:
            AE 0-3, binned VAE 4-7, unbinned VAE 8-11, with the order
            (train-maze, val-maze, train-ITI, val-ITI) inside each model.
        """
        trainval_pos = self.trainval_pos_splits[split_var]
        train_index = self.train_index_split_cv[split_var][fold_var]
        val_index = self.val_index_split_cv[split_var][fold_var]
        # Positions of the hyperparameters inside their candidate lists.
        inter_idx = np.where(np.array(self.intermediate_dim_list) == intermediate_dim)[0][0]
        dim_idx = np.where(np.array(self.latent_dim_list) == latent_dim)[0][0]
        fac_idx = np.where(np.array(self.latent_fac_list) == latent_fac)[0][0]
        train_idx_list = [self.idx_trials[trainval_pos[i]] for i in train_index]
        val_idx_list = [self.idx_trials[trainval_pos[i]] for i in val_index]
        bin_training_data = self.X[trainval_pos[train_index], :, :]
        bin_validation_data = self.X[trainval_pos[val_index], :, :]
        nobin_training_data = [self.activity[self.frame_trial == self.idx_trials[trainval_pos[i]]] for i in train_index]
        nobin_validation_data = [self.activity[self.frame_trial == self.idx_trials[trainval_pos[i]]] for i in val_index]
        # 1. ae
        mse_val_maze, mse_train_maze, mse_val_ITI, mse_train_ITI = ae_eval(
            bin_training_data, bin_validation_data, True,
            intermediate_dim, latent_dim, latent_fac,
            epochs=self.epochs_train, batch_size=self.batch_size)
        ae_train_mse_maze = ((0, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_maze)
        ae_val_mse_maze = ((1, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_maze)
        ae_train_mse_ITI = ((2, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_ITI)
        ae_val_mse_ITI = ((3, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_ITI)
        # 2. vae_binned
        mse_val_maze, mse_train_maze, mse_val_ITI, mse_train_ITI = vae_binned_eval(
            bin_training_data, bin_validation_data, True,
            intermediate_dim, latent_dim, latent_fac,
            epochs=self.epochs_train, batch_size=self.batch_size)
        vae_binned_train_mse_maze = ((4, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_maze)
        vae_binned_val_mse_maze = ((5, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_maze)
        vae_binned_train_mse_ITI = ((6, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_ITI)
        vae_binned_val_mse_ITI = ((7, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_ITI)
        # 3. vae
        mse_val_maze, mse_train_maze, mse_val_ITI, mse_train_ITI = vae_eval(
            train_idx_list, val_idx_list,
            self.frame_trial, self.maze_position, self.choFrameOffsets,
            nobin_training_data, nobin_validation_data, True,
            intermediate_dim, latent_dim, latent_fac,
            self.epochs_train, batch_size=self.batch_size)
        vae_train_mse_maze = ((8, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_maze)
        vae_val_mse_maze = ((9, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_maze)
        vae_train_mse_ITI = ((10, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_train_ITI)
        vae_val_mse_ITI = ((11, split_var, fold_var, inter_idx, dim_idx, fac_idx), mse_val_ITI)
        return (ae_train_mse_maze, ae_val_mse_maze, ae_train_mse_ITI, ae_val_mse_ITI,
                vae_binned_train_mse_maze, vae_binned_val_mse_maze, vae_binned_train_mse_ITI, vae_binned_val_mse_ITI,
                vae_train_mse_maze, vae_val_mse_maze, vae_train_mse_ITI, vae_val_mse_ITI)

    def _unpack(self, idx):
        """Map a flat job index to (split, fold, hyperparams) and run it."""
        shape = (self.n_split, self.cv_fold, len(self.intermediate_dim_list),
                 len(self.latent_dim_list), len(self.latent_fac_list))
        split_var, fold_var, i_idx, d_idx, f_idx = np.unravel_index(idx, shape)
        return self._split_cv_hyperpara(
            split_var, fold_var,
            self.intermediate_dim_list[i_idx],
            self.latent_dim_list[d_idx],
            self.latent_fac_list[f_idx])

    def _collect_result(self, result):
        """apply_async callback: stash one worker's 12-item result pack."""
        self.results.append(result)
        print(len(self.results))

    def _best_hyperparas(self, score):
        """Return (intermediate_dim, latent_dim, latent_fac) minimizing *score*."""
        i, d, f = np.unravel_index(np.argmin(score), score.shape)
        return (self.intermediate_dim_list[i], self.latent_dim_list[d],
                self.latent_fac_list[f])

    def evaluate(self):
        """Explore hyperparameters, then test the best setting per model.

        :return: ``(mse_cv_summary, hyperpara_result)`` where
            ``mse_cv_summary`` has shape
            (4 * n_models, n_split, cv_fold, n_inter, n_dim, n_fac) and
            ``hyperpara_result`` is a (15, n_split) array with the chosen
            hyperparameters (rows 0-8) and test MSEs (rows 9-14).
        """
        # ============================================
        # explore hyperparameters through multiple split and cross validation
        # ============================================
        self.trainval_pos_splits = {}
        self.testing_pos_splits = {}
        self.train_index_split_cv = {i: {} for i in range(self.n_split)}
        self.val_index_split_cv = {i: {} for i in range(self.n_split)}
        for split_var in range(self.n_split):
            # each time with a different train-test split
            pos = np.arange(self.idx_trials.shape[0])
            np.random.shuffle(pos)
            trainval_pos, testing_pos = train_test_split(pos, test_size=0.167)
            self.trainval_pos_splits[split_var] = trainval_pos
            self.testing_pos_splits[split_var] = testing_pos
            kf = KFold(n_splits=self.cv_fold)
            for fold_var, (train_index, val_index) in enumerate(
                    kf.split(trainval_pos)):
                self.train_index_split_cv[split_var][fold_var] = train_index
                self.val_index_split_cv[split_var][fold_var] = val_index
        self.results = []
        pool = mp.Pool(self.n_process)
        num = (self.n_split * self.cv_fold * len(self.intermediate_dim_list) *
               len(self.latent_dim_list) * len(self.latent_fac_list))
        for idx in range(num):
            print(idx)
            pool.apply_async(self._unpack, args=(idx,),
                             callback=self._collect_result)
        pool.close()
        pool.join()
        self.mse_cv_summary = np.zeros(
            (4 * self.n_models, self.n_split, self.cv_fold,
             len(self.intermediate_dim_list), len(self.latent_dim_list),
             len(self.latent_fac_list)))
        for pack in self.results:
            for tag, mse in pack:  # 12 (index, value) items per pack
                self.mse_cv_summary[tag] = mse
        np.save('mse_cv_summary_0715.npy', self.mse_cv_summary)
        # ============================================
        # choose optimal hyperparameters and test
        # ============================================
        # the following lists will eventually have length = # of split
        # replications; packed into a [15, n_split] array to return
        chosen_intermediate_dim_ae = []
        chosen_latent_dim_ae = []
        chosen_latent_fac_ae = []
        chosen_intermediate_dim_vae_binned = []
        chosen_latent_dim_vae_binned = []
        chosen_latent_fac_vae_binned = []
        chosen_intermediate_dim_vae = []
        chosen_latent_dim_vae = []
        chosen_latent_fac_vae = []
        mse_test_ae_maze = []
        mse_test_ae_ITI = []
        mse_test_vae_binned_maze = []
        mse_test_vae_binned_ITI = []
        mse_test_vae_maze = []
        mse_test_vae_ITI = []
        for split_var in range(self.n_split):
            trainval_pos = self.trainval_pos_splits[split_var]
            testing_pos = self.testing_pos_splits[split_var]
            trainval_idx_list = [self.idx_trials[i] for i in trainval_pos]
            test_idx_list = [self.idx_trials[i] for i in testing_pos]
            all_bin_training_data = self.X[trainval_pos, :, :]
            all_bin_testing_data = self.X[testing_pos, :, :]
            all_nobin_training_data = [self.activity[self.frame_trial == self.idx_trials[i]] for i in trainval_pos]
            all_nobin_testing_data = [self.activity[self.frame_trial == self.idx_trials[i]] for i in testing_pos]
            # Validation MSEs averaged over folds.  Tags assigned in
            # _split_cv_hyperpara: val-maze = 1/5/9, val-ITI = 3/7/11.
            ave_mse_val_ae_maze = np.average(
                self.mse_cv_summary[1][split_var], axis=0)
            ave_mse_val_vae_binned_maze = np.average(
                self.mse_cv_summary[5][split_var], axis=0)
            ave_mse_val_vae_maze = np.average(
                self.mse_cv_summary[9][split_var], axis=0)
            # BUG FIX: the ITI validation errors live at tags 3/7/11;
            # previously the maze tags (1/5/9) were read here again, so
            # the ITI term silently duplicated the maze term.
            ave_mse_val_ae_ITI = np.average(
                self.mse_cv_summary[3][split_var], axis=0)
            ave_mse_val_vae_binned_ITI = np.average(
                self.mse_cv_summary[7][split_var], axis=0)
            ave_mse_val_vae_ITI = np.average(
                self.mse_cv_summary[11][split_var], axis=0)
            # Weighted combination of maze and ITI errors (20/15 weights —
            # presumably the maze/ITI bin counts; TODO confirm).
            ave_mse_val_ae = (20 * ave_mse_val_ae_maze +
                              15 * ave_mse_val_ae_ITI) / 35
            ave_mse_val_vae_binned = (20 * ave_mse_val_vae_binned_maze +
                                      15 * ave_mse_val_vae_binned_ITI) / 35
            ave_mse_val_vae = (20 * ave_mse_val_vae_maze +
                               15 * ave_mse_val_vae_ITI) / 35
            # ae
            intermediate_dim, latent_dim, latent_fac = self._best_hyperparas(
                ave_mse_val_ae)
            chosen_intermediate_dim_ae.append(intermediate_dim)
            chosen_latent_dim_ae.append(latent_dim)
            chosen_latent_fac_ae.append(latent_fac)
            mse_test_maze, _, mse_test_ITI, _ = ae_eval(
                all_bin_training_data, all_bin_testing_data, False,
                intermediate_dim, latent_dim, latent_fac,
                epochs=self.epochs_test, batch_size=self.batch_size)
            mse_test_ae_maze.append(mse_test_maze)
            mse_test_ae_ITI.append(mse_test_ITI)
            # vae_binned
            intermediate_dim, latent_dim, latent_fac = self._best_hyperparas(
                ave_mse_val_vae_binned)
            chosen_intermediate_dim_vae_binned.append(intermediate_dim)
            chosen_latent_dim_vae_binned.append(latent_dim)
            chosen_latent_fac_vae_binned.append(latent_fac)
            # BUG FIX: the maze value appended below is now the one
            # returned by vae_binned_eval; previously the stale AE value
            # (mse_test_maze from ae_eval above) was appended instead.
            mse_test_maze, _, mse_test_ITI, _ = vae_binned_eval(
                all_bin_training_data, all_bin_testing_data, False,
                intermediate_dim, latent_dim, latent_fac,
                epochs=self.epochs_test, batch_size=self.batch_size)
            mse_test_vae_binned_maze.append(mse_test_maze)
            mse_test_vae_binned_ITI.append(mse_test_ITI)
            # vae
            intermediate_dim, latent_dim, latent_fac = self._best_hyperparas(
                ave_mse_val_vae)
            chosen_intermediate_dim_vae.append(intermediate_dim)
            chosen_latent_dim_vae.append(latent_dim)
            chosen_latent_fac_vae.append(latent_fac)
            mse_test_maze, _, mse_test_ITI, _ = vae_eval(
                trainval_idx_list, test_idx_list,
                self.frame_trial, self.maze_position, self.choFrameOffsets,
                all_nobin_training_data, all_nobin_testing_data, False,
                intermediate_dim, latent_dim, latent_fac,
                self.epochs_test, self.batch_size)
            mse_test_vae_maze.append(mse_test_maze)
            mse_test_vae_ITI.append(mse_test_ITI)
        # ============================================
        # pack returning results
        # ============================================
        self.hyperpara_result = np.zeros((15, self.n_split))
        rows = [chosen_intermediate_dim_ae, chosen_latent_dim_ae,
                chosen_latent_fac_ae, chosen_intermediate_dim_vae_binned,
                chosen_latent_dim_vae_binned, chosen_latent_fac_vae_binned,
                chosen_intermediate_dim_vae, chosen_latent_dim_vae,
                chosen_latent_fac_vae, mse_test_ae_maze, mse_test_ae_ITI,
                mse_test_vae_binned_maze, mse_test_vae_binned_ITI,
                mse_test_vae_maze, mse_test_vae_ITI]
        for row, values in enumerate(rows):
            self.hyperpara_result[row, :] = values
        return self.mse_cv_summary, self.hyperpara_result
| StarcoderdataPython |
3258119 | import os
import pytest
from main import app, db
from models import User
@pytest.fixture
def client():
    """Fresh Flask test client backed by an in-memory DB, wiped per test."""
    app.config['TESTING'] = True
    os.environ["DATABASE_URL"] = "sqlite:///:memory:"
    test_client = app.test_client()
    cleanup()  # clean up before every test
    db.create_all()
    yield test_client
def test_home_page(client):
    """The landing page shows the secret-code image."""
    resp = client.get('/')
    assert b'/static/img/secretcode.jpg' in resp.data
def test_index_not_logged_in(client):
    """Without a session, the index shows the login prompt."""
    resp = client.get('/index')
    assert b'Enter your name' in resp.data
def test_index_logged_in(client):
    """After login, the index shows the guessing prompt."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    resp = client.get('/index')
    assert b'Uw gok?' in resp.data
def test_index_wrong_password(client):
    """Logging in with a bad password for an existing user is rejected."""
    signup = {"user-name": "TestUser", "user-email": "<EMAIL>",
              "user-password": "<PASSWORD>"}
    client.post('/login', data=signup, follow_redirects=True)
    bad_login = {"user-name": "TestUser",
                 "user-email": "<EMAIL>",
                 "user-password": "<PASSWORD>"}
    resp = client.post('/login', data=bad_login, follow_redirects=True)
    assert b'WRONG PASSWORD!' in resp.data
def test_result_correct(client):
    """Guessing exactly the secret number yields the success message."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    # Pin the secret number so the guess below is deterministic.
    user = db.query(User).first()
    user.secret_number = 22
    db.add(user)
    db.commit()
    resp = client.post('/result', data={"guess": 22})
    assert b'Het geheime nummer is inderdaad 22' in resp.data
def test_result_te_klein(client):
    """A guess below the secret number asks for something bigger."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    # Pin the secret number so the guess below is deterministic.
    user = db.query(User).first()
    user.secret_number = 22
    db.add(user)
    db.commit()
    resp = client.post('/result', data={"guess": 20})  # too small
    assert b'try something bigger.' in resp.data
def test_result_te_groot(client):
    """A guess above the secret number asks for something smaller."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    # Pin the secret number so the guess below is deterministic.
    user = db.query(User).first()
    user.secret_number = 22
    db.add(user)
    db.commit()
    resp = client.post('/result', data={"guess": 24})  # too big
    assert b'try something smaller.' in resp.data
def test_result_out_of_bound_low(client):
    """A guess below the 1-30 range triggers the bounds message."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    # Pin the secret number so the guess below is deterministic.
    user = db.query(User).first()
    user.secret_number = 22
    db.add(user)
    db.commit()
    resp = client.post('/result', data={"guess": 0})  # out of bound
    assert b'Het getal moet tussen 1 en 30 liggen.' in resp.data
def test_result_out_of_bound_high(client):
    """A guess above the 1-30 range triggers the bounds message."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    # Pin the secret number so the guess below is deterministic.
    user = db.query(User).first()
    user.secret_number = 22
    db.add(user)
    db.commit()
    resp = client.post('/result', data={"guess": 31})  # out of bound
    assert b'Het getal moet tussen 1 en 30 liggen.' in resp.data
def test_result_geen_getal(client):
    """A non-numeric guess triggers the not-a-number message."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    # Pin the secret number so the guess below is deterministic.
    user = db.query(User).first()
    user.secret_number = 22
    db.add(user)
    db.commit()
    resp = client.post('/result', data={"guess": "Axel"})  # not a number
    assert b'Dat was geen (geheel) getal.' in resp.data
def test_myprofile(client):
    """The profile page shows the logged-in user's name."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    resp = client.get('/profile')
    # Added second part of the AND per comment of Raphael
    assert b'TestUser' in resp.data and b'YOUR PROFILE' in resp.data
def test_profile_edit(client):
    """GET shows the edit form; POST updates the profile data."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    resp = client.get('/profile/edit')
    # Added second part of the AND per comment of Raphael
    assert b'TestUser' in resp.data and b'EDIT YOUR PROFILE' in resp.data
    new_profile = {"profile-name": "TestUser2",
                   "profile-email": "<EMAIL>",
                   "profile-password": "<PASSWORD>"}
    resp = client.post('/profile/edit', data=new_profile,
                       follow_redirects=True)
    assert b'<EMAIL>' in resp.data
def test_profile_delete(client):
    """GET shows the delete page; POST removes the user and redirects home."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    resp = client.get('/profile/delete')
    # Added second part of the AND per comment of Raphael
    assert b'TestUser' in resp.data and b'DELETE YOUR PROFILE' in resp.data
    resp = client.post('/profile/delete', follow_redirects=True)
    # redirected back to the index site
    assert b'/static/img/secretcode.jpg' in resp.data
def test_all_users(client):
    """The users page lists only users that actually exist."""
    resp = client.get('/users')
    assert b'<H3>USERS</H3>' in resp.data
    assert b'TestUser' not in resp.data  # TestUser is not yet created
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    resp = client.get('/users')
    assert b'<H3>USERS</H3>' in resp.data
    assert b'TestUser' in resp.data
def test_user_details(client):
    """A user's detail page shows their name and e-mail."""
    login = {"user-name": "TestUser", "user-email": "<EMAIL>",
             "user-password": "<PASSWORD>"}
    client.post('/login', data=login, follow_redirects=True)
    user = db.query(User).first()
    resp = client.get('/user/{}'.format(user.id))
    assert b'<EMAIL>' in resp.data
    assert b'TestUser' in resp.data
def test_logout(client):
    # create a user
    client.post('/login', data={"user-name": "TestUser", "user-email": "<EMAIL>",
                                "user-password": "<PASSWORD>"}, follow_redirects=True)
    # And then logout again
    # NOTE(review): despite its name, this test never hits a logout
    # endpoint — it only re-fetches '/', and nothing here shows that the
    # session was cleared.  Confirm the intended logout route and call it
    # (and assert on its effect) here.
    response = client.get('/')
    assert b'/static/img/secretcode.jpg' in response.data
def cleanup():
    """Drop all tables so every test starts from an empty database."""
    db.drop_all()
| StarcoderdataPython |
115823 | import getpass
import os
import sys
import math
from io import StringIO
import shutil
import datetime
from os.path import splitext
from difflib import unified_diff
import pytest
from astropy.io import fits
from astropy.io.fits import FITSDiff
from astropy.utils.data import conf
import numpy as np
import stwcs
from stsci.tools import fileutil
from ci_watson.artifactory_helpers import get_bigdata, generate_upload_schema
from ci_watson.hst_helpers import download_crds, ref_from_image
# Base classes for actual tests.
# NOTE: Named in a way so pytest will not pick them up here.
@pytest.mark.bigdata
class BaseCal:
prevdir = os.getcwd()
use_ftp_crds = True
timeout = 30 # seconds
tree = 'dev'
# Numpy default for allclose comparison
rtol = 1e-6
atol = 1e-5
# To be defined by instrument
refstr = ''
prevref = ''
input_loc = ''
ref_loc = ''
ignore_keywords = []
# To be defined by individual test
subdir = ''
    @pytest.fixture(autouse=True)
    def setup_class(self, tmpdir, envopt, pytestconfig):
        """
        Run test in own dir so we can keep results separate from
        other tests.
        """
        # NOTE(review): tmpdir.ensure(..., dir=True) returns the created
        # path object (truthy), so the mkdir branch looks unreachable —
        # confirm the intended behavior.
        if not tmpdir.ensure(self.subdir, dir=True):
            p = tmpdir.mkdir(self.subdir).strpath
        else:
            p = tmpdir.join(self.subdir).strpath
        os.chdir(p)

        # NOTE: This could be explicitly controlled using pytest fixture
        # but too many ways to do the same thing would be confusing.
        # Refine this logic if using pytest fixture.
        # HSTCAL cannot open remote CRDS on FTP but central storage is okay.
        # So use central storage if available to avoid FTP.
        if self.prevref is None or self.prevref.startswith(('ftp', 'http')):
            os.environ[self.refstr] = p + os.sep
            self.use_ftp_crds = True

        # Turn off Astrometry updates
        os.environ['ASTROMETRY_STEP_CONTROL'] = 'OFF'

        # This controls astropy.io.fits timeout
        conf.remote_timeout = self.timeout

        # Update tree to point to correct environment
        self.tree = envopt

        # Collect pytest configuration values specified in setup.cfg or pytest.ini
        self.inputs_root = pytestconfig.getini('inputs_root')[0]
        self.results_root = pytestconfig.getini('results_root')[0]
def teardown_class(self):
"""Reset path and variables."""
conf.reset('remote_timeout')
os.chdir(self.prevdir)
if self.use_ftp_crds and self.prevref is not None:
os.environ[self.refstr] = self.prevref
def get_data(self, *args):
"""
Download `filename` into working directory using
`get_bigdata`. This will then return the full path to
the local copy of the file.
"""
local_file = get_bigdata(self.inputs_root, self.tree, self.input_loc, *args)
return local_file
def get_input_file(self, *args, refsep='$'):
"""
Download or copy input file (e.g., RAW) into the working directory.
The associated CRDS reference files in ``refstr`` are also
downloaded, if necessary.
"""
filename = self.get_data(*args)
ref_files = ref_from_image(filename, ['IDCTAB', 'OFFTAB', 'NPOLFILE', 'D2IMFILE', 'DGEOFILE'])
print("Looking for REF_FILES: {}".format(ref_files))
for ref_file in ref_files:
if ref_file.strip() == '':
continue
if refsep not in ref_file: # Local file
refname = self.get_data('customRef', ref_file)
else: # Download from FTP, if applicable
refname = os.path.join(ref_file)
if self.use_ftp_crds:
download_crds(refname, self.timeout)
return filename
def compare_outputs(self, outputs, raise_error=True):
"""
Compare output with "truth" using appropriate
diff routine; namely,
``fitsdiff`` for FITS file comparisons
``unified_diff`` for ASCII products.
Parameters
----------
outputs : list of tuple
A list of tuples, each containing filename (without path)
of CALXXX output and truth, in that order.
raise_error : bool
Raise ``AssertionError`` if difference is found.
Returns
-------
report : str
Report from ``fitsdiff``.
This is part of error message if ``raise_error=True``.
"""
all_okay = True
creature_report = ''
# Create instructions for uploading results to artifactory for use
# as new comparison/truth files
testpath, testname = os.path.split(os.path.abspath(os.curdir))
# organize results by day test was run...could replace with git-hash
whoami = getpass.getuser() or 'nobody'
dt = datetime.datetime.now().strftime("%d%b%YT")
ttime = datetime.datetime.now().strftime("%H_%M_%S")
user_tag = 'NOT_CI_{}_{}'.format(whoami, ttime)
build_tag = os.environ.get('BUILD_TAG', user_tag)
build_suffix = os.environ.get('BUILD_MATRIX_SUFFIX', 'standalone')
testdir = "{}_{}_{}".format(testname, build_tag, build_suffix)
tree = os.path.join(self.results_root, self.input_loc,
dt, testdir) + os.sep
updated_outputs = []
for actual, desired in outputs:
# Get "truth" image
s = self.get_data('truth', desired)
if s is not None:
desired = s
if actual.endswith('fits'):
# Working with FITS files...
fdiff = FITSDiff(actual, desired, rtol=self.rtol, atol=self.atol,
ignore_keywords=self.ignore_keywords)
creature_report += fdiff.report()
if not fdiff.identical:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not fdiff.identical and all_okay:
all_okay = False
else:
# Try ASCII-based diff
with open(actual) as afile:
actual_lines = afile.readlines()
with open(desired) as dfile:
desired_lines = dfile.readlines()
udiff = unified_diff(actual_lines, desired_lines,
fromfile=actual, tofile=desired)
old_stdout = sys.stdout
udiffIO = StringIO()
sys.stdout = udiffIO
sys.stdout.writelines(udiff)
sys.stdout = old_stdout
udiff_report = udiffIO.getvalue()
creature_report += udiff_report
if len(udiff_report) > 2 and all_okay:
all_okay = False
if len(udiff_report) > 2:
# Only keep track of failed results which need to
# be used to replace the truth files (if OK).
updated_outputs.append((actual, desired))
if not all_okay:
# Write out JSON file to enable retention of different results
new_truths = [os.path.abspath(i[1]) for i in updated_outputs]
for files in updated_outputs:
print("Renaming {} as new 'truth' file: {}".format(
files[0], files[1]))
shutil.move(files[0], files[1])
log_pattern = [os.path.join(os.path.dirname(x), '*.log') for x in new_truths]
generate_upload_schema(pattern=new_truths + log_pattern,
testname=testname,
target= tree)
if not all_okay and raise_error:
raise AssertionError(os.linesep + creature_report)
return creature_report
class BaseACS(BaseCal):
    """BaseCal configured for ACS data (CRDS env var ``jref``)."""
    refstr = 'jref'
    prevref = os.environ.get(refstr)
    input_loc = 'acs'
    ref_loc = 'acs'
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
class BaseACSHRC(BaseACS):
    """ACS/HRC-specific data locations."""
    input_loc = 'acs/hrc'
    ref_loc = 'acs/hrc/ref'
class BaseACSWFC(BaseACS):
    """ACS/WFC-specific data locations."""
    input_loc = 'acs/wfc'
    ref_loc = 'acs/wfc/ref'
class BaseWFC3(BaseCal):
    """BaseCal configured for WFC3 data (CRDS env var ``iref``)."""
    refstr = 'iref'
    input_loc = 'wfc3'
    ref_loc = 'wfc3/ref'
    prevref = os.environ.get(refstr)
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
class BaseSTIS(BaseCal):
    """BaseCal configured for STIS data (CRDS env var ``oref``)."""
    refstr = 'oref'
    prevref = os.environ.get(refstr)
    input_loc = 'stis'
    ref_loc = 'stis/ref'
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
class BaseWFPC2(BaseCal):
    """BaseCal configured for WFPC2 data (CRDS env var ``uref``)."""
    refstr = 'uref'
    prevref = os.environ.get(refstr)
    input_loc = 'wfpc2'
    ref_loc = 'wfpc2/ref'
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
def centroid_compare(centroid):
    """Sort key for centroid-distance records: order by flux difference.

    Records are ``[dist, dflux, center1, center2]`` as built in
    ``BaseUnit.centroid_distances``; element 1 is the flux difference.
    """
    flux_difference = centroid[1]
    return flux_difference
class BaseUnit(BaseCal):
    """Unit-test helpers for image comparison: centroid finding/matching,
    synthetic point/grid images, and simple FITS/WCS read-write utilities.
    """
    buff = 0  # extra padding (pixels) around subimages written in reports
    refstr = 'jref'
    prevref = os.environ.get(refstr)
    input_loc = 'acs'
    ref_loc = 'acs'
    ignore_keywords = ['origin', 'filename', 'date', 'iraf-tlm', 'fitsdate',
                       'upwtim', 'wcscdate', 'upwcsver', 'pywcsver',
                       'history', 'prod_ver', 'rulefile']
    atol = 1.0e-5
    def bound_image(self, image):
        """
        Compute region where image is non-zero
        """
        coords = np.nonzero(image)
        ymin = coords[0].min()
        ymax = coords[0].max()
        xmin = coords[1].min()
        xmax = coords[1].max()
        return (ymin, ymax, xmin, xmax)
    def centroid(self, image, size, center):
        """
        Compute the centroid of a rectangular area

        Returns ``[y_centroid, x_centroid, total_flux]`` or None when the
        window contains zero total flux.
        """
        # NOTE(review): ylo/xlo can go negative near the image edge, which
        # would wrap via negative indexing -- confirm callers avoid edges.
        ylo = int(center[0]) - size // 2
        yhi = min(ylo + size, image.shape[0])
        xlo = int(center[1]) - size // 2
        xhi = min(xlo + size, image.shape[1])
        center = [0.0, 0.0, 0.0]
        for y in range(ylo, yhi):
            for x in range(xlo, xhi):
                center[0] += y * image[y,x]
                center[1] += x * image[y,x]
                center[2] += image[y,x]
        if center[2] == 0.0: return None
        center[0] /= center[2]
        center[1] /= center[2]
        return center
    def centroid_close(self, list_of_centroids, size, point):
        """
        Find if any centroid is close to a point

        Returns 1 when some known centroid lies within half the window size
        of ``point`` in both axes, else 0.
        """
        for i in range(len(list_of_centroids)-1, -1, -1):
            if (abs(list_of_centroids[i][0] - point[0]) < size / 2 and
                abs(list_of_centroids[i][1] - point[1]) < size / 2):
                return 1
        return 0
    def centroid_distances(self, image1, image2, amp, size):
        """
        Compute a list of centroids and the distances between them in two images

        Each record is ``[dist, dflux, center1, center2]``; the list is
        sorted by flux difference (see ``centroid_compare``).
        """
        distances = []
        list_of_centroids, lst_pts = self.centroid_list(image2, amp, size)
        for center2, pt in zip(list_of_centroids, lst_pts):
            center1 = self.centroid(image1, size, pt)
            if center1 is None: continue
            disty = center2[0] - center1[0]
            distx = center2[1] - center1[1]
            dist = math.sqrt(disty * disty + distx * distx)
            dflux = abs(center2[2] - center1[2])
            distances.append([dist, dflux, center1, center2])
        distances.sort(key=centroid_compare)
        return distances
    def centroid_list(self, image, amp, size):
        """
        Find the next centroid

        Scans all pixels above ``amp`` and records a centroid for each point
        not already close to a previously found centroid.
        """
        list_of_centroids = []
        list_of_points = []
        points = np.transpose(np.nonzero(image > amp))
        for point in points:
            if not self.centroid_close(list_of_centroids, size, point):
                center = self.centroid(image, size, point)
                list_of_centroids.append(center)
                list_of_points.append(point)
        return list_of_centroids, list_of_points
    def centroid_statistics(self, title, fname, image1, image2, amp, size):
        """
        write centroid statistics to compare differences btw two images

        Writes a human-readable report to ``fname`` and returns a 3-tuple of
        (minimum, median, maximum) centroid distances.
        """
        stats = ("minimum", "median", "maximum")
        # Indices 2 and 3 below select test/reference entries in a record.
        images = (None, None, image1, image2)
        im_type = ("", "", "test", "reference")
        diff = []
        distances = self.centroid_distances(image1, image2, amp, size)
        indexes = (0, len(distances)//2, len(distances)-1)
        fd = open(fname, 'w')
        fd.write("*** %s ***\n" % title)
        if len(distances) == 0:
            diff = [0.0, 0.0, 0.0]
            fd.write("No matches!!\n")
        elif len(distances) == 1:
            diff = [distances[0][0], distances[0][0], distances[0][0]]
            fd.write("1 match\n")
            fd.write("distance = %f flux difference = %f\n" % (distances[0][0], distances[0][1]))
            for j in range(2, 4):
                # Cut a small subimage around the centroid for the report.
                ylo = int(distances[0][j][0]) - (1+self.buff)
                yhi = int(distances[0][j][0]) + (2+self.buff)
                xlo = int(distances[0][j][1]) - (1+self.buff)
                xhi = int(distances[0][j][1]) + (2+self.buff)
                subimage = images[j][ylo:yhi,xlo:xhi]
                fd.write("\n%s image centroid = (%f,%f) image flux = %f\n" %
                         (im_type[j], distances[0][j][0], distances[0][j][1], distances[0][j][2]))
                fd.write(str(subimage) + "\n")
        else:
            fd.write("%d matches\n" % len(distances))
            for k in range(0,3):
                i = indexes[k]
                diff.append(distances[i][0])
                fd.write("\n%s distance = %f flux difference = %f\n" % (stats[k], distances[i][0], distances[i][1]))
                for j in range(2, 4):
                    ylo = int(distances[i][j][0]) - (1+self.buff)
                    yhi = int(distances[i][j][0]) + (2+self.buff)
                    xlo = int(distances[i][j][1]) - (1+self.buff)
                    xhi = int(distances[i][j][1]) + (2+self.buff)
                    subimage = images[j][ylo:yhi,xlo:xhi]
                    fd.write("\n%s %s image centroid = (%f,%f) image flux = %f\n" %
                             (stats[k], im_type[j], distances[i][j][0], distances[i][j][1], distances[i][j][2]))
                    fd.write(str(subimage) + "\n")
        fd.close()
        return tuple(diff)
    def make_point_image(self, input_image, point, value):
        """
        Create an image with a single point set
        """
        output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
        output_image[point] = value
        return output_image
    def make_grid_image(self, input_image, spacing, value):
        """
        Create an image with points on a grid set
        """
        output_image = np.zeros(input_image.shape, dtype=input_image.dtype)
        shape = output_image.shape
        for y in range(spacing//2, shape[0], spacing):
            for x in range(spacing//2, shape[1], spacing):
                output_image[y,x] = value
        return output_image
    def print_wcs(self, title, wcs):
        """
        Print the wcs header cards
        """
        print("=== %s ===" % title)
        print(wcs.to_header_string())
    def read_image(self, filename):
        """
        Read the image from a fits file

        Returns the data array of extension 1.
        """
        hdu = fits.open(filename)
        image = hdu[1].data
        hdu.close()
        return image
    def read_wcs(self, filename):
        """
        Read the wcs of a fits file

        Returns an ``stwcs.wcsutil.HSTWCS`` built from extension 1.
        """
        hdu = fits.open(filename)
        wcs = stwcs.wcsutil.HSTWCS(hdu, 1)
        hdu.close()
        return wcs
    def write_wcs(self, hdu, image_wcs):
        """
        Update header with WCS keywords
        """
        hdu.header['ORIENTAT'] = image_wcs.orientat
        hdu.header['CD1_1'] = image_wcs.wcs.cd[0][0]
        hdu.header['CD1_2'] = image_wcs.wcs.cd[0][1]
        hdu.header['CD2_1'] = image_wcs.wcs.cd[1][0]
        hdu.header['CD2_2'] = image_wcs.wcs.cd[1][1]
        hdu.header['CRVAL1'] = image_wcs.wcs.crval[0]
        hdu.header['CRVAL2'] = image_wcs.wcs.crval[1]
        hdu.header['CRPIX1'] = image_wcs.wcs.crpix[0]
        hdu.header['CRPIX2'] = image_wcs.wcs.crpix[1]
        hdu.header['CTYPE1'] = image_wcs.wcs.ctype[0]
        hdu.header['CTYPE2'] = image_wcs.wcs.ctype[1]
        hdu.header['VAFACTOR'] = 1.0
    def write_image(self, filename, wcs, *args):
        """
        Write a multi-extension FITS file with the given WCS.

        Each positional array in ``args`` becomes an image extension named
        SCI, WHT, CTX (in that order).
        """
        extarray = ['SCI', 'WHT', 'CTX']
        pimg = fits.HDUList()
        phdu = fits.PrimaryHDU()
        phdu.header['NDRIZIM'] = 1
        phdu.header['ROOTNAME'] = filename
        pimg.append(phdu)
        for img in args:
            # Create a MEF file with the specified extname
            extn = extarray.pop(0)
            extname = fileutil.parseExtn(extn)
            ehdu = fits.ImageHDU(data=img)
            ehdu.header['EXTNAME'] = extname[0]
            ehdu.header['EXTVER'] = extname[1]
            self.write_wcs(ehdu, wcs)
            pimg.append(ehdu)
        pimg.writeto(filename)
        del pimg
def add_suffix(fname, suffix, range=None):
    """Insert a suffix into a file name, keeping the extension.

    Parameters
    ----------
    fname: str
        The file name to modify.
    suffix: str
        The suffix to insert before the extension.
    range: range
        Optional iterable of indexes; when given, one suffixed name is
        produced per index.  (Name kept for backward compatibility even
        though it shadows the builtin.)

    Returns
    -------
    fname, fname_with_suffix
        2-tuple of the original file name and the suffixed name.
        If `range` is given, the second element is a list of names.
    """
    root, ext = splitext(fname)
    if range is None:
        result = '{}_{}{}'.format(root, suffix, ext)
    else:
        result = ['{}_{}_{}{}'.format(root, idx, suffix, ext)
                  for idx in range]
    return fname, result
| StarcoderdataPython |
3380574 | <gh_stars>0
import ipywidgets as ipw
def get_start_widget(appbase, jupbase):
    """Return an ``ipywidgets.HTML`` start-page table of notebook links.

    Parameters
    ----------
    appbase : str
        Base URL of this app's notebooks; substituted into the template.
    jupbase : str
        Base URL of the Jupyter server; available to the template.
    """
    #http://fontawesome.io/icons/
    template = """
    <table>
    <tr>
    <th style="text-align:center">Structures</th>
    <th style="width:70px" rowspan=2></th>
    <th style="text-align:center">Nanoribbons</th>
    <th style="width:70px" rowspan=2></th>
    <th style="text-align:center">Slab Models</th>
    <tr>
    <td valign="top"><ul>
    <li><a href="{appbase}/upload_structure.ipynb" target="_blank">Upload structures</a>
    <li><a href="{appbase}/rescale_structure.ipynb" target="_blank">Scale structures</a>
    <li><a href="{appbase}/construct_cell.ipynb" target="_blank">Construct cell</a>
    <li><a href="{appbase}/edit_structure.ipynb" target="_blank">Assign spin, remove atoms</a>
    </ul></td>
    <td valign="top"><ul>
    <li><a href="{appbase}/nanoribbon/submit.ipynb" target="_blank">Submit calculation</a>
    <li><a href="{appbase}/nanoribbon/search.ipynb" target="_blank">Search database</a>
    </ul></td>
    <td valign="top"><ul>
    <li><a href="{appbase}/slab/build.ipynb" target="_blank">Construct slab</a>
    <li><a href="{appbase}/slab/submit_geopt.ipynb" target="_blank">Submit geo-opt</a>
    <li><a href="{appbase}/slab/search.ipynb" target="_blank">Search database</a>
    <li><a href="{appbase}/slab/submit_adsorption.ipynb" target="_blank">Adsorption energy</a>
    </ul></td>
    </tr></table>
    """
    html = template.format(appbase=appbase, jupbase=jupbase)
    return ipw.HTML(html)
#EOF
| StarcoderdataPython |
64146 | <filename>masonite/helpers/misc.py
def dot(data, compile_to=None):
    """Compile dot notation into the given template.

    ``data`` is a dotted path like ``'a.b.c'``.  ``compile_to`` is a
    template where ``{1}`` marks the first segment and ``{.}`` marks where
    each remaining segment goes, e.g. ``dot('a.b.c', '{1}[{.}]')`` yields
    ``'a[b][c]'``.

    Raises ``ValueError`` when more than one character separates ``{1}``
    and ``{.}`` in the template.
    """
    segments = data.split('.')
    prefix = compile_to.split('{1}')[0]
    wrappers = compile_to.replace(prefix + '{1}', '').split('{.}')
    if any(len(piece) > 1 for piece in wrappers):
        raise ValueError("Cannot have multiple values between {1} and {.}")
    result = prefix + segments[0]
    for segment in segments[1:]:
        result += wrappers[0] + segment + wrappers[1]
    return result
| StarcoderdataPython |
3235159 | <gh_stars>0
import setuptools
# Package metadata for nn-common-modules: reusable segmentation network
# blocks and losses.  The squeeze_and_excitation dependency is pinned to a
# wheel URL on GitHub releases.
setuptools.setup(name="nn-common-modules",
                 version="1.2",
                 url="https://github.com/abhi4ssj/nn-common-modules",
                 author="<NAME>, <NAME>, <NAME>",
                 author_email="<EMAIL>",
                 description="Common modules, blocks and losses which can be reused in a deep neural netwok specifically for segmentation",
                 packages=setuptools.find_packages(),
                 install_requires=['pip>=19.0.0', 'numpy>=1.14.0', 'torch>=1.0.0',
                                   'squeeze_and_excitation @ https://github.com/ai-med/squeeze_and_excitation/releases/download/v1.0/squeeze_and_excitation-1.0-py2.py3-none-any.whl'],
                 python_requires='>=3.5')
| StarcoderdataPython |
1680277 | #!/usr/bin/env python
from nodes import RootNode, FilterNode, HamlNode, create_node
from optparse import OptionParser
import sys
# File extensions recognized as HAML sources.
VALID_EXTENSIONS=['haml', 'hamlpy']
class Compiler:
    """Compiles HAML source text into rendered output via a node tree.

    NOTE: Python 2 code (uses ``iterator.next()``).
    """
    def __init__(self, options_dict=None):
        options_dict = options_dict or {}
        # 'debug_tree' is popped so it is not forwarded to RootNode.
        self.debug_tree = options_dict.pop('debug_tree', False)
        self.options_dict = options_dict
    def process(self, raw_text):
        """Split raw HAML text into lines and compile them."""
        split_text = raw_text.split('\n')
        return self.process_lines(split_text)
    def process_lines(self, haml_lines):
        """Build the node tree line by line and render (or debug-print) it.

        Joins multi-line HAML (detected by unbalanced '{') into a single
        node; blank lines are recorded as extra newlines on the previous
        node rather than creating nodes of their own.
        """
        root = RootNode(**self.options_dict)
        line_iter = iter(haml_lines)
        haml_node=None
        for line_number, line in enumerate(line_iter):
            node_lines = line
            # Multi-line joining is skipped inside filter nodes.
            if not root.parent_of(HamlNode(line)).inside_filter_node():
                if line.count('{') - line.count('}') == 1:
                    start_multiline=line_number # For exception handling
                    # Consume lines until the braces balance out again.
                    while line.count('{') - line.count('}') != -1:
                        try:
                            line = line_iter.next()  # Python 2 iterator API
                        except StopIteration:
                            raise Exception('No closing brace found for multi-line HAML beginning at line %s' % (start_multiline+1))
                        node_lines += line
            # Blank lines
            if haml_node is not None and len(node_lines.strip()) == 0:
                haml_node.newlines += 1
            else:
                haml_node = create_node(node_lines)
                if haml_node:
                    root.add_node(haml_node)
        if self.options_dict and self.options_dict.get('debug_tree'):
            return root.debug_tree()
        else:
            return root.render()
def convert_files():
    """Command-line entry point: compile a HAML file to HTML.

    Usage: first argument is the input file; optional second argument is
    the output file (otherwise the result is printed).
    NOTE: Python 2 code (uses ``print`` statements).
    """
    import sys
    import codecs
    parser = OptionParser()
    parser.add_option(
        "-d", "--debug-tree", dest="debug_tree",
        action="store_true",
        help="Print the generated tree instead of the HTML")
    parser.add_option(
        "--attr-wrapper", dest="attr_wrapper",
        type="choice", choices=('"', "'"), default="'",
        action="store",
        help="The character that should wrap element attributes. "
             "This defaults to ' (an apostrophe).")
    (options, args) = parser.parse_args()
    if len(args) < 1:
        print "Specify the input file as the first argument."
    else:
        infile = args[0]
        haml_lines = codecs.open(infile, 'r', encoding='utf-8').read().splitlines()
        # Parsed CLI options become Compiler options.
        compiler = Compiler(options.__dict__)
        output = compiler.process_lines(haml_lines)
        if len(args) == 2:
            outfile = codecs.open(args[1], 'w', encoding='utf-8')
            outfile.write(output)
        else:
            print output
# Allow running this module directly as a script.
if __name__ == '__main__':
    convert_files()
| StarcoderdataPython |
1681219 | <filename>thoughts/ricochet.py<gh_stars>1-10
# Data Structures & common logic
from enum import Enum
import heapq
import matplotlib.pyplot as plt
DIMENSION = 16 # size of the (square) board
DIRX = [0, 0, -1, 1] # x offsets, indexed by Direction.value (UP, DOWN, LEFT, RIGHT)
DIRY = [1, -1, 0, 0] # y offsets, indexed by Direction.value
COLORS = ['red','blue','green','purple'] # robot colors, indexed by Color.value
MAX_DEPTH = 30 # search cut-off: give up beyond this many moves
class Direction(Enum):
    """The four move directions; values index into DIRX/DIRY."""
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3
    def reverse(self):
        """Return the opposite direction."""
        opposites = {
            Direction.UP: Direction.DOWN,
            Direction.DOWN: Direction.UP,
            Direction.LEFT: Direction.RIGHT,
            Direction.RIGHT: Direction.LEFT,
        }
        return opposites.get(self)
class Color(Enum):
    """Robot colors; values index into the COLORS list."""
    RED = 0
    BLUE = 1
    GREEN = 2
    PURPLE = 3
class Board(object):
    """Ricochet Robots board: a set of walls in normalized form."""
    def __init__(self):
        # note: bottom left of grid is 0, 0
        self.walls = set() # walls, normalized to (x, y, (DOWN|LEFT))
    def add_wall(self, x, y, direction):
        """Add a wall to the current position"""
        self.walls.add(normalize_wall(x, y, direction))
    def has_wall(self, x, y, direction):
        """Determine whether there's a wall in the given position."""
        return normalize_wall(x, y, direction) in self.walls
class Stat(object):
    """Mutable search statistics filled in by astar()."""
    def __init__(self):
        self.iteration = 0  # nodes popped from the priority queue
        self.distance = -1  # length of the found solution, or -1 if none
    def __repr__(self):
        return repr(self.__dict__)
def normalize_wall(x, y, direction):
    """Canonicalize a wall description.

    Each physical wall has two descriptions (e.g. UP of one cell is DOWN of
    the cell above); this maps both onto a single (x, y, DOWN|LEFT) form.
    """
    if direction == Direction.UP:
        return (x, y + 1, Direction.DOWN)
    if direction == Direction.RIGHT:
        return (x + 1, y, Direction.LEFT)
    return (x, y, direction)
def compute_delta(robots1, robots2):
    """Return (color_index, old_pos, new_pos) for the robot that moved.

    Assumes exactly one robot differs between the two configurations; used
    to reconstruct robot paths from successive board states.
    """
    for idx in range(len(COLORS)):
        before = robots1[idx]
        after = robots2[idx]
        if before != after:
            return (idx, before, after)
    assert False, "same positions given"
def next_moves_single(board, robot_index, robots):
    """Generate list of next moves by moving a single robot given by the index.

    A robot slides in each direction until blocked; each yielded value is
    the full robots tuple with only that robot's position replaced.
    """
    def generate(index, replaced_robot):
        # Rebuild the robots tuple with the robot at `index` replaced.
        return tuple((replaced_robot if i == index else r) for (i, r) in enumerate(robots))
    robot = robots[robot_index]
    for direction in Direction:
        moved = False
        (x, y) = robot
        while True:
            newx = x + DIRX[direction.value]
            newy = y + DIRY[direction.value]
            # stops when a wall or another robot is encountered.
            if board.has_wall(x, y, direction) or (newx, newy) in robots:
                # Only yield if the robot actually moved at least one cell.
                if moved: yield generate(robot_index, (x, y))
                break
            moved = True
            x = newx
            y = newy
def next_moves_all(board, robots):
    """Yield every configuration reachable by moving exactly one robot."""
    for index, _robot in enumerate(robots):
        for candidate in next_moves_single(board, index, robots):
            assert candidate is not None
            yield candidate
def prev_position(board, obstacles, start, magic_stop=False):
    """Yield cells from which a sliding robot could have arrived at `start`.

    For each direction, first checks that something (a wall behind, or an
    obstacle) could have stopped a robot at `start`; when `magic_stop` is
    True this check is skipped (the robot may stop anywhere).  Then walks
    away from `start` yielding every unobstructed cell along that line.
    """
    for direction in Direction:
        (x, y) = start
        reverse = direction.reverse()
        # Cell just behind `start` relative to the travel direction.
        prevx = x + DIRX[reverse.value]
        prevy = y + DIRY[reverse.value]
        if not magic_stop and not (board.has_wall(x, y, reverse) or (prevx, prevy) in obstacles):
            continue # Cannot reach here.
        # NOTE(review): `moved` is set but never read -- looks vestigial.
        moved = False
        while True:
            newx = x + DIRX[direction.value]
            newy = y + DIRY[direction.value]
            if board.has_wall(x, y, direction) or (newx, newy) in obstacles:
                break
            yield (newx, newy)
            x = newx
            y = newy
def astar(
        start,
        neighbour,
        finish_condition,
        heuristic=None,
        stat=None):
    """
    Perform an A* search.
    finish_condition = (position) -> bool
    neighbour - neibhbourhood generation function
    heuristic = A* heuristic function. (new position, old position) -> distance

    Returns the solution path as a list of positions (goal first, start
    last), or None when no solution is found within MAX_DEPTH moves.
    """
    queue = [] # contains (distance+heuristic, distance, position, prev_position)
    heapq.heappush(queue, (0, 0, start, None))
    history = {start: (0, None)} # position -> (distance, previous)
    visited = set()
    if not stat: stat = Stat()
    if not heuristic: heuristic = lambda new, old: 0
    while queue:
        stat.iteration += 1
        _, distance, position, prev_position = heapq.heappop(queue)
        # Depth cut-off: abandon the search entirely (returns None).
        if distance > MAX_DEPTH: return
        if finish_condition(position):
            # found a solution!  Walk the history chain back to the start.
            positions = [position, prev_position]
            cur_position = prev_position
            while cur_position in history:
                cur_position = history[cur_position][1]
                if cur_position is not None:
                    positions.append(cur_position)
            stat.distance = distance
            return positions
        if position in visited: continue
        visited.add(position)
        new_distance = distance + 1
        for new_position in neighbour(position):
            # Skip neighbours already reached by an equal-or-shorter route.
            if new_position in history and new_distance > history[new_position][0]: continue
            history[new_position] = (new_distance, position)
            heapq.heappush(queue, (new_distance + heuristic(position, new_position), new_distance, new_position, position))
def compute_all(start, neighbour):
    """
    Compute shortest distance from "start" to all reachable node.
    Note: This function should only be executed with relatively small graph.

    Returns a dict: position -> (distance, previous position).
    """
    queue = []
    # contains (distance, position)
    heapq.heappush(queue, (0, start))
    history = {start: (0, None)} # position -> (distance, previous)
    visited = set()
    while queue:
        distance, position = heapq.heappop(queue)
        if position in visited: continue
        visited.add(position)
        new_distance = distance + 1
        for new_position in neighbour(position):
            # Skip nodes already reached by an equal-or-shorter route.
            if new_position in history and new_distance > history[new_position][0]: continue
            history[new_position] = (new_distance, position)
            heapq.heappush(queue, (new_distance, new_position))
    return history
def print_board(board,
                robots=None,
                paths=None,
                additionals=None,
                labels=None,
                markers=None):
    '''
    Print the given board position.
    robots - 4-tuple of pair (x, y), representing red, blue, green, and yellow robots.
    paths - list of (color, (x, y), (x, y)) paths to draw.
    additionals - list of (color, (x, y)) points to draw.
    labels - list of labels to render.
    markers - list of (color, (x, y)) target squares to draw.
    '''
    plt.figure(figsize=(5, 5))
    axis = plt.gca()
    MARGIN = 0.1   # inset of marker rectangles within a cell
    PADDING = 0.5  # extra space around the board in plot coordinates
    def plot_robot(index, coord, size):
        (x, y) = coord
        # NOTE(review): uses the enclosing loop variable `i` rather than the
        # `index` parameter; works because every caller binds `i` to the same
        # value first, but should presumably be COLORS[index].
        circle = plt.Circle((x + 0.5, y + 0.5), size, fc=COLORS[i])
        axis.add_patch(circle)
    def render_wall(wall):
        # Walls are normalized to DOWN (horizontal) or LEFT (vertical).
        (x1, y1, direction) = wall
        if direction == Direction.DOWN:
            x2 = x1 + 1
            y2 = y1
        else:
            x2 = x1
            y2 = y1 + 1
        line = plt.Line2D((x1, x2), (y1, y2), lw=2.5, color='black')
        axis.add_line(line)
    def render_path(path):
        (i, pos1, pos2) = path
        # Draw from cell center to cell center.
        line = plt.Line2D(
            (pos1[0] + 0.5, pos2[0] + 0.5),
            (pos1[1] + 0.5, pos2[1] + 0.5),
            color=COLORS[i],
            marker='x')
        axis.add_line(line)
    def render_marker(marker):
        (color, coord) = marker
        (x, y) = coord
        rectangle = plt.Rectangle((x + MARGIN, y + MARGIN),
                                  1 - MARGIN * 2,
                                  1 - MARGIN * 2,
                                  fc=COLORS[color])
        axis.add_patch(rectangle)
    for wall in board.walls: render_wall(wall)
    for path in (paths or []): render_path(path)
    for marker in (markers or []): render_marker(marker)
    for additional in (additionals or []):
        # Small dots for auxiliary positions.
        (i, robot) = additional
        plot_robot(i, robot, 0.1)
    if robots is not None:
        for i in range(len(COLORS)):
            plot_robot(i, robots[i], 0.4)
    if labels is not None:
        # labels is a grid of strings, rendered centered in each cell.
        for row_idx, row in enumerate(labels):
            for col_idx, cell in enumerate(row):
                axis.text(col_idx + 0.5,
                          row_idx + 0.5,
                          cell,
                          verticalalignment='center',
                          horizontalalignment='center')
    plt.xlim(0 - PADDING, DIMENSION + PADDING)
    plt.ylim(0 - PADDING, DIMENSION + PADDING)
    plt.show()
| StarcoderdataPython |
4842885 | <gh_stars>1-10
import pytest
from mixer.main import mixer
from smpa.models.address import Address, SiteAddress
@pytest.fixture
def address():
    """Pytest fixture: a fully-populated, validated Address instance."""
    obj = Address()
    obj.number = "42"
    obj.property_name = "property name"
    obj.address_line_1 = "address line 1"
    obj.address_line_2 = "address line 2"
    obj.address_line_3 = "address line 3"
    obj.town_city = "town city"
    obj.postcode = "postcode"
    obj.validate()
    return obj
@pytest.fixture
def site_address():
    """Pytest fixture: a fully-populated, validated SiteAddress instance."""
    obj = SiteAddress()
    obj.number = "42"
    obj.property_name = "property name"
    obj.address_line_1 = "address line 1"
    obj.address_line_2 = "address line 2"
    obj.address_line_3 = "address line 3"
    obj.town_city = "town city"
    obj.postcode = "postcode"
    obj.validate()
    return obj
| StarcoderdataPython |
199081 | import sys
import pkgutil
import inspect
import importlib
from collections import OrderedDict
def find_components(package, directory, base_class):
    """Discover subclasses of `base_class` in the modules of a directory.

    Imports every not-yet-imported, non-package module found in
    `directory` under the dotted `package` name, and records each class
    that subclasses `base_class` (excluding `base_class` itself).

    Returns an OrderedDict mapping module name -> discovered class (the
    last matching class per module wins).
    """
    components = OrderedDict()
    for _loader, module_name, ispkg in pkgutil.iter_modules([directory]):
        full_module_name = "%s.%s" % (package, module_name)
        if ispkg or full_module_name in sys.modules:
            continue
        module = importlib.import_module(full_module_name)
        for _member_name, obj in inspect.getmembers(module):
            # TODO test if the obj implements the interface
            # Keep in mind that this only instantiates the ensemble_wrapper,
            # but not the real target classifier
            if inspect.isclass(obj) and issubclass(obj, base_class) and \
                    obj != base_class:
                components[module_name] = obj
    return components
1680224 | import os
import sys
import click
from ftcli.Lib.Font import Font
from ftcli.Lib.utils import getFontsList, makeOutputFileName, guessFamilyName
# Click command group hosting the 'set-linegap' command below.
# (No docstring on purpose: click would surface it as help text.)
@click.group()
def setLineGap():
    pass
@setLineGap.command()
@click.argument('input_path', type=click.Path(exists=True, resolve_path=True))
@click.option('-p', '--percent', type=click.IntRange(1, 100), required=True,
help="adjust font line spacing to % of UPM value")
@click.option('-mfn', '--modify-family-name', is_flag=True,
help="adds 'LG% to the font family to reflect the modified line gap'")
@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True),
help='The output directory where the output files are to be created. If it doesn\'t exist, will be '
'created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False,
help='Keeps the original font \'modified\' timestamp (head.modified) or set it to current time. '
'By default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True,
help='Overwrites existing output files or save them to a new file (numbers are appended at the end of '
'file name). By default, files are overwritten.')
def set_linegap(input_path, percent, modify_family_name, output_dir, recalc_timestamp, overwrite):
"""Modifies the line spacing metrics in one or more fonts.
TThis is a CLI for font-line by Source Foundry: https://github.com/source-foundry/font-line
"""
files = getFontsList(input_path)
for f in files:
file_name, ext = os.path.splitext(os.path.basename(f))
file_dir = os.path.dirname(f)
try:
font = Font(f, recalcTimestamp=recalc_timestamp)
font.modifyLinegapPercent(percent)
# Modify the family name according to the linegap percent
if modify_family_name:
old_family_name = guessFamilyName(font)
if old_family_name:
old_family_name_without_spaces = old_family_name.replace(" ", "")
new_family_name = old_family_name + ' LG{}'.format(str(percent))
new_family_name_without_spaces = new_family_name.replace(" ", "")
font.findReplace(oldString=old_family_name, newString=new_family_name, fixCFF=True)
font.findReplace(oldString=old_family_name_without_spaces, newString=new_family_name_without_spaces,
fixCFF=True)
else:
click.secho('Warning: could not retrieve Family Name, it has not been modified.', fg='yellow')
# Before we add the "-linegap%" string to the new file name, let's remove it to avoid strange names like
# Font-Bold-linegap20-linegap20.otf
new_file_path = os.path.join(file_dir, file_name.replace('-linegap' + str(percent), '') + '-linegap'
+ str(percent) + ext)
output_file = makeOutputFileName(new_file_path, outputDir=output_dir, overWrite=overwrite)
font.save(output_file)
click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
except Exception as e:
click.secho(f'ERROR: {e}', fg='red')
# Click command group hosting the 'align' command below.
# (No docstring on purpose: click would surface it as help text.)
@click.group()
def alignVMetrics():
    pass
@alignVMetrics.command()
@click.argument('input_path', type=click.Path(exists=True, file_okay=False, resolve_path=True))
@click.option('-sil', '--sil-method', is_flag=True, help='Use SIL method: '
              'http://silnrsi.github.io/FDBP/en-US/Line_Metrics.html')
@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True),
              help='The output directory where the output files are to be created. If it doesn\'t exist, will be '
                   'created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False,
              help='Keeps the original font \'modified\' timestamp (head.modified) or set it to current time. '
                   'By default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True,
              help='Overwrites existing output files or save them to a new file (numbers are appended at the end of '
                   'file name). By default, files are overwritten.')
def align(input_path, sil_method, output_dir, recalc_timestamp, overwrite):
    """
    Aligns all fonts in INPUT_PATH to the same baseline.
    To achieve this, the script finds the maximum ascender and the minimum descender values of the fonts stored in the
    INPUT_PATH folder and applies those values to all fonts.
    This can produce undesired effects (an exaggerated line height) when one or more fonts contain swashes, for example.
    In such cases, it's better to copy the vertical metrics from a template font to one or more destination fonts using
    the 'ftcli metrics copy' command.
    See https://www.kltf.de/downloads/FontMetrics-kltf.pdf for more information.
    """
    files = getFontsList(input_path)
    # First pass: collect metric extremes across all fonts.
    idealAscenders = []
    idealDescenders = []
    realAscenders = []
    realDescenders = []
    for f in files:
        try:
            font = Font(f, recalcTimestamp=recalc_timestamp)
            yMax = font['head'].yMax
            yMin = font['head'].yMin
            ascender = font['hhea'].ascender
            descender = font['hhea'].descender
            sTypoAscender = font['OS/2'].sTypoAscender
            sTypoDescender = font['OS/2'].sTypoDescender
            usWinAscent = font['OS/2'].usWinAscent
            usWinDescent = font['OS/2'].usWinDescent
            idealAscenders.extend([sTypoAscender])
            idealDescenders.extend([abs(sTypoDescender)])
            realAscenders.extend([yMax, usWinAscent, ascender])
            realDescenders.extend(
                [abs(yMin), abs(usWinDescent), abs(descender)])
        except Exception as e:
            click.secho('ERROR: {}'.format(e), fg='red')
            # NOTE(review): removing from `files` while iterating it can
            # skip the next file -- confirm intent.
            files.remove(f)
    maxRealAscender = max(realAscenders)
    maxRealDescender = max(realDescenders)
    maxIdealAscender = max(idealAscenders)
    maxIdealDescender = max(idealDescenders)
    # NOTE(review): the computed value is immediately overwritten with 0 on
    # the next line -- dead code, or a deliberate override left in place?
    sTypoLineGap = (maxRealAscender + maxRealDescender) - (maxIdealAscender + maxIdealDescender)
    sTypoLineGap = 0
    # Second pass: apply the shared extremes to every font.
    for f in files:
        try:
            font = Font(f, recalcTimestamp=recalc_timestamp)
            font['hhea'].ascender = maxRealAscender
            font['hhea'].descender = -maxRealDescender
            font['hhea'].lineGap = 0
            font['OS/2'].usWinAscent = maxRealAscender
            font['OS/2'].usWinDescent = maxRealDescender
            font['OS/2'].sTypoAscender = maxIdealAscender
            font['OS/2'].sTypoDescender = -maxIdealDescender
            font['OS/2'].sTypoLineGap = sTypoLineGap
            if sil_method:
                # SIL method: typo metrics mirror the real extremes.
                font['OS/2'].sTypoAscender = maxRealAscender
                font['OS/2'].sTypoDescender = -maxRealDescender
                font['OS/2'].sTypoLineGap = 0
            output_file = makeOutputFileName(
                f, outputDir=output_dir, overWrite=overwrite)
            font.save(output_file)
            click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
        except Exception as e:
            click.secho(f'ERROR: {e}', fg='red')
# Click command group hosting the 'copy' command below.
# (No docstring on purpose: click would surface it as help text.)
@click.group()
def copyVMetrics():
    pass
@copyVMetrics.command()
@click.option('-s', '--source-file', type=click.Path(exists=True, dir_okay=False, resolve_path=True), required=True,
              help='Source file. Vertical metrics from this font will be applied to all destination fonts.')
@click.option('-d', '--destination', type=click.Path(exists=True, resolve_path=True), required=True,
              help='Destination file or directory')
@click.option('-o', '--output-dir', type=click.Path(file_okay=False, resolve_path=True),
              help='The output directory where the output files are to be created. If it doesn\'t exist, will be '
                   'created. If not specified, files are saved to the same folder.')
@click.option('--recalc-timestamp/--no-recalc-timestamp', default=False,
              help='Keeps the original font \'modified\' timestamp (head.modified) or set it to current time. By '
                   'default, original timestamp is kept.')
@click.option('--overwrite/--no-overwrite', default=True,
              help='Overwrites existing output files or save them to a new file (numbers are appended at the end of '
                   'file name). By default, files are overwritten.')
def copy(source_file, destination, output_dir, recalc_timestamp, overwrite):
    """
    Copies vertical metrics from a source font to one or more destination fonts.
    """
    # Read the reference vertical metrics once from the source font. If the
    # source font cannot be parsed there is nothing to copy, so abort with a
    # non-zero exit status (the original called sys.exit() with no argument,
    # which exits 0 and makes the failure invisible to shell scripts).
    try:
        source_font = Font(source_file)
        ascender = source_font['hhea'].ascender
        descender = source_font['hhea'].descender
        usWinAscent = source_font['OS/2'].usWinAscent
        usWinDescent = source_font['OS/2'].usWinDescent
        sTypoAscender = source_font['OS/2'].sTypoAscender
        sTypoDescender = source_font['OS/2'].sTypoDescender
        sTypoLineGap = source_font['OS/2'].sTypoLineGap
    except Exception as e:
        click.secho('ERROR: {}'.format(e), fg='red')
        sys.exit(1)
    # Apply the metrics to every destination font. A failure on one file is
    # reported but does not stop processing of the remaining files.
    files = getFontsList(destination)
    for f in files:
        try:
            font = Font(f, recalcTimestamp=recalc_timestamp)
            font['hhea'].ascender = ascender
            font['hhea'].descender = descender
            font['hhea'].lineGap = 0
            font['OS/2'].usWinAscent = usWinAscent
            font['OS/2'].usWinDescent = usWinDescent
            font['OS/2'].sTypoAscender = sTypoAscender
            font['OS/2'].sTypoDescender = sTypoDescender
            font['OS/2'].sTypoLineGap = sTypoLineGap
            output_file = makeOutputFileName(
                f, outputDir=output_dir, overWrite=overwrite)
            font.save(output_file)
            click.secho(f'{os.path.basename(output_file)} --> saved', fg='green')
        except Exception as e:
            click.secho(f'ERROR: {e}', fg='red')
# Top-level CLI entry point: merges the align/copy/line-gap command groups
# into a single command collection with the shared help text below.
cli = click.CommandCollection(sources=[alignVMetrics, copyVMetrics, setLineGap], help="""
Aligns all the fonts to the same baseline.
The 'ftcli metrics align' command calculates the maximum ascenders and descenders of a set of fonts and applies them to
all fonts in that set.
The 'ftcli metrics copy' command copies vertical metrics from a source font to one or more destination fonts.
"""
)
| StarcoderdataPython |
83153 | <reponame>sunshot/LeetCode<gh_stars>0
from typing import List
class Solution:
    def longestCommonPrefix(self, strs: List[str]) -> str:
        """Return the longest common prefix of all strings in *strs*.

        Scans column by column: character position i is appended to the
        prefix only if every string is long enough and agrees on that
        character. Returns "" for an empty list, an empty first string,
        or when there is no common prefix.
        """
        prefix = ""
        if not strs or not strs[0]:
            return prefix
        for i in range(len(strs[0])):
            curr = None  # character every string must match at column i
            for x in strs:
                if i >= len(x):
                    # x is shorter than the candidate prefix: stop here.
                    return prefix
                if curr is None:  # idiom fix: identity test, not == None
                    curr = x[i]
                elif curr != x[i]:
                    return prefix
            prefix += curr
        return prefix
if __name__ == '__main__':
    # Quick manual check: the common prefix of these words is "fl".
    sample = ["flower", "flow", "flight"]
    print(Solution().longestCommonPrefix(sample))
1765679 | <filename>app/maths.py<gh_stars>0
def get_semester(*, month: int) -> int:
    """Return the semester (1 or 2) that *month* falls in.

    Months 1-6 belong to semester 1; months 7-12 to semester 2.
    """
    return 1 if month <= 6 else 2
| StarcoderdataPython |
3256659 | <reponame>kenchan0226/control-sum-cmdp
""" Evaluate the baselines ont ROUGE/METEOR"""
""" Adapted from https://github.com/ChenRocks/fast_abs_rl """
import argparse
import json
import os
from os.path import join, exists
from utils.evaluate import eval_meteor, eval_rouge
def main(args):
    """Score decoded summaries with ROUGE or METEOR and save the report.

    Reads the data split from the decode log, locates the matching
    reference directory, runs the selected metric over the decoded
    outputs, prints the report and writes it to <decode_dir>/<metric>.txt.
    """
    out_dir = join(args.decode_dir, 'output')
    with open(join(args.decode_dir, 'log.json')) as log_file:
        data_split = json.loads(log_file.read())['split']
    ref_dir = join(args.data, 'refs', data_split)
    assert exists(ref_dir)
    if args.rouge:
        metric = 'rouge'
        output = eval_rouge(r'(\d+).dec', out_dir, '[A-Z].#ID#.ref', ref_dir,
                            n_words=args.n_words, n_bytes=args.n_bytes)
    else:
        metric = 'meteor'
        output = eval_meteor('[0-9]+.dec', out_dir, '[0-9]+.ref', ref_dir)
    print(output)
    with open(join(args.decode_dir, '{}.txt'.format(metric)), 'w') as report:
        report.write(output)
f.write(output)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Evaluate the output files')
    # choose metric to evaluate: exactly one of -rouge / -meteor is required
    metric_opt = parser.add_mutually_exclusive_group(required=True)
    metric_opt.add_argument('-rouge', action='store_true',
                            help='ROUGE evaluation')
    metric_opt.add_argument('-meteor', action='store_true',
                            help='METEOR evaluation')
    parser.add_argument('-decode_dir', action='store', required=True,
                        help='directory of decoded summaries')
    parser.add_argument('-data', action='store', required=True,
                        help='directory of decoded summaries')
    # -n_words / -n_bytes default to -1 (presumably "no truncation" —
    # confirm against eval_rouge's handling of negative limits)
    parser.add_argument('-n_words', type=int, action='store', default=-1,
                        help='Only use the first n words in the system/peer summary for the evaluation.')
    parser.add_argument('-n_bytes', type=int, action='store', default=-1,
                        help='Only use the first n bytes in the system/peer summary for the evaluation.')
    args = parser.parse_args()
    main(args)
| StarcoderdataPython |
3334069 | import hashlib
import unittest
from unittest.mock import Mock, patch
from kademlia.crypto import Crypto
from kademlia.domain.domain import PersistMode, is_new_value_valid, validate_authorization
from kademlia.exceptions import InvalidSignException
from kademlia.utils import digest, sharedPrefix, OrderedSet
class UtilsTest(unittest.TestCase):
    """Unit tests for kademlia's digest/prefix helpers and value validation."""

    def test_digest(self):
        # digest() must SHA-1 hash the string form of its argument.
        d = hashlib.sha1(b'1').digest()
        self.assertEqual(d, digest(1))
        d = hashlib.sha1(b'another').digest()
        self.assertEqual(d, digest('another'))

    def test_sharedPrefix(self):
        # The longest common leading substring of all arguments is returned.
        args = ['prefix', 'prefixasdf', 'prefix', 'prefixxxx']
        self.assertEqual(sharedPrefix(args), 'prefix')
        args = ['p', 'prefixasdf', 'prefix', 'prefixxxx']
        self.assertEqual(sharedPrefix(args), 'p')
        args = ['one', 'two']
        self.assertEqual(sharedPrefix(args), '')
        args = ['hi']
        self.assertEqual(sharedPrefix(args), 'hi')

    @patch('time.time', Mock(return_value=5))
    def test_validate_authorization(self):
        # time.time is frozen at 5, so exp_time=6 below is still valid and
        # exp_time=4 has expired; exp_time=None means "never expires".
        Crypto.check_signature = Mock(return_value=True)
        value = Mock()
        value.data = 'data'
        value.authorization.sign = 'sign'
        value.authorization.pub_key.exp_time = None
        value.authorization.pub_key.key = 'key'
        value.persist_mode = PersistMode.SECURED
        dkey = hashlib.sha1('key'.encode('utf8')).digest()
        dval = digest(dkey.hex() + value.data + str(value.authorization.pub_key.exp_time) + str(value.persist_mode))
        validate_authorization(dkey, value)
        Crypto.check_signature.assert_called_with(dval, 'sign', 'key')
        value.authorization.pub_key.exp_time = 6
        dval = digest(dkey.hex() + value.data + str(value.authorization.pub_key.exp_time) + str(value.persist_mode))
        validate_authorization(dkey, value)
        Crypto.check_signature.assert_called_with(dval, 'sign', 'key')
        # Expired key: validation must fail with an assertion.
        value.authorization.pub_key.exp_time = 4
        with self.assertRaises(AssertionError):
            validate_authorization(hashlib.sha1('key'.encode('utf8')).digest(), value)
        # Bad signature: validation must raise InvalidSignException.
        value.authorization.pub_key.exp_time = 6
        Crypto.check_signature = Mock(return_value=False)
        with self.assertRaises(InvalidSignException):
            validate_authorization(hashlib.sha1('key'.encode('utf8')).digest(), value)

    @patch('kademlia.domain.domain.validate_authorization')
    def test_check_new_value_valid(self, mocked_va):
        # A new value is valid only when signed with the same public key as
        # the stored value; validate_authorization itself is mocked out.
        stored_value = Mock()
        new_value = Mock()
        new_value.authorization = Mock()
        new_value.authorization.pub_key.key = '0224d2079e86e937224f08aa37a857ca6116546868edde549d0bd6b8536af9d554'
        stored_value.authorization = Mock()
        stored_value.authorization.pub_key.key = '0224d2079e86e937224f08aa37a857ca6116546868edde549d0bd6b8536af9d554'
        self.assertTrue(is_new_value_valid('dkey', stored_value, new_value))
        mocked_va.assert_called_with('dkey', new_value)
        new_value.authorization.pub_key.key = 'another key'
        self.assertFalse(is_new_value_valid('dkey', stored_value, new_value))
        # A new value without authorization can never replace a secured one.
        new_value.authorization = None
        self.assertFalse(is_new_value_valid('dkey', stored_value, new_value))
class OrderedSetTest(unittest.TestCase):
    """Behavioural test for OrderedSet's ordering semantics."""

    def test_order(self):
        # Re-pushing an existing element moves it to the back, so after
        # pushing 1, 1, 2, 1 the set must read ['2', '1'].
        ordered = OrderedSet()
        for item in ('1', '1', '2', '1'):
            ordered.push(item)
        self.assertEqual(ordered, ['2', '1'])
| StarcoderdataPython |
1779851 | from networkx.algorithms.euler import is_eulerian
from networkx.algorithms.efficiency_measures import global_efficiency
from networkx.algorithms.efficiency_measures import local_efficiency
from networkx.algorithms.distance_regular import is_distance_regular
from networkx.algorithms.components import number_connected_components
from networkx.algorithms.components import is_connected
from networkx.algorithms.cluster import average_clustering
from networkx.algorithms.cluster import transitivity
from networkx.algorithms.clique import graph_number_of_cliques
from networkx.algorithms.clique import graph_clique_number
from networkx.algorithms.bridges import has_bridges
from networkx.algorithms.assortativity import degree_pearson_correlation_coefficient
from networkx.algorithms.assortativity import degree_assortativity_coefficient
from networkx.algorithms.approximation.clustering_coefficient import average_clustering
from networkx.algorithms.approximation.clique import large_clique_size
import netlsd
from .base import BaseGraph
import numpy as np
import torch
from functools import wraps
from .. import register_feature
# Registry of every networkx-backed feature extractor class in this module.
NX_EXTRACTORS = []


def register_nx(cls):
    """Class decorator: record *cls* in NX_EXTRACTORS and in the global
    feature registry under its class name, then return it unchanged."""
    NX_EXTRACTORS.append(cls)
    register_feature(cls.__name__)(cls)
    return cls
@register_nx
class NxGraph(BaseGraph):
    """Base class for networkx-backed graph features.

    Constructor arguments are stored and later forwarded to the wrapped
    networkx function by the ``nxfunc`` decorator. The default ``extract``
    returns ``data.G.size()`` (for a networkx graph this is the edge
    count / total edge weight — confirm against BaseGraph's contract).
    """

    def __init__(self, *args, **kwargs):
        super(NxGraph, self).__init__(data_t="nx")
        self._args = args      # positional args forwarded to the nx function
        self._kwargs = kwargs  # keyword args forwarded to the nx function

    def extract(self, data):
        # Default scalar feature; subclasses override via @nxfunc.
        return data.G.size()

    def _transform(self, data):
        # Append the scalar feature as one new column of the 1xN
        # graph-feature tensor data.gf.
        dsc = self.extract(data)
        dsc = torch.FloatTensor([[dsc]])
        data.gf = torch.cat([data.gf, dsc], dim=1)
        return data
def nxfunc(func):
    r"""Decorator factory for networkx-based feature classes.

    Replaces the class's ``extract`` with a method that applies *func* to
    ``data.G``, forwarding the instance's stored ``_args``/``_kwargs``.

    Examples
    --------
    @register_nx
    @nxfunc(large_clique_size)
    class NxLargeCliqueSize(NxGraph):pass
    """
    def decorate(cls):
        def extract(self, data):
            return func(data.G, *self._args, **self._kwargs)
        cls.extract = extract
        return cls
    return decorate
@register_nx
@nxfunc(large_clique_size)
class NxLargeCliqueSize(NxGraph):
    """Approximate size of a large clique (networkx approximation module)."""
    pass


@register_nx
@nxfunc(average_clustering)
class NxAverageClusteringApproximate(NxGraph):
    """Approximate average clustering coefficient.

    NOTE(review): due to the import order at the top of this file, the
    module-level name ``average_clustering`` resolves to the approximation
    variant (imported after the exact one), which matches this class.
    """
    pass


@register_nx
@nxfunc(degree_assortativity_coefficient)
class NxDegreeAssortativityCoefficient(NxGraph):
    """Degree assortativity coefficient of the graph."""
    pass


@register_nx
@nxfunc(degree_pearson_correlation_coefficient)
class NxDegreePearsonCorrelationCoefficient(NxGraph):
    """Pearson correlation coefficient of degrees at edge endpoints."""
    pass


@register_nx
@nxfunc(has_bridges)
class NxHasBridge(NxGraph):
    """Whether the graph has any bridges (bool)."""
    pass


@register_nx
@nxfunc(graph_clique_number)
class NxGraphCliqueNumber(NxGraph):
    """Clique number (size of the largest clique)."""
    pass


@register_nx
@nxfunc(graph_number_of_cliques)
class NxGraphNumberOfCliques(NxGraph):
    """Number of maximal cliques in the graph."""
    pass


@register_nx
@nxfunc(transitivity)
class NxTransitivity(NxGraph):
    """Transitivity (fraction of possible triangles present)."""
    pass
# At module level the name `average_clustering` is shadowed: the exact
# version from networkx.algorithms.cluster is imported first and then
# overwritten by the approximation variant imported afterwards, so this
# "exact" feature silently computed the approximation. Re-import the exact
# implementation under an alias so the two classes differ as intended.
from networkx.algorithms.cluster import average_clustering as _exact_average_clustering


@register_nx
@nxfunc(_exact_average_clustering)
class NxAverageClustering(NxGraph):
    """Exact average clustering coefficient of the graph."""
@register_nx
@nxfunc(is_connected)
class NxIsConnected(NxGraph):
    """Whether the graph is connected (bool)."""
    pass


@register_nx
@nxfunc(number_connected_components)
class NxNumberConnectedComponents(NxGraph):
    """Number of connected components in the graph."""
    pass
# from networkx.algorithms.components import is_attracting_component
# @register_nx
# @nxfunc(is_attracting_component)
# class NxIsAttractingComponent(NxGraph):pass
# from networkx.algorithms.components import number_attracting_components
# @register_nx
# @nxfunc(number_attracting_components)
# class NxNumberAttractingComponents(NxGraph):pass
# from networkx.algorithms.connectivity.connectivity import average_node_connectivity
# @register_nx
# @nxfunc(average_node_connectivity)
# class NxAverageNodeConnectivity(NxGraph):pass
# from networkx.algorithms.distance_measures import diameter
# @register_nx
# @nxfunc(diameter)
# class NxDiameter(NxGraph):pass
# from networkx.algorithms.distance_measures import radius
# @register_nx
# @nxfunc(radius)
# class NxRadius(NxGraph):pass
@register_nx
@nxfunc(is_distance_regular)
class NxIsDistanceRegular(NxGraph):
    """Whether the graph is distance-regular (bool)."""
    pass


@register_nx
@nxfunc(local_efficiency)
class NxLocalEfficiency(NxGraph):
    """Average local efficiency of the graph."""
    pass


@register_nx
@nxfunc(global_efficiency)
class NxGlobalEfficiency(NxGraph):
    """Global efficiency of the graph."""
    pass


@register_nx
@nxfunc(is_eulerian)
class NxIsEulerian(NxGraph):
    """Whether the graph is Eulerian (bool)."""
    pass


# till algorithms.flows
| StarcoderdataPython |
# Default compile-command templates keyed by source-file extension.
# $CC/$CXX, the *FLAGS variables, $target and $sources are presumably
# substituted by the build driver that consumes this map — confirm there.
DEFAULT_COMMAND_MAP = {
    ".c": "$CC -o $target $CFLAGS $CCFLAGS $sources",
    ".cpp": "$CXX -o $target $CXXFLAGS $CCFLAGS $sources",
}
3325869 | import setuptools
# Package metadata consumed by setup() below.
__version__ = "1.0rc1"
__author__ = "<NAME>"
def readme():
    """Return the contents of README.md as a string.

    The file is read relative to the current working directory. The
    encoding is pinned to UTF-8 so the long description does not break on
    platforms whose default encoding is not UTF-8 (e.g. Windows cp1252)
    when the readme contains non-ASCII characters.
    """
    with open('README.md', encoding='utf-8') as f:
        return f.read()
setuptools.setup(
name='N_Network',
version=__version__,
license='MIT License',
description='A personal implementation of a Neural Network',
long_description=readme(),
long_description_content_type='text/markdown',
packages=setuptools.find_packages(),
url='https://github.com/doctorado-ml/neuralnetwork',
author=__author__,
author_email='<EMAIL>',
keywords='neural_network',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Natural Language :: English',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Intended Audience :: Science/Research'
],
install_requires=[
'scikit-learn>=0.23.0',
'numpy',
'matplotlib',
'seaborn'
],
zip_safe=False
)
| StarcoderdataPython |
3349633 | """Objects Module."""
def jlpoint(x, y, z):
    """Return a 3D coordinate dict.

    Args:
        x (float):
            X-coordinate.
        y (float):
            Y-coordinate.
        z (float):
            Z-coordinate.

    Returns:
        (dict): 3D coordinate object with keys "x", "y" and "z".

    Raises:
        Warning: If any coordinate cannot be converted to a number.
    """
    try:
        # float() raises ValueError for unparseable strings but TypeError
        # for non-numeric objects such as None or lists; both mean the same
        # thing to the caller, so both are reported via the Warning below
        # (the original only caught ValueError, letting TypeError escape).
        x, y, z = float(x), float(y), float(z)
    except (TypeError, ValueError):
        raise Warning("Coordinates must be numbers")
    return {
        "x": x,
        "y": y,
        "z": z
    }
| StarcoderdataPython |
50694 | import cv2
import numpy as np
# Load the two source images; img2 (the logo) is overlaid on img1 below.
img1 = cv2.imread('3D-Matplotlib.png')
img2 = cv2.imread('mainlogo.png')
# THREE DIFFERENT WAYS OF ADDING TWO PICTURES (kept for reference):
#1 plain numpy addition
#add = img1+ img2
#2 cv2.add saturates at 255 instead of wrapping
#img = cv2.add(img1,img2) # USING BUILT IN FUNCTION OF CV2 TO ADD TWO IMAGES
#3 blend with per-image weights
#weighted_add = cv2.addWeighted(img1, 0.6, img2, 0.4, 0) # 60% WEIGHT OF IMG1 AND 40% OF IMG2; GAMMA VALUE IS ZERO
# MASKING: paste only the non-background pixels of the logo onto the
# top-left region of img1.
rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]
# Binary mask of the logo: pixels brighter than 220 (background) become 0
# after the inverted threshold, the logo itself becomes 255.
img2gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 220,255,cv2.THRESH_BINARY_INV)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi,roi,mask = mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(img2,img2,mask = mask)
# Combine background and logo foreground, then write back into img1.
dst = cv2.add(img1_bg,img2_fg)
img1[0:rows, 0:cols ] = dst
# Show the result until a key is pressed.
cv2.imshow('res',img1)
cv2.waitKey(0)
cv2.destroyAllWindows()
| StarcoderdataPython |
28231 | <reponame>chfw/gease
from mock import MagicMock, patch
from nose.tools import eq_
from gease.contributors import EndPoint
from gease.exceptions import NoGeaseConfigFound
class TestPublish:
    """Tests for gease.contributors.EndPoint.get_all_contributors."""

    @patch("gease.contributors.get_token")
    @patch("gease.contributors.Api.get_public_api")
    def test_all_contributors(self, fake_api, get_token):
        # No local gease config: the code falls back to the public
        # (unauthenticated) API and resolves each login to the full name.
        get_token.side_effect = [NoGeaseConfigFound]
        sample_reply = [
            {"login": "howdy", "url": "https://api.github.com/users/howdy"}
        ]
        # First get() returns the contributor list, second the user detail.
        fake_api.return_value = MagicMock(
            get=MagicMock(
                side_effect=[
                    sample_reply,
                    {"name": "<NAME>", "html_url": ""},
                ]
            )
        )
        repo = EndPoint("test", "repo")
        contributors = repo.get_all_contributors()
        eq_(
            contributors,
            [{"name": "<NAME>", "html_url": ""}],
        )

    @patch("gease.contributors.get_token")
    @patch("gease.contributors.Api.get_public_api")
    def test_private_api(self, fake_api, get_token):
        # When the user detail has no name, the login is used instead.
        get_token.side_effect = [NoGeaseConfigFound]
        sample_reply = [
            {"login": "howdy", "url": "https://api.github.com/users/howdy"}
        ]
        fake_api.return_value = MagicMock(
            get=MagicMock(
                side_effect=[sample_reply, {"name": None, "html_url": ""}]
            )
        )
        repo = EndPoint("test", "repo")
        contributors = repo.get_all_contributors()
        eq_(
            contributors,
            [{"name": "howdy", "html_url": ""}],
        )

    @patch("gease.contributors.get_token")
    @patch("gease.contributors.Api.get_api")
    def test_no_names(self, fake_api, _):
        # Same fallback-to-login behaviour via the authenticated API path.
        sample_reply = [
            {"login": "howdy", "url": "https://api.github.com/users/howdy"}
        ]
        fake_api.return_value = MagicMock(
            get=MagicMock(
                side_effect=[sample_reply, {"name": None, "html_url": ""}]
            )
        )
        repo = EndPoint("test", "repo")
        contributors = repo.get_all_contributors()
        eq_(
            contributors,
            [{"name": "howdy", "html_url": ""}],
        )
| StarcoderdataPython |
53058 | <reponame>Ntermast/BKE<filename>bke/bke_client/forms.py
from django import forms
from django.core.validators import FileExtensionValidator
from .models import Channel, Podcast
class ChannelForm(forms.ModelForm):
    """Form for a Channel: its cover image only, which is mandatory."""
    image = forms.ImageField(required=True)

    class Meta:
        model = Channel
        fields = ('image',)


class PodcastForm(forms.ModelForm):
    """Form for a Podcast: mandatory cover image and mp3 audio file."""
    image = forms.ImageField(required=True)
    # Only .mp3 uploads are accepted.
    file = forms.FileField(required=True, validators=[FileExtensionValidator(
        allowed_extensions=['mp3'])])

    class Meta:
        model = Podcast
        fields = ('image', 'file')


class PodcastImageForm(forms.ModelForm):
    """Form to update only a Podcast's cover image."""
    image = forms.ImageField(required=True)

    class Meta:
        model = Podcast
        fields = ('image',)


class PodcastFileForm(forms.ModelForm):
    """Form to update only a Podcast's audio file (.mp3 only)."""
    file = forms.FileField(required=True, validators=[FileExtensionValidator(
        allowed_extensions=['mp3'])])

    class Meta:
        model = Podcast
        fields = ('file',)
| StarcoderdataPython |
4810441 | # coding=utf-8
from flask import Flask, request, jsonify, g
from Plan import RequestException
import Plan
import traceback
app = Flask(__name__)


def wrap_response(result):
    """Wrap *result* in the standard JSON envelope: {result, error}."""
    return jsonify(result=result, error=None)


def success():
    """JSON envelope for a successful call with no payload (result=True)."""
    return wrap_response(True)


# Ping endpoint to check that the server is alive.
@app.route('/')
def ping():
    return success()
# Get the plans for a given time unit and index.
@app.route('/plan/<unit>/<int:index>', methods=['GET'])
def get_plans(index, unit):
    return wrap_response(Plan.get_plans(index, unit))


# Get all currently active plans.
@app.route('/plan/active', methods=['GET'])
def get_current_plans():
    return wrap_response(Plan.get_current_plans())


# Add or modify a plan.
@app.route('/plan/<plan_id>', methods=['PUT', 'POST'])
def add_plan(plan_id):
    plan_to_save = request.get_json()
    # The id in the URL must match the id in the request body.
    if not plan_to_save['id'] == plan_id:
        raise Exception('id in url not matched with id in request body')
    plan_exist = Plan.get_plan(plan_id)
    if plan_exist:
        # Existing plan: only its numeric 'sort' field may be updated here.
        if 'sort' in plan_to_save and type(plan_to_save['sort']) in [float, int]:
            Plan.update_plan_filed(plan_id, 'sort', plan_to_save['sort'])
    else:
        Plan.add_plan(plan_to_save)
    return success()


# Delete a plan.
@app.route('/plan/<plan_id>', methods=['DELETE'])
def delete_plan(plan_id):
    Plan.delete_plan(plan_id)
    return success()


# Mark a plan as completed.
@app.route('/plan/<plan_id>/<index>/_done', methods=['PUT', 'POST'])
def finish_plan(plan_id, index):
    Plan.add_plan_record(plan_id, index)
    return success()


# Mark a plan as not completed.
@app.route('/plan/<plan_id>/<index>/_done', methods=['DELETE'])
def remove_finish_plan(plan_id, index):
    Plan.delete_plan_record(plan_id, index)
    return success()


# Expected request errors are reported as HTTP 400 with the error message.
@app.errorhandler(RequestException)
def request_error_handler(error):
    traceback.print_exc()
    return jsonify({'error': error.message}), 400


# Close the per-request database connection stored on flask.g, if any.
@app.teardown_appcontext
def close_connection(e):
    if e is not None:
        print e
    db = getattr(g, '_db', None)
    if db is not None:
        db.close()


if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)
| StarcoderdataPython |
1702342 | <gh_stars>0
#!/usr/bin/env python
import argparse
import os
from odt import ODTPage
class HTMLGenerator:
def __init__(self, odtfile, page=1, pagename='page', title='Title', index=None):
self.page = page
self.pagename = pagename
self.title = title
self.index = index
self.gen_index = index is not None
self.pages = 0
if not self.gen_index:
self.index = "%s_1" % self.pagename
self.odt = ODTPage(odtfile, pagename=self.pagename, indexname=self.index)
self.got_title = False
self.prev_page = False
self.index_data = ''
self.data = ''
self.content = ''
def isOdtFileOpen(self):
return self.odt.odt.open()
def extractFileFromOdt(self, fname):
if self.isOdtFileOpen():
return self.odt.odt.extract(fname)
return None
def detectFileMime(self, fname):
fname = fname.lower()
if '.png' in fname:
return 'image/png'
elif '.jpeg' in fname or '.jpg' in fname:
return 'image/jpeg'
return None
def isValidFile(self, fname):
return self.detectFileMime(fname) is not None
def img(self, fname):
if self.isValidFile(fname):
data = self.extractFileFromOdt(fname)
if data is None:
data = ''
return data
def isPagesLeft(self):
return self.page <= self.pages
def countPages(self):
self.pages = self.odt.pages()
def isNormalPage(self):
return not self.gen_index or self.got_title or self.page_title
def resolvePreviousPage(self):
if self.gen_index and not self.got_title:
self.prev_page = '%s.html' % self.index
else:
self.prev_page = self.got_title
def writePage(self):
with open('%s_%s.html' % (self.pagename, self.page), 'w') as fd:
fd.write(self.data.encode('utf-8'))
self.got_title = True
def appendIndexData(self):
self.index_data += self.content
def nextPage(self):
self.page += 1
def getPageContentsInUtf8(self):
return self.odt.genIndex(self.title, self.index_data).encode('utf-8')
def writeIndexPage(self):
if self.gen_index:
with open('%s.html' % self.index, 'w') as fd:
fd.write(self.getPageContentsInUtf8())
def getPage(self):
(self.page_title, self.content, self.data) = self.odt.getPage(page=self.page, title=self.title, prev_page=self.prev_page)
def makeImageFolder(self, extra=''):
try:
os.makedirs('img/%s' % extra)
except:
pass
def getImageData(self, img):
self.img_data = self.img(img)
def writeImage(self, img):
with open('img/%s' % img, 'w+') as fd:
fd.write(self.img_data)
def generateImages(self):
for img in self.odt.odt.images:
self.getImageData(img)
self.makeImageFolder(os.path.dirname(img))
self.writeImage(img)
def generatePage(self):
self.resolvePreviousPage()
self.getPage()
if self.isNormalPage():
self.writePage()
else:
self.appendIndexData()
self.nextPage()
def generatePages(self):
while self.isPagesLeft():
self.generatePage()
self.writeIndexPage()
def generateHTML(self):
self.countPages()
self.generatePages()
self.generateImages()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ODT-HTML5')
parser.add_argument('-t', '--title', default='', help='Title')
parser.add_argument('-p', '--prefix', default='page', help='Page prefix')
parser.add_argument('-i', '--index', default=None, help='Generate index with')
parser.add_argument('filename', help='Input ODT')
args = parser.parse_args()
g = HTMLGenerator(args.filename, pagename=args.prefix, index=args.index, title=args.title)
g.generateHTML()
| StarcoderdataPython |
138720 | <gh_stars>0
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from .models import SlackAlertUserData
from cabot3.cabotapp.models import Service
import json
import re
@csrf_exempt
def slack_message_callback(request):
payload = json.loads(request.POST['payload'])
service_id = re.match('acknowledge_(\d+)', payload['callback_id']).groups()[0]
slack_alias = payload['user']['name']
# .user.user to go through user_profile -> user...
user = SlackAlertUserData.objects.get(slack_alias=slack_alias).user.user
service = Service.objects.get(pk=service_id)
service.acknowledge_alert(user)
message = payload['original_message']
# Strip out button
for attach in message['attachments']:
if int(attach['id']) != int(payload['attachment_id']):
continue
attach['actions'] = []
message['attachments'].append({
'text': 'Acknowledged by @{}'.format(slack_alias),
'color': 'warning',
})
return JsonResponse(message)
| StarcoderdataPython |
182216 | from setuptools import setup
setup(
name='foucluster',
description='Clustering of songs using Fourier Transform',
long_description='Similarities among songs are computed using Fast Fourier '
'Transform. With this information, unsupervised machine learning'
' is applied.',
url='https://github.com/cperales/foucluster',
version='2.0',
author='<NAME>',
author_email='<EMAIL>',
keywords=['cluster', 'Fourier', 'music', 'song',
'machine learning', 'kmeans'],
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Operating System :: POSIX :: Linux',
'Topic :: Artistic Software',
'Topic :: Scientific/Engineering',
'Topic :: Multimedia :: Sound/Audio :: Analysis'
],
packages=['foucluster'
],
zip_safe=False,
install_requires=['numpy',
'scipy',
'pandas',
'sklearn',
'seaborn'
],
include_package_data=True,
setup_requires=[],
tests_require=['pytest',
'pytest-cov'],
extras_require={
'docs': [
'sphinx'
]
},
)
| StarcoderdataPython |
78655 | """Author: <NAME>, Copyright 2019, MIT License"""
from multiarchy.loggers.logger import Logger
import tensorflow as tf
class TensorboardInterface(Logger):
def __init__(
self,
replay_buffer,
logging_dir,
):
# create a separate tensor board logging thread
self.replay_buffer = replay_buffer
self.logging_dir = logging_dir
# create the tensor board logging file to save training data
tf.io.gfile.makedirs(logging_dir)
self.writer = tf.summary.create_file_writer(logging_dir)
def record(
self,
key,
value,
):
# get the current number of samples collected
tf.summary.experimental.set_step(self.replay_buffer.get_total_steps())
with self.writer.as_default():
# generate a plot and write the plot to tensor board
if len(tf.shape(value)) == 1:
pass
# generate several plots and write the plot to tensor board
elif len(tf.shape(value)) == 2:
pass
# write a single image to tensor board
elif len(tf.shape(value)) == 3:
tf.summary.image(key, tf.expand_dims(value, 0) * 0.5 + 0.5)
# write several images to tensor board
elif len(tf.shape(value)) == 4:
tf.summary.image(key, value * 0.5 + 0.5)
# otherwise, assume the tensor is still a scalar
else:
tf.summary.scalar(key, value)
class TensorboardLogger(Logger):
def __init__(
self,
replay_buffer,
logging_dir,
):
# create a separate tensor board logging thread
self.interface = TensorboardInterface(replay_buffer, logging_dir)
def record(
self,
key,
value,
):
# get the current number of samples collected
self.interface.record(key, value)
| StarcoderdataPython |
133389 | <filename>flex_config/__init__.py
import json
from typing import Any, Dict, Iterable, Optional, Sequence, Set, Union
from .aws_source import AWSSource
from .config_source import ConfigSource
from .env_source import EnvSource
from .yaml_source import YAMLSource
class FlexConfig(Dict[str, Any]):
""" Holds config values which can be loaded from many sources """
def __init__(self, required: Set[str] = None) -> None:
super().__init__()
self.required: Optional[Set[str]] = required
def load_sources(self, config_sources: Union[Sequence[ConfigSource], ConfigSource]) -> None:
"""
Load the data from a [ConfigSource][flex_config.config_source.ConfigSource] or list thereof
Args:
config_sources: A Single or ConfigSource or Iterable of ConfigSources.
"""
if not isinstance(config_sources, Sequence):
config_sources = [config_sources]
for source in config_sources:
for (path, value) in source.items():
self[path] = value
def validate(self) -> None:
"""
Verify that all required attributes are set
Raises:
KeyError: If any required attribute is missing
"""
if self.required is None:
return
for attr in self.required:
# noinspection PyStatementEffect
self[attr]
@staticmethod
def flatten_dict(d: Dict[str, Any]) -> Dict[str, Any]:
"""
Takes a dict with potentially nested values and returns a flat dict
Returns:
Flattened dictionary
"""
keys = list(d.keys()) # can't be an iterator because we're modifying in this loop
for key in keys:
if isinstance(d[key], dict): # This is a dict within the top level dict
d[key] = FlexConfig.flatten_dict(d[key]) # First flatten the internal dict
for inner_key, inner_value in d[key].items(): # Now pull all of its keys into the top level dict
d[f"{key}/{inner_key}"] = inner_value
del d[key]
return d
@staticmethod
def _value_from_string(value: str) -> Any:
if value.isdigit():
return int(value)
elif value.startswith("{") and value.endswith("}"):
return json.loads(value)
elif "." in value:
# noinspection PyBroadException
# Test to see if this value is a float
try:
return float(value)
except:
pass
return value
def __getitem__(self, item: str) -> Any:
path_parts = item.split("/")
current = super().__getitem__(path_parts[0])
for part in path_parts[1:]:
if isinstance(current, dict):
current = current[part]
return current
def get(self, item: str, default: Any = None) -> Any:
""" Get a value if it exists, if not return None """
try:
return self[item]
except KeyError:
return default
def __setitem__(self, key: str, value: Any) -> None:
""" Create or update the value at specified /-delimited path. Creates a dict structure for nested path parts"""
path_parts = key.split("/")
if isinstance(value, str):
value = FlexConfig._value_from_string(value)
if isinstance(value, dict):
# Could be updating multiple items, flatten and set them one at a time
for inner_key, inner_value in FlexConfig.flatten_dict(value).items():
self[f"{key}/{inner_key}"] = inner_value
return
target = self
for part in path_parts[:-1]:
target = target.setdefault(part, {})
if target is self: # Make sure we don't recursively call this fn again
super().__setitem__(path_parts[-1], value)
else:
target[path_parts[-1]] = value
__all__ = ["FlexConfig", "ConfigSource", "AWSSource", "EnvSource", "YAMLSource"]
| StarcoderdataPython |
3358151 | # coding: utf-8
import socketserver
import os
# Copyright 2020 <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Furthermore it is derived from the Python documentation examples thus
# some of the code is Copyright © 2001-2020 Python Software
# Foundation; All Rights Reserved
#
# http://docs.python.org/2/library/socketserver.html
#
# run: python freetests.py
# try: curl -v -X GET http://127.0.0.1:8080/
class MyWebServer(socketserver.BaseRequestHandler):
    """Minimal static-file HTTP server rooted at ./www.

    Supports 200 (file served), 301 (redirect to add a trailing slash on
    directories), 404 (missing file / path escape) and 405 (non-GET).
    """

    def handle(self):
        """Parse one request from the socket and dispatch a response."""
        self.data = self.request.recv(1024).strip()
        request = self.data.decode().split()
        # ignore any empty requests
        if len(request) < 1:
            return
        method = request[0]
        requestPath = request[1]
        # only accept GET requests
        if method != 'GET':
            self.sendMethodNotAllowed()
            return
        # www folder path
        basePath = os.getcwd() + '/www'
        # verify that client is requesting from www folder (no ../ escape)
        requestAbsPath = os.path.abspath(basePath + requestPath)
        if requestAbsPath[:len(basePath)] != basePath:
            self.sendNotFound()
            return
        # process request; the loop retries once when a directory path is
        # rewritten to its index.html
        while True:
            try:
                path = basePath + requestPath
                fileType = requestPath.split('.')[-1]
                fileSize = os.path.getsize(path)
                # bug fix: the original never closed the file handle; the
                # with-block closes it even if sendOk raises
                with open(path, 'r') as f:
                    self.sendOk(f, fileType, fileSize)
            except (FileNotFoundError, NotADirectoryError):
                self.sendNotFound()
            except IsADirectoryError:
                # serve default page of directory
                if requestPath[-1] == '/':
                    requestPath += 'index.html'
                    continue
                # otherwise, use a redirect to correct the path ending
                else:
                    newLocation = 'http://127.0.0.1:8080' + requestPath + '/'
                    self.sendRedirect(newLocation)
            break

    def sendOk(self, fileHandle, fileType, fileSize):
        """Send a 200 response whose body is the file's contents.

        NOTE(review): Content-Length is the on-disk byte size while the
        body was read in text mode; these only match for single-byte
        content with Unix line endings -- confirm for the served assets.
        """
        content = fileHandle.read()
        status = 'HTTP/1.1 200 OK\r\n'
        contentType = ''
        if fileType == 'html':
            contentType = 'Content-Type: text/html\r\n'
        elif fileType == 'css':
            contentType = 'Content-Type: text/css\r\n'
        contentLength = 'Content-Length: ' + str(fileSize) + '\r\n'
        headerEnd = '\r\n'
        response = status + contentType + contentLength + headerEnd + content
        self.request.sendall(bytes(response, 'utf-8'))

    def sendRedirect(self, newLocation):
        """Send a 301 redirect pointing the client at newLocation."""
        status = 'HTTP/1.1 301 Moved Permanently\r\n'
        location = 'Location: ' + newLocation + '\r\n'
        headerEnd = '\r\n'
        response = status + location + headerEnd
        self.request.sendall(bytes(response, 'utf-8'))

    def sendNotFound(self):
        """Send a 404 response with a small HTML body."""
        content = "<h1>404 Not Found</h1>\n"
        status = 'HTTP/1.1 404 Not Found\r\n'
        contentType = 'Content-Type: text/html\r\n'
        contentLength = 'Content-Length: ' + str(len(bytes(content, 'utf-8'))) + '\r\n'
        headerEnd = '\r\n'
        response = status + contentType + contentLength + headerEnd + content
        self.request.sendall(bytes(response, 'utf-8'))

    def sendMethodNotAllowed(self):
        """Send a 405 response advertising GET as the only allowed method."""
        content = '<h1>405 Method Not Allowed</h1>\n'
        status = 'HTTP/1.1 405 Method Not Allowed\r\n'
        allow = 'Allow: GET\r\n'
        contentType = 'Content-Type: text/html\r\n'
        contentLength = 'Content-Length: ' + str(len(bytes(content, 'utf-8'))) + '\r\n'
        headerEnd = '\r\n'
        # bug fix: contentLength was computed but omitted from the response,
        # so the 405 reply went out without a Content-Length header
        response = status + allow + contentType + contentLength + headerEnd + content
        self.request.sendall(bytes(response, 'utf-8'))
if __name__ == "__main__":
    # Script entry point: serve MyWebServer on localhost:8080 until Ctrl-C.
    HOST = "localhost"
    PORT = 8080
    # Allow immediate restarts without waiting out the TIME_WAIT state.
    socketserver.TCPServer.allow_reuse_address = True
    server = socketserver.TCPServer((HOST, PORT), MyWebServer)
    server.serve_forever()
| StarcoderdataPython |
3393941 | <gh_stars>1-10
"""This module implements the main basic methods to Image Processing."""
try:
from postimpressionism.Setimpressionism import *
except ModuleNotFoundError:
from Setimpressionism import *
# If I am testing from the directory . . .
img_path = "C:\\"
def _path_ (p = None):
"""Setter and Getter for default path to the image files"""
global img_path; #Python reserved word and where to find them
if(p.__class__() != str):
return img_path
img_path = p
def show (img, title=''):
    """Display *img* with matplotlib, using *title* as the figure title.

    *img* may be an array or a filename; for filenames the title defaults
    to the file name itself.
    """
    if(img.__class__ == str):
        if(title==''):
            title = img
        # bug fix: the original called ``imopen(input)``, passing the
        # *builtin* ``input`` function instead of the ``img`` argument,
        # so showing an image by filename could never work.
        img = imopen(img)
    plot.imshow(img)
    plot.title(title)
    plot.show()
def imsave (image, name):
    """Write *image* (array or filename) to disk under *name*."""
    pixels = imopen(image)
    pil_image = im.fromarray(pixels)
    pil_image.save(name)
def imopen (image, noalpha = True):
    """Load *image* (filename or array-like) as a writable numpy array.

    With ``noalpha`` (the default) any channel beyond RGB is dropped;
    otherwise a copy of the full array is returned.
    """
    if type(image) is str:
        image = im.open(image)
    pixels = array(image)
    pixels.setflags(write=1)
    if noalpha and pixels.shape[2] > 3:
        # keep only the first three (RGB) channels
        return pixels[:, :, 0:3]
    return pixels.copy()
def convert (a, b, c, is_hsv=False):
    """Convert between RGB and HSV colour triples.

    By default (a, b, c) is an RGB pixel with channels in 0-255 and the
    HSV triple (0-1 range) is returned as a list; with ``is_hsv`` the
    arguments are an HSV triple and the 0-255 integer RGB list is
    returned.
    """
    if not is_hsv:
        # scale the 0-255 channels into the 0-1 range colorsys expects
        return list(rgb_to_hsv(a / 255, b / 255, c / 255))
    rgb = hsv_to_rgb(a, b, c)
    # scale each 0-1 channel back up to an integer 0-255 value
    return [int(255 * channel) for channel in rgb]
def gray (px, *weights, cast = int):
    """Converts the pixel to grayscale using the given weights (or 1 by default) casting to int"""
    total = cast(0)
    weight_sum = cast(0)
    for index, channel in enumerate(px):
        value = cast(channel)
        try:
            # weighted contribution when a weight was supplied ...
            total += value * weights[index]
            weight_sum += weights[index]
        except Exception:
            # ... otherwise the channel counts with weight 1
            total += value
            weight_sum += 1
    return total / weight_sum
def grey (p, *w, c=uint8):
    """Converts the pixel to grayscale using the given weights (or 1 by default) keeping uint8"""
    # bug fix: the original ``gray(p, *w, c)`` appended the cast as an
    # extra positional *weight* (``cast`` is keyword-only in gray's
    # signature), so the uint8 cast was silently ignored; it must be
    # forwarded through the ``cast`` keyword instead.
    return gray(p, *w, cast=c)
def mean (p, include_zeros = True):
    """Sums all the channels and divide by their quantity (by default, counts zeros as well)"""
    total = 0
    count = 0
    for channel in p:
        total += channel
        # a zero channel is only counted when include_zeros is set
        if include_zeros or channel != 0:
            count += 1
    return total // count
def product (p):
    """Multiply all the channels (mod 256)"""
    result = 1
    for channel in p:
        result = result * channel
    return result % 256
def mult(x, y):
    """Return the product x * y."""
    return x * y

def div(x, y):
    """Return the floor division x // y."""
    return x // y

def add(x, y):
    """Return the sum x + y."""
    return x + y

def sub(x, y):
    """Return the difference x - y."""
    return x - y
# End of the basic helpers; announce on stdout that the module imported.
print("\tAll basic\tmethods imported.")
4804193 | <filename>compose/views.py<gh_stars>1-10
import datetime
import itertools
from django.contrib import auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.shortcuts import get_object_or_404, redirect, render
from .models import DailyEntry, get_current_streak
@login_required
def index(request):
    """Render the main compose page with the user's past entries."""
    entries_by_month = get_past_entries_by_month(request.user)
    return render(request, 'compose/index.html', {
        'past_entries_by_month': entries_by_month,
        'user': request.user,
    })
def get_past_entries_by_month(user, *, exclude=None):
    """Return a list of (month, entries) pairs in reverse chronological order,
    where `month` is the month and year as a string and `entries` is a list of
    DailyEntry objects, also in reverse chronological order.
    """
    past_entries = (
        DailyEntry.objects
        .filter(date__lt=datetime.date.today(), user=user)
        .exclude(date=exclude)
        .exclude(word_count=0)
        .order_by('-date')
    )

    def by_month(entry):
        # group key: the (month, year) of the entry's date
        return (entry.date.month, entry.date.year)

    return [month(group) for _, group in itertools.groupby(past_entries, by_month)]
def month(entrygroup):
    """Materialize a group of entries and pair it with its 'Month Year' label."""
    entries = list(entrygroup)
    label = entries[0].date.strftime('%B %Y')
    return (label, entries)
@login_required
def archive(request, year, month, day):
    """Render a single past entry for the given calendar date."""
    # NOTE(review): the ``month`` parameter shadows the module-level
    # ``month()`` helper within this function body only.
    date = datetime.date(year, month, day)
    entry = get_object_or_404(DailyEntry, date=date, user=request.user)
    entries_by_month = get_past_entries_by_month(request.user, exclude=date)
    return render(request, 'compose/archive.html', {
        'entry': entry,
        'past_entries_by_month': entries_by_month,
        'user': request.user
    })
def login(request):
    """Log a user in.

    POST: authenticate the submitted credentials; on success redirect to
    the ``next`` target (or the site root), on failure re-render the form
    with an error message.
    GET: show the login form, or redirect home if already authenticated.
    """
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = auth.authenticate(request, username=username, password=password)
        if user is not None:
            auth.login(request, user)
            # fall back to the site root when no explicit 'next' was given
            return redirect(request.POST['next'] or '/')
        else:
            blank_form = AuthenticationForm()
            context = {
                'form': blank_form,
                'errormsg': 'Invalid username or password',
            }
            return render(request, 'compose/login.html', context)
    else:
        if request.user.is_authenticated:
            return redirect('/')
        form = AuthenticationForm()
        context = {
            'form': form,
            # preserve the originally requested page across the login
            'next': request.GET.get('next')
        }
        return render(request, 'compose/login.html', context)
def logout(request):
    """Log the current user out and return to the login page."""
    auth.logout(request)
    return redirect('compose:login')
| StarcoderdataPython |
1731032 | import sys
sys.path.insert(0, './')
from raspberrypy.network.wifi import Wifi
from time import sleep
if __name__ == '__main__':
    # Poll the wlan0 interface and print the signal levels of the two
    # reference access points every half second.
    wifi = Wifi(interface='wlan0', ignore_root_limit=True)

    def get_pos(wifi):
        """Return (siglevel, siglevel) for the two tracked SSIDs."""
        wifi.update()
        return (wifi.cells['CandyTime_804_plus'].siglevel,
                wifi.cells['CandyTime_804'].siglevel)

    while True:
        sleep(0.5)
        # bug fix: ``print get_pos(wifi)`` is Python-2-only syntax and a
        # SyntaxError under Python 3; the parenthesized call is valid on
        # both interpreters and produces the same output.
        print(get_pos(wifi))
| StarcoderdataPython |
4828198 |
import sys
import os
import torch
import torch.nn as nn
import numpy as np
# import torchvision
from torch.utils.data import DataLoader
from datetime import datetime
import random
import argparse
from utils import *
def parse_args():
    """Build and parse the command-line options for the LSTM trainer.

    Every option has a default, so the script also runs unconfigured.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_path', type=str, default='dataset/toyset/',
                        help='Input dataset path')
    parser.add_argument('--input_size', type=int, default=16,
                        help='Dimention of the poi/user')
    parser.add_argument('--hidden_size', type=int, default=16,
                        help='Set the output_size of LSTM')
    parser.add_argument('--layers', type=int, default=2,
                        help='Set the layers of LSTM')
    parser.add_argument('--lr', type=float, default=0.001,
                        help='Set the learning rate')
    parser.add_argument('--delt_t', type=float, default=6.0,
                        help='Set the delt_t')
    parser.add_argument('--epochs', type=int, default=20,
                        help='Set the epochs')
    parser.add_argument('--dr', type=float, default=0.2,
                        help='Set the drop rate')
    parser.add_argument('--seed', type=int, default=1,
                        help='Set the random seed')
    parser.add_argument('--test_sample_num', type=int, default=100,
                        help='Set the number of test records')
    return parser.parse_args()
def log(fname, s):
    """Append a timestamped line ``<now>: <s>`` to the file *fname*.

    The file is opened in append mode; the with-block guarantees it is
    closed even if the write fails (the original leaked the handle on
    error).
    """
    with open(fname, 'a') as f:
        f.write(str(datetime.now()) + ': ' + s + '\n')
def switch_list_to_tensor():
    """Load the embedding vectors and convert each entry to a torch.Tensor.

    Reads the check-in history and the 2nd-order embedding file from the
    global ``PATH`` (assigned in __main__), normalizes the embedding dict
    and replaces every value list with a torch.Tensor.
    NOTE(review): ``time`` and the reader helpers (read_history, read_vec,
    normalize_dict) are expected to arrive via ``from utils import *`` --
    they are not imported in this file directly; confirm.
    """
    print(PATH)
    # NOTE(review): start_time is never used afterwards
    start_time = time.time()
    print('Loading configuration!')
    # history is loaded but unused here; presumably kept for side effects
    # or symmetry with the other loaders -- verify against utils
    history = read_history(PATH+'train_checkin_file.txt')
    final_model2 = read_vec(PATH+'vec_2nd_wo_norm.txt')
    print('fnished read!')
    final_model2 = normalize_dict(final_model2)
    print('finished normalized!')
    # final_model= connecate(final_model1,final_model2)
    for k,v in final_model2.items():
        final_model2[k] = torch.Tensor(v)
    return final_model2 # final_model2['124'].shape[0]= 16
def switch_tensor_to_array(final_model2):
    """Replace each torch.Tensor value in *final_model2* with its numpy
    array, mutating and returning the same dict."""
    for key in final_model2:
        final_model2[key] = final_model2[key].numpy()
    return final_model2
def gen_train_data(final_model2):
    """Build per-user next-POI training sequences from the train check-ins.

    For every user with at least 3 records, step t's input is
    concat(user_vec, poi_vec[t]) and its target is poi_vec[t+1]; each
    user's sequence is then packed into one (seq_len, dim) tensor.
    Returns (inputs, targets) as two parallel lists of tensors.
    NOTE(review): ``generate_user_records`` comes from ``utils`` and the
    global ``PATH`` is set in __main__ -- confirm.
    """
    train_user_records = generate_user_records(PATH+'train_checkin_file.txt')
    # drop users with fewer than 3 check-ins (too short to train on)
    less_three_records_user_list = []
    for k in train_user_records.keys():
        if len(train_user_records[k]) < 3:
            less_three_records_user_list.append(k)
    for k in less_three_records_user_list:
        train_user_records.pop(k)
    LSTM_train_records_output = []
    LSTM_train_records_input = []
    index = 0
    for userid,poi_list in train_user_records.items():
        userid_tensor = final_model2[userid]
        LSTM_train_records_input.append([])
        LSTM_train_records_output.append([])
        for poi in poi_list:
            LSTM_train_records_input[index].append(torch.cat((userid_tensor, final_model2[poi]), 0))
            LSTM_train_records_output[index].append(final_model2[poi])
        index = index + 1
    # next-POI shift: inputs drop the last visit, targets drop the first
    for index,item in enumerate(LSTM_train_records_input):
        LSTM_train_records_input[index] = item[:-1]
    for index,item in enumerate(LSTM_train_records_output):
        LSTM_train_records_output[index] = item[1:]
    LSTM_train_records_input_ = []
    LSTM_train_records_output_ = []
    # flatten each user's list of 1-D vectors into one (seq_len, dim) tensor
    for index,tensor_list in enumerate(LSTM_train_records_input):
        tensor = tensor_list[0]
        for item in tensor_list[1:]:
            tensor = torch.cat((tensor, item), 0)
        tensor = tensor.view(len(tensor_list), -1)
        LSTM_train_records_input_.append(tensor)
    for index,tensor_list in enumerate(LSTM_train_records_output):
        tensor = tensor_list[0]
        for item in tensor_list[1:]:
            tensor = torch.cat((tensor, item), 0)
        tensor = tensor.view(len(tensor_list), -1)
        LSTM_train_records_output_.append(tensor)
    print('gen_train_data')
    return LSTM_train_records_input_, LSTM_train_records_output_
def gen_test_data(final_model2):
    """Build the evaluation inputs from a random sample of test records.

    Each sampled record is (userid, input_poi_list, target_poi_list); the
    inputs are concat(user_vec, poi_vec) sequences packed into tensors.
    Returns (input_tensors, target_poi_lists, user_ids), index-aligned.
    NOTE(review): globals DELT_T / SEED / TEST_SAMPLE_NUM are assigned in
    __main__, and gen_LSTM_test_records comes from ``utils`` -- confirm.
    """
    train_user_records = generate_user_records(PATH+'train_checkin_file.txt')
    test_user_records = generate_user_records(PATH+'test_checkin_file.txt')
    LSTM_test_records = gen_LSTM_test_records(train_user_records, test_user_records, PATH+'test_checkin_file.txt',delt_t=DELT_T)
    random.seed(SEED)
    LSTM_test_records = random.sample(LSTM_test_records, TEST_SAMPLE_NUM)
    LSTM_test_records_input = []
    LSTM_test_records_output = []
    LSTM_test_user_list = []
    for userid,input_poi_list,target_poi_list in LSTM_test_records:
        userid_tensor = final_model2[userid]
        input_ = []
        try:
            for poi in input_poi_list:
                input_.append(torch.cat((userid_tensor, final_model2[poi]), 0))
            LSTM_test_records_input.append(input_)
            LSTM_test_records_output.append(target_poi_list)
            LSTM_test_user_list.append(userid)
        except:
            # NOTE(review): bare except silently drops any record whose
            # user or POI has no embedding (KeyError) -- confirm intended
            continue
    LSTM_test_records_input_ = []
    # flatten each sequence of 1-D vectors into one (seq_len, dim) tensor
    for index,tensor_list in enumerate(LSTM_test_records_input):
        tensor = tensor_list[0]
        for item in tensor_list[1:]:
            tensor = torch.cat((tensor, item), 0)
        tensor = tensor.view(len(tensor_list), -1)
        LSTM_test_records_input_.append(tensor)
    print('gen_test_data')
    return LSTM_test_records_input_, LSTM_test_records_output, LSTM_test_user_list
class lstm(nn.Module):
    """Stacked LSTM followed by a linear head projecting each timestep's
    hidden state to a POI embedding of size INPUT_SIZE/2.

    NOTE(review): the layer sizes are read from the module-level globals
    INPUT_SIZE / HIDDEN_SIZE / LAYERS / DROP_RATE assigned in __main__,
    so the class can only be instantiated after argument parsing.
    """
    def __init__(self):
        super(lstm, self).__init__()
        self.rnn = nn.LSTM(
            input_size = INPUT_SIZE,
            hidden_size = HIDDEN_SIZE,
            num_layers = LAYERS,
            dropout = DROP_RATE,
            batch_first = False
        )
        self.hidden_out = nn.Linear(HIDDEN_SIZE, int(INPUT_SIZE/2))
        # placeholders for hidden/cell state; never read in this class
        self.h_s = None
        self.h_c = None
    def forward(self, x):
        """Run the LSTM over x and project every timestep's output."""
        r_out, (h_s, h_c) = self.rnn(x)
        output = self.hidden_out(r_out)
        return output
if __name__ == "__main__":
    # Parse CLI options and publish them as module-level globals that the
    # data builders and the lstm class read.
    args = parse_args()
    PATH = args.input_path
    # inputs are concat(user_vec, poi_vec), hence twice the embedding size
    INPUT_SIZE = args.input_size * 2
    HIDDEN_SIZE = args.hidden_size
    DELT_T = args.delt_t
    TEST_SAMPLE_NUM = args.test_sample_num
    LAYERS = args.layers
    DROP_RATE = args.dr
    LR = args.lr
    EPOCHS = args.epochs
    SEED = args.seed
    random.seed(SEED)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    torch.manual_seed(SEED)
    # Load embeddings and build per-user training/evaluation sequences.
    final_model2 = switch_list_to_tensor()
    LSTM_train_records_input, LSTM_train_records_output = gen_train_data(final_model2)
    train = zip(LSTM_train_records_input, LSTM_train_records_output) # iterator
    train_ = []
    # reshape each sequence to (seq_len, batch=1, dim) for batch_first=False
    for pairs in train:
        train_.append((pairs[0].view(-1,1,INPUT_SIZE), pairs[1].view(-1,1,int(INPUT_SIZE/2))))
    LSTM_test_records_input, LSTM_test_target_poi_list, LSTM_test_user_list = gen_test_data(final_model2)
    rnn = lstm().to(device)
    optimizer = torch.optim.Adam(rnn.parameters(), lr=LR)   # optimize all cnn parameters
    loss_func = nn.MSELoss()
    # decay the LR by 10x at 1/2 and 3/4 of the training run
    mult_step_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                     milestones=[EPOCHS//2, EPOCHS//4*3], gamma=0.1)
    train_loss = []
    # NOTE(review): min_valid_loss is initialized but never used
    min_valid_loss = np.inf
    print('Training...')
    for i in range(EPOCHS):
        total_train_loss = []
        rnn.train()
        for step, (b_x, b_y) in enumerate(train_):
            b_x = b_x.type(torch.FloatTensor).to(device)
            b_y = b_y.type(torch.FloatTensor).to(device)
            prediction = rnn(b_x)
            loss = loss_func(prediction, b_y)
            optimizer.zero_grad()                   # clear gradients for this training step
            loss.backward()                         # backpropagation, compute gradients
            optimizer.step()                        # apply gradients
            total_train_loss .append(loss.item())
        train_loss.append(np.mean(total_train_loss ))
        # reshuffle the per-user sequences between epochs
        random.shuffle(train_)
        log_string = ('iter: [{:d}/{:d}], train_loss: {:0.6f}, lr: {:0.7f}').format((i + 1), EPOCHS, train_loss[-1], optimizer.param_groups[0]['lr'])
        mult_step_scheduler.step()
        print(log_string)
        log('./LSTM.log', log_string)
    # Inference: keep only the final timestep's prediction per test user.
    rnn = rnn.eval()
    candidate_pois_tensor = []
    for step, b_x in enumerate(LSTM_test_records_input):
        b_x = b_x.view(-1,1,int(INPUT_SIZE))
        b_x = b_x.type(torch.FloatTensor).to(device)
        prediction = rnn(b_x)
        prediction = prediction[-1][-1]
        candidate_pois_tensor.append(prediction)
    norm_candidate_pois_array = []
    # NOTE(review): ``normalize`` is expected from ``from utils import *``
    for item in candidate_pois_tensor:
        item = item.detach().cpu().numpy()
        item = normalize(item)
        norm_candidate_pois_array.append(item)
    final_model2 = switch_tensor_to_array(final_model2)
    node_type = get_node_type(PATH+'node_type.txt')
    history = read_history(PATH+'train_checkin_file.txt')
    print('evaluate!')
    accuracy, precision, recall, ndcg, hit_ratio, MAP = evaleate_all_index_LSTM_no_history(norm_candidate_pois_array, LSTM_test_target_poi_list, node_type, final_model2)
| StarcoderdataPython |
1776479 | <reponame>LatticeLabVentures/BeamNet<filename>examples/starkex-cairo/starkware/python/expression_string.py
"""
The class ExpressionString allows creating strings that represent arithmetic expressions with the
correct amount of parentheses.
For example, you may define:
a = ExpressionString.highest('a')
b = ExpressionString.highest('b')
...
And then,
str((a + b) * (c * (d + e)) == '(a + b) * c * (d + e)'
"""
from enum import Enum, auto
class OperatorPrecedence(Enum):
    """Precedence level of an operator, ordered from loosest to tightest
    binding; higher values bind more tightly."""

    LOWEST = 0    # unary minus
    PLUS = 1      # binary + and -
    MUL = 2       # * and /
    POW = 3       # power (**)
    ADDROF = 4    # address-of operator (&)
    HIGHEST = 5   # atoms: literals, names, parenthesized expressions, ...

    def __lt__(self, other):
        """Order precedences by their numeric level."""
        return self.value < other.value
class ExpressionString:
    """
    Represents a string which contains an arithmetic expression, together with the precedence of the
    outmost operator in this expression (the root of the derivation tree).
    This allows to combine expression without unnecessary parentheses.
    For example, if a=4*19 and b=20*54 are two expressions, then the lowest operation in both
    is '*'. In this case a + b and a * b do not require parentheses:
      a + b: 4 * 19 + 20 * 54
      a * b: 4 * 19 * 20 * 54
    whereas a^b does:
      a^b: (4 * 19)^(20 * 54)
    """
    def __init__(self, txt, outmost_operator_precedence):
        # txt: the expression text; outmost_operator_precedence: the
        # OperatorPrecedence of the root operator of that text.
        self.txt = txt
        self.outmost_operator_precedence = outmost_operator_precedence
    @staticmethod
    def highest(txt):
        """Wrap *txt* as an atom (never needs surrounding parentheses)."""
        return ExpressionString(txt, OperatorPrecedence.HIGHEST)
    @staticmethod
    def lowest(txt):
        """Wrap *txt* as lowest precedence (always parenthesized when combined)."""
        return ExpressionString(txt, OperatorPrecedence.LOWEST)
    def __format__(self, format_spec: str) -> str:
        """
        format_spec should be the lowest operator precedence (e.g., 'PLUS', 'MUL', ...) from which
        the resulting string does not require parentheses. If the current outmost operator
        precedence is lower than the precedence in the format specification, parentheses will be
        added.
        For example, consider the format string '{x:MUL} * {y:MUL}'.
        If x is the expression '5 + 7', parentheses will be added (as PLUS is lower than MUL) and
        the result will start with '(5 + 7) * ...'. On the other hand, expressions such as '5 * 7'
        and '5^7' will not require parentheses, and the result will start with '5 * 7 * ...' or
        '5^7 * ...'.
        """
        return self._maybe_add_parentheses(OperatorPrecedence[format_spec])
    def __str__(self):
        return self.txt
    def __add__(self, other):
        """Combine with '+'; both sides may keep PLUS-level expressions bare."""
        other = to_expr_string(other)
        return ExpressionString(f'{self:PLUS} + {other:PLUS}', OperatorPrecedence.PLUS)
    def __sub__(self, other):
        # Note that self and other are not symmetric. For example (a + b) - (c + d) should be:
        # a + b - (c + d).
        other = to_expr_string(other)
        return ExpressionString(f'{self:PLUS} - {other:MUL}', OperatorPrecedence.PLUS)
    def __mul__(self, other):
        """Combine with '*'; both sides may keep MUL-level expressions bare."""
        other = to_expr_string(other)
        return ExpressionString(f'{self:MUL} * {other:MUL}', OperatorPrecedence.MUL)
    def __truediv__(self, other):
        # Note that self and other are not symmetric. For example (a * b) / (c * d) should be:
        # a * b / (c * d).
        other = to_expr_string(other)
        return ExpressionString(f'{self:MUL} / {other:POW}', OperatorPrecedence.MUL)
    def __pow__(self, other):
        other = to_expr_string(other)
        # For the two expressions (a ** b) ** c and a ** (b ** c), parentheses will always be added.
        return ExpressionString(f'{self:HIGHEST}^{other:HIGHEST}', OperatorPrecedence.POW)
    def __neg__(self):
        """Negate; the result is LOWEST so it is re-parenthesized when combined."""
        return ExpressionString(f'-{self:ADDROF}', OperatorPrecedence.LOWEST)
    def address_of(self):
        """Prefix with the address-of operator '&'."""
        return ExpressionString(f'&{self:ADDROF}', OperatorPrecedence.ADDROF)
    def prepend(self, txt):
        """
        Prepends the given text to the string, without changing its OperatorPrecedence.
        """
        return ExpressionString(txt + self.txt, self.outmost_operator_precedence)
    def _maybe_add_parentheses(self, operator_precedence: OperatorPrecedence) -> str:
        """
        Returns the expression without parentheses if the current precedence is less than or equal
        to operator_precedence and with parentheses otherwise.
        """
        if self.outmost_operator_precedence < operator_precedence:
            return '(%s)' % self.txt
        else:
            return self.txt
def to_expr_string(val):
    """Coerce *val* to an ExpressionString.

    A plain string becomes an ExpressionString of the lowest precedence
    level (so embedding it in an expression adds parentheses around it);
    an ExpressionString is returned unchanged.
    """
    if not isinstance(val, str):
        assert isinstance(val, ExpressionString)
        return val
    return ExpressionString.lowest(val)
| StarcoderdataPython |
3266828 | import uuid
from django.db import models
from multiselectfield import MultiSelectField
from common.util.choices import (
treasure_grade_choices,
treasure_type_choices,
currency_denomination_choices,
dices_choices,
damage_type_choices,
weapon_type_choices,
weapon_properties_choices,
)
# Create your models here.
class Item(models.Model):
    """Base model for any ownable game item (treasure, gear, ...)."""
    # stable public identifier, generated server-side and immutable
    item_uuid = models.UUIDField(
        primary_key=True, default=uuid.uuid4, unique=True, editable=False
    )
    name = models.CharField(max_length=120, unique=True)
    description = models.TextField(blank=True)
    # rarity / quality tier, from the shared choices module
    treasure_grade = models.CharField(
        max_length=120, choices=treasure_grade_choices, default="normal"
    )
    treasure_type = models.CharField(
        max_length=120, choices=treasure_type_choices, default="common"
    )
    # price expressed in the denomination below; optional
    cost = models.IntegerField(blank=True, null=True)
    currency_denomination = models.CharField(
        max_length=120, choices=currency_denomination_choices, default="gold"
    )
    weight = models.PositiveIntegerField(default=1, blank=True)
    def __str__(self) -> str:
        return self.name
class Weapon(Item):
    """An Item that can be wielded; adds damage dice, type and range."""
    is_ranged = models.BooleanField(default=False)
    # damage roll is number_of_die x damage_die (e.g. 2 x d6)
    damage_die = models.CharField(max_length=100, choices=dices_choices, default="d6")
    number_of_die = models.PositiveIntegerField(default=1)
    # a weapon may deal several damage types at once
    damage_type = MultiSelectField(choices=damage_type_choices, null=True, blank=True)
    weapon_type = models.CharField(
        max_length=100, choices=weapon_type_choices, default="simple"
    )
    weapon_properties = MultiSelectField(
        choices=weapon_properties_choices, null=True, blank=True
    )
    # effective range band; both optional (melee weapons leave them unset)
    min_range = models.IntegerField(blank=True, null=True)
    max_range = models.IntegerField(blank=True, null=True)
| StarcoderdataPython |
# NOTE(review): the leading "46099 | " fragment below is a dataset
# extraction artifact fused onto the __all__ line; it is not valid Python
# and should be stripped when restoring the original module.
46099 | __all__ = [
    'order_rate_over_time'
]
# re-export the package's single public function at top level
from .order_rate_over_time import order_rate_over_time
| StarcoderdataPython |
3355823 | from design_baselines.data import StaticGraphTask, build_pipeline
from design_baselines.logger import Logger
from design_baselines.utils import spearman
from design_baselines.permmdtraining_rep_coms_cleaned.trainers import ConservativeObjectiveModel
from design_baselines.data import StaticGraphTask, build_pipeline
from design_baselines.logger import Logger
from design_baselines.utils import spearman
from design_baselines.context_trainAoptimizemmd_rep_coms_cleaned.trainers import ConservativeObjectiveModel
from design_baselines.context_trainAoptimizemmd_rep_coms_cleaned.nets import ForwardModel
from design_baselines.context_trainAoptimizemmd_rep_coms_cleaned.nets import RepModel1
from design_baselines.context_trainAoptimizemmd_rep_coms_cleaned.nets import RepModel2
import tensorflow as tf
import numpy as np
import os
import click
import json
from tensorboard.plugins import projector
import random
def visualize1(data, labels, num, log_dir='/nfs/kun2/users/hanqi2019/input_graph'):
    """Export *data* with per-row *labels* as a TensorBoard embedding
    projector checkpoint under *log_dir*.

    Improvement: the output directory was a hard-coded NFS path; it is now
    a parameter whose default preserves the original behavior.
    ``num`` is kept for interface compatibility; it is only stringified.
    """
    num = str(num)
    print(data.shape)
    print(labels.shape)
    # File names / tensor key expected by the projector plugin.
    d1 = 'metadata.tsv'
    d2 = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
    d3 = "embedding.ckpt"
    print(d1)
    # Save labels separately, one per line, as projector metadata.
    with open(os.path.join(log_dir, d1), "w") as f:
        for subwords in labels:
            f.write("{}\n".format(subwords))
    # Store the embedding matrix as a checkpointed variable; the tensor's
    # name inside the checkpoint is the attribute path in d2.
    weights = tf.Variable(data)
    checkpoint = tf.train.Checkpoint(embedding=weights)
    checkpoint.save(os.path.join(log_dir, d3))
    # Point the projector config at the saved tensor and its metadata.
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = d2
    embedding.metadata_path = d1
    projector.visualize_embeddings(log_dir, config)
def visualize2(data, labels, num, log_dir='/nfs/kun2/users/hanqi2019/solution_graph'):
    """Export *data* with per-row *labels* as a TensorBoard embedding
    projector checkpoint under *log_dir* (the solution-graph directory).

    Improvement: the output directory was a hard-coded NFS path; it is now
    a parameter whose default preserves the original behavior.
    ``num`` is kept for interface compatibility; it is only stringified.
    """
    num = str(num)
    print(data.shape)
    print(labels.shape)
    # File names / tensor key expected by the projector plugin.
    d1 = 'metadata.tsv'
    d2 = "embedding/.ATTRIBUTES/VARIABLE_VALUE"
    d3 = "embedding.ckpt"
    print(d1)
    # Save labels separately, one per line, as projector metadata.
    with open(os.path.join(log_dir, d1), "w") as f:
        for subwords in labels:
            f.write("{}\n".format(subwords))
    # Store the embedding matrix as a checkpointed variable; the tensor's
    # name inside the checkpoint is the attribute path in d2.
    weights = tf.Variable(data)
    checkpoint = tf.train.Checkpoint(embedding=weights)
    checkpoint.save(os.path.join(log_dir, d3))
    # Point the projector config at the saved tensor and its metadata.
    config = projector.ProjectorConfig()
    embedding = config.embeddings.add()
    embedding.tensor_name = d2
    embedding.metadata_path = d1
    projector.visualize_embeddings(log_dir, config)
def coms_cleaned(
        logging_dir,
        task,
        task_relabel,
        normalize_ys,
        normalize_xs,
        in_latent_space,
        particle_lr,
        particle_train_gradient_steps,
        particle_evaluate_gradient_steps,
        particle_entropy_coefficient,
        forward_model_activations,
        forward_model_hidden_size,
        forward_model_final_tanh,
        forward_model_lr,
        forward_model_alpha,
        forward_model_alpha_lr,
        forward_model_overestimation_limit,
        forward_model_noise_std,
        forward_model_batch_size,
        forward_model_val_size,
        forward_model_epochs,
        evaluation_samples,
        fast,
        latent_space_size,
        rep_model_activations,
        rep_model_lr,
        rep_model_hidden_size,
        noise_input,
        mmd_param,
        optmmd_param,
        seed=10):
    """Solve a Model-Based Optimization problem using the method:
    Conservative Objective Models (COMs).

    Trains a conservative forward model (with representation networks and
    an MMD penalty) jointly on two TFBind8 tasks, then optimizes the top-k
    designs of each task by gradient ascent and reports their scores.
    NOTE(review): ``task``, ``fast``, ``noise_input``,
    ``particle_evaluate_gradient_steps`` and ``forward_model_val_size``
    are accepted but never used below -- confirm against the caller.
    """
    # store the command line params in a dictionary
    params = dict(
        logging_dir=logging_dir,
        task=task,
        task_relabel=task_relabel,
        normalize_ys=normalize_ys,
        normalize_xs=normalize_xs,
        in_latent_space=in_latent_space,
        particle_lr=particle_lr,
        particle_train_gradient_steps=
        particle_train_gradient_steps,
        particle_evaluate_gradient_steps=
        particle_evaluate_gradient_steps,
        particle_entropy_coefficient=
        particle_entropy_coefficient,
        forward_model_activations=forward_model_activations,
        forward_model_hidden_size=forward_model_hidden_size,
        forward_model_final_tanh=forward_model_final_tanh,
        forward_model_lr=forward_model_lr,
        forward_model_alpha=forward_model_alpha,
        forward_model_alpha_lr=forward_model_alpha_lr,
        forward_model_overestimation_limit=
        forward_model_overestimation_limit,
        forward_model_noise_std=forward_model_noise_std,
        forward_model_batch_size=forward_model_batch_size,
        forward_model_val_size=forward_model_val_size,
        forward_model_epochs=forward_model_epochs,
        evaluation_samples=evaluation_samples,
        fast=fast,
        latent_space_size = latent_space_size,
        rep_model_activations = rep_model_activations,
        rep_model_lr = rep_model_lr,
        rep_model_hidden_size = rep_model_hidden_size,
        noise_input = noise_input,
        mmd_param = mmd_param,
        seed = seed)
    # create the logger and export the experiment parameters
    logger = Logger(logging_dir)
    with open(os.path.join(logging_dir, "params.json"), "w") as f:
        json.dump(params, f, indent=4)
    # create a model-based optimization task
    # NOTE(review): the task list is hard-coded to these two variants and
    # overrides the ``task`` argument -- confirm intended.
    total = 2
    name_list = ["TFBind8-GP-v01", "TFBind8-GP-v02"]
    task_list = []
    for i in name_list:
        task1 = StaticGraphTask(i, relabel=task_relabel)
        task_list.append(task1)
    #all eight/four four
    if normalize_ys:
        for i in task_list:
            i.map_normalize_y()
    if task_list[0].is_discrete and not in_latent_space:
        for i in task_list:
            i.map_to_logits()
    if normalize_xs:
        for i in task_list:
            i.map_normalize_x()
    # Concatenate the designs/scores of all tasks into one dataset.
    x_list = []
    y_list = []
    for i in task_list:
        x_list.append(i.x)
        y_list.append(i.y)
    x1 = task_list[0].x
    y1 = task_list[0].y
    x = list(x1)
    y = list(y1)
    for i in range(1, total):
        x += list(x_list[i])
        y += list(y_list[i])
    x = tf.constant(x)
    y = tf.constant(y)
    input_shape = x1.shape[1:]
    print("input_shape:")
    print(input_shape)
    output_shape = latent_space_size
    # make a neural network to predict scores
    # NOTE(review): the (64,1) / (72,1) intermediate shapes are magic
    # numbers tied to the rep-model architecture -- confirm.
    rep_model_final_tanh = False
    rep_model1 = RepModel1(
        input_shape, (64,1), activations=rep_model_activations,
        hidden_size=rep_model_hidden_size,
        final_tanh=rep_model_final_tanh)
    rep_model2 = RepModel2(
        (72,1), output_shape, activations=rep_model_activations,
        hidden_size=rep_model_hidden_size,
        final_tanh=rep_model_final_tanh)
    forward_model = ForwardModel(
        output_shape, activations=forward_model_activations,
        hidden_size=forward_model_hidden_size,
        final_tanh=forward_model_final_tanh)
    # compute the normalized learning rate of the model
    particle_lr = particle_lr * np.sqrt(np.prod(input_shape))
    # select the top k initial designs from the dataset
    initial_x_list = []
    initial_y_list = []
    for i in range(total):
        indices1 = tf.math.top_k(y_list[i][:, 0], k=evaluation_samples)[1]
        initial_x1 = tf.gather(x_list[i], indices1, axis=0)
        initial_y1 = tf.gather(y_list[i], indices1, axis=0)
        initial_x_list.append(initial_x1)
        initial_y_list.append(initial_y1)
    # Build one-hot context vectors identifying each task.
    # NOTE(review): the context has length 8 although only ``total``=2
    # slots are used -- presumably sized for up to 8 tasks; confirm.
    c_type = []
    for i in range(total):
        c1 = np.zeros(8, dtype='float32')
        c1[i] = 1
        c_type.append(c1)
    c = [c_type[0]]*x_list[0].shape[0]
    for i in range(1,total):
        c = c+[c_type[i]]*x_list[i].shape[0]
    c = tf.constant(c)
    print(c.shape)
    print(x.shape)
    print(y.shape)
    # make a trainer for the forward model
    trainer = ConservativeObjectiveModel(mmd_param = mmd_param,
        rep_model1=rep_model1, rep_model2 = rep_model2,
        rep_model_lr=rep_model_lr,
        forward_model=forward_model, forward_model_opt=tf.keras.optimizers.Adam,
        forward_model_lr=forward_model_lr, alpha=forward_model_alpha,
        alpha_opt=tf.keras.optimizers.Adam, alpha_lr=forward_model_alpha_lr,
        overestimation_limit=forward_model_overestimation_limit,
        particle_lr=particle_lr, noise_std=forward_model_noise_std,
        particle_gradient_steps=particle_train_gradient_steps,
        entropy_coefficient=particle_entropy_coefficient, x_ori=x, optmmd_param = optmmd_param)
    # create a data set
    val_size = int(x1.shape[0]*0.3)
    train_data, validate_data = build_pipeline(c=c,
        x=x, y=y, batch_size=forward_model_batch_size,
        val_size=val_size)
    np.random.seed(seed)
    print("new")
    # train the forward model
    trainer.launch(train_data, validate_data,
                   logger, forward_model_epochs)
    # Optimize the top-k designs of each task for 50 gradient steps and
    # score the results with the (last-constructed) task oracle.
    # NOTE(review): ``task1`` here is the last task created in the loop
    # above, and y_final_list is unused -- confirm.
    x_final_list = initial_x_list
    y_final_list = initial_y_list
    score_list = []
    for i in range(2):
        c_here = np.zeros(8, dtype = 'float32')
        c_here[i]=1
        c_pass = tf.constant([c_here]*x_final_list[i].shape[0])
        print(c_pass)
        x_neg = trainer.optimize(False, c_pass,
            x_final_list[i], 50, training=False)
        score = task1.predict(x_neg)
        score = task1.denormalize_y(score)
        score_list.append(score)
    print(score_list)
# run COMs using the command line interface
if __name__ == '__main__':
    # NOTE(review): coms_cleaned() declares ~30 required positional
    # parameters and no click decorators are attached here, so this bare
    # call raises TypeError -- presumably the @click.option decorators of
    # the original design-baselines entry point were dropped; confirm.
    coms_cleaned()
| StarcoderdataPython |
3244789 | <reponame>jia-wan/GeneralizedLoss-Counting-Pytorch
import torch
import os
import numpy as np
from datasets.crowd import Crowd
from models.vgg import vgg19
import argparse
# module-level handle for the parsed CLI arguments (filled in __main__)
args = None

def train_collate(batch):
    """Collate (image, points, target, st_size) samples into one batch.

    Images are stacked into a single tensor and st_sizes become a float
    tensor; points and targets stay as per-sample sequences because their
    lengths are not fixed.
    """
    images, points, targets, st_sizes = zip(*batch)
    batched_images = torch.stack(images, 0)
    batched_sizes = torch.FloatTensor(st_sizes)
    return batched_images, points, targets, batched_sizes
def parse_args():
    """Build and parse the CLI options for the crowd-counting test run.

    Returns the argparse.Namespace with data-dir, save-dir (model path)
    and the CUDA device string.
    """
    parser = argparse.ArgumentParser(description='Test ')
    parser.add_argument('--data-dir', default='../../data/UCF_Bayes',
                        help='training data directory')
    parser.add_argument('--save-dir', default='./model.pth',
                        help='model path')
    parser.add_argument('--device', default='0', help='assign device')
    args = parser.parse_args()
    return args
if __name__ == '__main__':
    # Evaluate a trained VGG19 crowd-counting model on the test split and
    # report MAE / MSE of the per-image count error.
    args = parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip()  # set vis gpu
    datasets = Crowd(os.path.join(args.data_dir, 'test'), 512, 8, is_gray=False, method='val')
    dataloader = torch.utils.data.DataLoader(datasets, 1, shuffle=False,
                                             num_workers=1, pin_memory=False)
    model = vgg19()
    device = torch.device('cuda')
    model.to(device)
    model.load_state_dict(torch.load(os.path.join(args.save_dir), device))
    # per-image signed error: ground-truth count minus predicted count
    epoch_minus = []
    for inputs, count, name in dataloader:
        inputs = inputs.to(device)
        assert inputs.size(0) == 1, 'the batch size should equal to 1'
        with torch.set_grad_enabled(False):
            outputs = model(inputs)
        # predicted count is the sum of the output density map
        temp_minu = len(count[0]) - torch.sum(outputs).item()
        print(name, temp_minu, len(count[0]), torch.sum(outputs).item())
        epoch_minus.append(temp_minu)
    epoch_minus = np.array(epoch_minus)
    mse = np.sqrt(np.mean(np.square(epoch_minus)))
    mae = np.mean(np.abs(epoch_minus))
    log_str = 'Final Test: mae {}, mse {}'.format(mae, mse)
    print(log_str)
| StarcoderdataPython |
121281 | from api.dataset.models import DataSchema, Dataset
def verify_settings(model, p_key, settings):
    """Assert that the stored *model* record at *p_key* matches *settings*.

    *model* is the name of a model class defined in this module (e.g.
    'DataSchema' or 'Dataset'); falsy expected values are normalized to
    None before comparison, mirroring how the API stores them.
    """
    # Look the class up by name instead of eval(): eval executes arbitrary
    # code if *model* ever came from an untrusted source, while a plain
    # globals() lookup only resolves names defined in this module.
    details = globals()[model].get(p_key)
    for key, setting in settings.items():
        print(getattr(details, key), setting)
        # falsy expected values are stored as None
        setting = setting if setting else None
        assert getattr(details, key) == setting
def load_data():
    """Seed both tables: the schemas first, then the datasets."""
    load_dataschema_data()
    load_dataset()
def load_dataschema_data():
    """Create the DataSchema table and seed it with 50 retired schema rows."""
    DataSchema.create_table(read_capacity_units=1, write_capacity_units=1, wait=True)
    for index in range(50):
        name = "WorkOrder_v%d" % index
        record = DataSchema(name)
        attributes = {
            "dataset": name,
            "status": "RETIRED",
            "description": "Schema for %s" % name,
            "creation_timestamp": "2017-04-24T11:38:41.164Z",
            "last_updated_timestamp": "2017-12-24T22:38:47.346Z",
        }
        for key, value in attributes.items():
            setattr(record, key, value)
        record.save()
def load_dataset():
    """Seed 50 Forecast_v* Dataset rows with identical fixture attributes."""
    for version in range(50):
        dataset_name = "Forecast_v%d" % version
        fixture = {
            "name": dataset_name,
            "description": "Providing a demand forecast",
            "status": "ACTIVE",
            "type": "TRANSACTIONAL",
            "frequency": "DAILY",
            "classification": "Orange",
            "owner": "Forecast team",
            "owner_contact": "forecast@",
            "service_arn": "arn:aws:s3:::org.ent-data-lake",
            "location_pointer": "my.org/Forecasting/Forecast_v1",
            "creation_timestamp": "2017-01-12T11:39:43.164Z",
            "derived_from": None,
            "replaced_by": None,
            "from_date": "2017-01-03",
            "to_date": None,
            "schema_name": "Forecast_schema_v1",
            "schema_location": None,
            "data_lineage": None,
            "compliance": None,
            "enforce_sla": None,
            "sla_cron": "0 6 * * *",
            "tags": [
                {"key": "org", "value": "CRM"},
                {"key": "cost", "value": "SupplyChain"},
            ],
            "retention": None,
            "encryption": None,
            "encryption_type": None,
            "encryption_kms_id": None,
            "cross_region_replication": None,
            "config_crr": None,
            "password": None,
            "s3_logging_enabled": None,
            "config_s3_logging": None,
            "requestor_pays_enabled": None,
        }
        record = Dataset(dataset_name)
        for attribute, value in fixture.items():
            setattr(record, attribute, value)
        record.save()
| StarcoderdataPython |
1622367 | #!/usr/bin/python
import subprocess
import argparse
from prometheus_client import Summary
from prometheus_client import start_http_server, Gauge
import random
import time
def bandwidth_measure_metric(server_ip, server_port):
    '''
    Returns current network bandwidth in Mbps as reported by iperf,
    or None when iperf cannot be executed or its output is unexpected.
    '''
    try:
        # Pass the port through: the original ignored server_port entirely.
        p = subprocess.Popen(
            ['/usr/bin/iperf', '-c', server_ip, '-p', str(server_port)],
            stdout=subprocess.PIPE)
    except OSError:
        # Narrow except: the bare `except` swallowed everything and then the
        # unbound `p` raised NameError on the next line.
        print("iperf command could not be executed")
        return None
    out, err = p.communicate()
    fields = out.split()
    # Field 37 of iperf's human-readable client output holds the bandwidth
    # value — TODO confirm against the installed iperf version's format.
    if len(fields) > 37:
        return fields[37]
    return None
def prom_export_metrics(server_ip,server_port):
    """Register the bandwidth gauge and loop forever to keep the process alive.

    set_function makes every Prometheus scrape of the gauge trigger a fresh
    iperf measurement against server_ip/server_port. Never returns.
    """
    metric = Gauge('bandwidth_measure', 'Current network bandwidth')
    metric.set_function(lambda: bandwidth_measure_metric(server_ip,server_port))
    #To run as process
    @metric.time()
    def process_request(t):
        """A dummy function that takes some time."""
        time.sleep(t)
    # Busy loop of short random sleeps; keeps the exporter's HTTP server serving.
    while True:
        process_request(random.random())
def main():
    """Parse CLI arguments, start the exporter endpoint, and begin measuring."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", help="IP address of iperf server")
    parser.add_argument("--port",help="port number the iperf server is listening",default=5001)
    args = parser.parse_args()
    # The iperf server IP is mandatory; bail out early when it is missing.
    if not args.ip:
        print("Please specify Iperf Server IP")
        exit()
    server_ip = args.ip
    start_http_server(8000)
    prom_export_metrics(args.ip, args.port)
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3230229 | <filename>streaming_helpers.py
import queue
import time
import numpy as np
class CameraInformation:
    """Single-slot frame buffer for one camera stream.

    A writer thread pushes the newest frame via write_frame (overwriting any
    unread one); a reader pulls it via read_frame, which also tracks whether
    the camera is considered online.
    """

    # Seconds read_frame waits for a frame before declaring the camera offline.
    _READ_TIMEOUT_S = 2

    def __init__(self, cam_id: str):
        # maxsize=1: the queue only ever holds the most recent frame.
        self._frame_queue: queue.Queue = queue.Queue(maxsize=1)
        self._frame_shape = None   # shape of the last written frame (for blank fallbacks)
        self._last_frame_time = None
        self.is_online = True
        self.node_id = cam_id

    def write_frame(self, frame):
        """Replace any queued frame with `frame` and record its shape/time."""
        try:
            # Drop the stale frame so put_nowait below cannot block/raise Full.
            self._frame_queue.get_nowait()
        except queue.Empty:
            pass
        self._frame_shape = frame.shape
        self._last_frame_time = time.time()
        self._frame_queue.put_nowait(frame)

    def read_frame(self,):
        """Return the newest frame, or a black frame if none arrives in time."""
        try:
            frame = self._frame_queue.get(timeout=self._READ_TIMEOUT_S)
            if not self.is_online:
                self.is_online = True
            return frame
        except queue.Empty:
            if self.is_online:
                self.is_online = False
            if self._frame_shape is None:
                # No frame was ever written, so the geometry is unknown;
                # np.zeros(None) would raise TypeError — return a 1x1 black frame.
                return np.zeros((1, 1))
            return np.zeros(self._frame_shape)
| StarcoderdataPython |
1798481 | <gh_stars>0
from flask import Flask, jsonify, render_template, request, redirect, session, url_for
import requests,os,json
from flask_googlemaps import Map
from flask_googlemaps import icons
from app import app, mainEngine, gMap
@app.route('/')
def index():
    """Landing page: render the customer login screen."""
    return render_template("mainlogin.html")
@app.route('/login', methods=('GET', 'POST'))
def login():
    """Customer sign-in; stores the email in the session and redirects on success."""
    failed = False
    if request.method == 'POST':
        email = request.form['email']
        password = request.form['password']
        # Assume failure: a successful login never reaches the final render.
        failed = True
        if mainEngine.login(email, password):
            session['email'] = email
            return redirect('/customer/home')
    return render_template("index.html", error=failed, success=False, change=False)
@app.route('/admin', methods=('GET', 'POST'))
def adminlogin():
    """Admin sign-in; stores the username in the session and redirects on success."""
    failed = False
    if request.method == 'POST':
        admin_name = request.form['username']
        admin_pass = request.form['password']
        failed = True  # only shown when admin_login rejects the credentials
        if mainEngine.admin_login(admin_name, admin_pass):
            session['username'] = admin_name
            return redirect('/admin/home')
    return render_template("adminLogin.html", error=failed)
@app.route('/admin/home')
def admin_home():
    """Admin dashboard; assumes an admin is already logged in (session['username'] set)."""
    return render_template("admin/home.html", username = session['username'] )
@app.route('/admin/carlist')
def admin_car():
    """Admin: list every car in the fleet."""
    cars = mainEngine.getAllCars()
    return render_template("admin/carlist.html", cars = cars )
@app.route('/admin/addcar', methods = ('GET','POST'))
def addcar():
    """Admin: create a new car after validating the registration plate is unused."""
    success = True
    if request.method=='POST':
        name = request.form['name']
        colour = request.form['color']
        description = request.form['description']
        capacity = request.form['capacity']
        registration_plate = request.form['regPlate']
        fuel_type = request.form['fuel']
        transmission = request.form['transmission']
        type_ = request.form['type']
        image = request.form['image']
        longitude = request.form['longitude']
        latitude = request.form['latitude']
        # Only insert when the plate is not already registered.
        if mainEngine.carRegistrationValidation(registration_plate):
            mainEngine.insertCar(name, colour, description, capacity, registration_plate, fuel_type, transmission, type_, longitude, latitude, image)
            return redirect("/admin/carlist")
        success = False
    return render_template("admin/addcar.html", success = success)
@app.route('/admin/editcarpage', methods = ['POST'])
def updatecarpage():
    """Admin: render the edit form pre-filled with the selected car's data."""
    car_id = request.form['id']
    car = mainEngine.getCar(car_id)[0]
    return render_template('admin/editcar.html', car=car)
@app.route('/admin/edit-car', methods = ['POST'])
def editcar():
    """Admin: persist edits to a car; rejects a plate already used by another car."""
    success = True
    car_id = request.form['id']
    car = mainEngine.getCar(car_id)[0]
    name = request.form['name']
    colour = request.form['color']
    description = request.form['description']
    capacity = request.form['capacity']
    registration_plate = request.form['regPlate']
    fuel_type = request.form['fuel']
    transmission = request.form['transmission']
    type_ = request.form['type']
    image = request.form['image']
    longitude = request.form['longitude']
    latitude = request.form['latitude']
    if mainEngine.editCarRegistrationValidation(registration_plate, car_id):
        mainEngine.editCar(car_id, name, colour, description, capacity, registration_plate, fuel_type, transmission, type_, longitude, latitude, image)
        return redirect("/admin/carlist")
    success = False
    return render_template("admin/editcar.html", car=car, success = success)
@app.route('/admin/userlist')
def userlist():
    """Admin: list all customers with their addresses and driver licenses."""
    customers = mainEngine.getAllCustomers()
    addresses = mainEngine.getAllAddresses()
    licenses = mainEngine.getAllLicenses()
    return render_template("admin/userlist.html", customers = customers, addresses = addresses, licenses = licenses)
@app.route('/register', methods = ('GET', 'POST'))
def register():
    """Register a new customer: personal details, geocoded address, and license.

    Validation order: unique email -> DOB -> license dates. On success the
    address is geocoded via the Google Maps API before being stored.
    """
    error = False
    validDOB = True
    validLicense = True
    if request.method=='POST':
        # personal details
        fname = request.form['fname']
        lname = request.form['lname']
        dob = request.form['date']
        phone = request.form['phone']
        email = request.form['email']
        password = request.form['password']
        # address details
        unit = request.form['unitno']
        street = request.form['street']
        suburb = request.form['suburb']
        state = request.form['state']
        postcode = request.form['postcode']
        string_address = postcode + ", " + state
        # license details
        lnumber = request.form['lnumber']
        country = request.form['country']
        license_state = request.form['license-state']
        issue_date = request.form['idate']
        expiry_date = request.form['edate']
        if mainEngine.check_duplicate_email(email) is False:
            if mainEngine.validateDOB(dob) is True:
                if mainEngine.validateLicenseDate(issue_date, expiry_date):
                    mainEngine.register(fname, lname, dob, email, password, phone)
                    cust_id = mainEngine.getCustomer(email)[0]
                    # Geocode postcode+state to lat/lng for the address record.
                    # NOTE(review): '<KEY>' looks like a scrubbed API key placeholder —
                    # the geocoding call presumably fails until a real key is configured.
                    url = 'https://maps.googleapis.com/maps/api/geocode/json'
                    params = {'sensor': 'false', 'address': string_address, 'key': '<KEY>'}
                    r = requests.get(url, params=params)
                    results = r.json()['results']
                    location = results[0]['geometry']['location']
                    mainEngine.insertAddress(cust_id, unit, street, suburb, state, postcode, location['lat'], location['lng'])
                    mainEngine.insertLicense(cust_id, lnumber, country, license_state, issue_date, expiry_date)
                    return render_template("index.html", error = False, success = True, change = False)
                else:
                    validLicense = False
            else:
                validDOB = False
        else:
            error = True
    return render_template("register.html", error = error, validDOB = validDOB, validLicense = validLicense)
@app.route('/logout')
def logout():
    """Clear the session (customer or admin) and return to the landing page."""
    session.clear()
    return redirect("/")
@app.route('/customer/home')
def home():
    """Customer dashboard; assumes session['email'] was set at login."""
    return render_template("customer/home.html", email = session['email'])
@app.route('/customer/booking', methods = ('GET', 'POST'))
def booking():
    """Book a car: POST validates times and shows the payment page; GET shows
    available cars on a map."""
    valid = True
    if request.method=='POST':
        car_id = request.form['car']
        start_date = request.form['sdate']
        start_time = request.form['stime']
        end_date = request.form['edate']
        end_time = request.form['etime']
        if mainEngine.validateBookingTime(start_date, start_time, end_date, end_time):
            time = mainEngine.getTotalBookingTime(start_date, start_time, end_date, end_time)
            cust = mainEngine.getCustomer(session['email'])
            # cust[7] is the plan flag: 0 = standard (per-car rate), otherwise
            # premium with a flat rate of 15 — TODO confirm against schema.
            plan = cust[7]
            cars = mainEngine.getCar(car_id)
            cost = 0
            if plan == 0:
                cost = mainEngine.getTotalBookingCost(start_date, start_time, end_date, end_time, cars[0][10])
            else:
                cost = mainEngine.getTotalBookingCost(start_date, start_time, end_date, end_time, 15)
            return render_template("customer/bookingPayment2.html", cars=cars, start_date=start_date, start_time=start_time, end_date=end_date, end_time=end_time, time=time, cost=cost, success=True)
        else:
            valid = False
    cars = mainEngine.getAvalaibleCars()
    # Build map markers as (lat, lng, label) tuples from car columns 12/11/1.
    mark = []
    if cars:
        for car in cars:
            mark.append((float(car[12]), float(car[11]), car[1]))
    gmap = Map(
        identifier="gmap",
        varname="gmap",
        #MELBOURNE COORDINATE
        lat=-37.8136,
        lng=144.9631,
        markers={
            icons.dots.blue: mark,
        },
        style="height:max-500px;max-width:1000px;margin:0;margin-left:auto;margin-right:auto;",
    )
    return render_template("customer/booking2.html", cars = cars, gmap = gmap, valid = valid)
@app.route('/customer/booking-summary', methods = ['POST'])
def booking_summary():
    """Finalize a booking: validate the card, persist the booking, and mark
    the car unavailable; invalid cards re-render the payment page."""
    if request.method=='POST':
        car_id = request.form['car']
        start_date = request.form['sdate']
        start_time = request.form['stime']
        end_date = request.form['edate']
        end_time = request.form['etime']
        time = request.form['time']
        cost = request.form['cost']
        name = request.form['namecard']
        card = request.form['cardnumber']
        date = request.form['date']
        cvv = request.form['cvv']
        cust = mainEngine.getCustomer(session['email'])
        if mainEngine.card_validation(name, card, date, cvv):
            mainEngine.makeBooking(cust[0], car_id, start_date, start_time, end_date, end_time, time, cost)
            mainEngine.setCarUnavalaible(car_id)
            return redirect("/customer/ongoing-booking")
        else:
            cars = mainEngine.getCar(car_id)
            return render_template("customer/bookingPayment2.html", cars=cars, start_date=start_date, start_time=start_time, end_date=end_date, end_time=end_time, time=time, cost=cost, success=False)
@app.route('/reset-password', methods = ('GET', 'POST'))
def reset():
    """Reset a password after verifying name/DOB/email and matching confirmation."""
    invalid = False
    diff = False
    if request.method=='POST':
        fname = request.form['fname']
        lname = request.form['lname']
        dob = request.form['date']
        email = request.form['email']
        password = request.form['pass']
        # NOTE(review): '<PASSWORD>' looks like a scrubbed form-field name — the
        # confirmation field name should be verified against the template.
        confirm_password = request.form['<PASSWORD>']
        cust_id = mainEngine.validateCustCredentials(email, fname, lname, dob)
        if cust_id is not None:
            if mainEngine.confirmPassword(password, confirm_password):
                mainEngine.resetPassword(password, cust_id)
                return render_template("index.html", error = False, success = False, change = True)
            else:
                diff = True
        else:
            invalid = True
    return render_template("reset-1.html", invalid = invalid, diff = diff)
@app.route('/customer/ongoing-booking')
def ongoing_booking():
    """List the logged-in customer's ongoing bookings."""
    cust_id = mainEngine.getCustomer(session['email'])[0]
    bookings = mainEngine.getPersonalOngoingBooking(cust_id)
    cars = mainEngine.getAllCars()
    return render_template("customer/ongoingBooking.html", bookings = bookings, cars = cars)
@app.route('/customer/booking-history')
def booking_history():
    """List the logged-in customer's past bookings."""
    cust_id = mainEngine.getCustomer(session['email'])[0]
    bookings = mainEngine.getPersonalBookingHistory(cust_id)
    cars = mainEngine.getAllCars()
    return render_template("customer/bookingHistory.html", bookings = bookings, cars = cars)
@app.route('/customer/search-car', methods = ('GET', 'POST'))
def search_car():
    """Search cars by column/value and plot matches on a map; GET shows an empty map."""
    cars = {}
    if request.method=='POST':
        column = request.form['column']
        search = request.form['search']
        cars = mainEngine.searchCars(column, search)
    # Markers are (lat, lng, label) from car columns 12/11/1.
    mark = []
    if cars:
        for car in cars:
            mark.append((float(car[12]), float(car[11]), car[1]))
    gmap = Map(
        identifier="gmap",
        varname="gmap",
        #MELBOURNE COORDINATE
        lat=-37.8136,
        lng=144.9631,
        markers={
            icons.dots.blue: mark,
        },
        style="height:max-500px;max-width:1000px;margin:0;margin-left:auto;margin-right:auto;",
    )
    return render_template("/customer/searchCar.html", gmap=gmap, cars= cars)
@app.route('/customer/search-car-near-me', methods = ['POST'])
def search_car_by_location():
    """Show available cars ordered/filtered by distance from the customer's address."""
    cust_id = mainEngine.getCustomer(session['email'])[0]
    address = mainEngine.getAddress(cust_id)
    car_list = mainEngine.getAvalaibleCars()
    cars = mainEngine.distance(car_list, address)
    mark = []
    if cars:
        for car in cars:
            mark.append((float(car[12]), float(car[11]), car[1]))
    gmap = Map(
        identifier="gmap",
        varname="gmap",
        #MELBOURNE COORDINATE
        # Map is centred on the customer's stored lat/lng (address columns 7/8).
        lat=address[7],
        lng=address[8],
        markers={
            icons.dots.blue: mark,
        },
        style="height:max-500px;max-width:1000px;margin:0;margin-left:auto;margin-right:auto;",
    )
    return render_template("/customer/searchCar.html", gmap=gmap, cars= cars)
@app.route('/customer/plan')
def plan():
    """Show the pricing page with the customer's current plan (cust column 7)."""
    cust = mainEngine.getCustomer(session['email'])
    plan = cust[7]
    return render_template("/customer/price.html", plan = plan)
@app.route('/customer/set-plan')
def set_standard():
    """Toggle/persist the customer's plan, then return to the plan page."""
    cust = mainEngine.getCustomer(session['email'])
    mainEngine.set_plan(cust[7],cust[0])
    return redirect("/customer/plan")
@app.route('/customer/cancel-booking', methods=['POST'])
def cancel_booking():
    """Cancel a booking and release its car back into the available pool."""
    target_id = request.form['booking_id']
    mainEngine.cancelBooking(target_id)
    cancelled = mainEngine.getBooking(target_id)
    # Column 2 of the booking row is the car id.
    mainEngine.setCarAvalaible(cancelled[0][2])
    return redirect("/customer/ongoing-booking")
@app.route('/customer/complete-booking', methods=['POST'])
def complete_booking():
    """Mark a booking completed and release its car back into the available pool."""
    target_id = request.form['booking_id']
    mainEngine.completeBooking(target_id)
    completed = mainEngine.getBooking(target_id)
    # Column 2 of the booking row is the car id.
    mainEngine.setCarAvalaible(completed[0][2])
    return redirect("/customer/booking-history")
@app.route('/admin/delete-car', methods=['POST'])
def delete_car():
    """Admin: remove a car from the fleet and return to the car list."""
    mainEngine.deleteCar(request.form['car_id'])
    return redirect("/admin/carlist")
@app.route('/customer/plan-summary', methods = ('GET', 'POST'))
def plan_summary():
    """Handle premium-plan payment: validate the card, extend premium expiry,
    and only switch plans once premium is confirmed valid."""
    cust = mainEngine.getCustomer(session['email'])
    if request.method=='POST':
        name = request.form['namecard']
        card = request.form['cardnumber']
        date = request.form['date']
        cvv = request.form['cvv']
        if mainEngine.card_validation(name, card, date, cvv):
            mainEngine.set_premium_expiry(cust[0])
        else:
            return render_template("customer/planPayment2.html", success=False)
    # Re-read the customer: cust[8] holds the premium expiry set above.
    cust = mainEngine.getCustomer(session['email'])
    if not cust[8] or not mainEngine.validate_premium(cust[8]):
        return render_template("customer/planPayment2.html", success=True)
    return redirect("/customer/set-plan")
| StarcoderdataPython |
3214227 | import torch
from torch import nn
class Model(nn.Module):
    """Conv/FC encoder with a set-based predictor head.

    ``get_repr`` sums per-example 64-d embeddings within each batch element;
    ``forward`` scores a held-out example against that pooled representation
    with a binary (logit) loss.
    """

    def __init__(self):
        super().__init__()
        self._conv_part = nn.Sequential(
            nn.Conv2d(3, 6, 5, padding=2),
            nn.ReLU(),
            nn.MaxPool2d(2),
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        self._fc_part = nn.Sequential(
            nn.Linear(16*5*5, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU()
        )
        self._pred_part = nn.Sequential(
            nn.Linear(64, 64),
            nn.Tanh()
        )
        self._loss = nn.BCEWithLogitsLoss()

    def _encode(self, feats):
        """Run the conv+fc trunk on a flat image batch -> (N, 64) embeddings.

        Shared by get_repr and forward (previously duplicated inline).
        """
        conv = self._conv_part(feats)
        return self._fc_part(conv.view(feats.size(0), 16*5*5))

    def get_repr(self, batch):
        """Pooled (sum) embedding of batch.feats_in, shape (n_batch, 64)."""
        feats_in = batch.feats_in
        # Fix: the original also fetched and moved batch.feats_out to the GPU
        # even though get_repr never uses it.
        if next(self.parameters()).is_cuda:
            feats_in = feats_in.cuda()
        n_batch, n_ex, c, w, h = feats_in.shape
        emb = self._encode(feats_in.view(n_batch * n_ex, c, w, h))
        return emb.view(n_batch, n_ex, 64).sum(dim=1)

    def forward(self, batch):
        """Return (loss, accuracy, predicted labels, predictor vectors)."""
        feats_in = batch.feats_in
        feats_out = batch.feats_out
        label_out = batch.label_out
        if next(self.parameters()).is_cuda:
            feats_in = feats_in.cuda()
            feats_out = feats_out.cuda()
            label_out = label_out.cuda()
        n_batch, n_ex, c, w, h = feats_in.shape
        emb_in = self._encode(feats_in.view(n_batch * n_ex, c, w, h))
        predictor = self._pred_part(emb_in.view(n_batch, n_ex, 64).sum(dim=1))
        rep_out = self._encode(feats_out)
        # Dot product between predictor and held-out embedding is the logit.
        score = (predictor * rep_out).sum(dim=1)
        labels = (score > 0).float()
        loss = self._loss(score, label_out)
        return loss, (labels == label_out).float().mean(), labels, predictor
| StarcoderdataPython |
# In this exercise you implement a small set of functions that work with line
# segments on a two-dimensional plane. A segment is encoded as a pair of pairs:
# ((x1, y1), (x2, y2)) — the nested pairs are the segment's endpoints.
# You need to implement four functions:
#
# is_degenerated must return True if the segment degenerates to a point (its endpoints coincide);
# is_vertical must return True if the segment is vertical;
# is_horizontal must return True if the segment is horizontal;
# is_inclined must return True if the segment is inclined (neither vertical nor horizontal).
def is_degenerated(line):
    """Return True if the segment collapses to a point (both endpoints coincide)."""
    start, end = line
    return start == end
def is_vertical(line):
    """Return True iff the segment is vertical: shared x, distinct y."""
    (ax, ay), (bx, by) = line
    return ax == bx and ay != by
def is_horizontal(line):
    """Return True iff the segment is horizontal: distinct x, shared y."""
    (ax, ay), (bx, by) = line
    return ay == by and ax != bx
def is_inclined(line):
    """Return True iff the segment is inclined: both coordinates differ."""
    (ax, ay), (bx, by) = line
    return not (ax == bx or ay == by)
# Smoke checks: each call below should print True.
print(is_degenerated(((10, 10), (10, 10))))
print(is_vertical(((10, 11), (10, 10))))
print(is_horizontal(((11, 10), (10, 10))))
print(is_inclined(((10, 11), (12, 10))))
| StarcoderdataPython |
4820067 | """
Code Generator - https://github.com/wj-Mcat/code-generator
Authors: <NAME> (吴京京) <https://github.com/wj-Mcat>
2020-now @ Copyright wj-Mcat
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import annotations
from typing import List, Optional, Union
from dataclasses import dataclass
import json
from pyee import AsyncIOEventEmitter # type: ignore
from wechaty import Contact, Room # type: ignore
from wechaty_puppet import ( # type: ignore
Puppet, FileBox, RoomQueryFilter,
MiniProgramPayload, UrlLinkPayload, MessageQueryFilter,
PuppetOptions, EventType,
get_logger,
EventMessagePayload)
from wechaty_puppet.schemas.types import ( # type: ignore
MessagePayload,
ContactPayload,
FriendshipPayload,
ImageType,
RoomInvitationPayload,
RoomPayload,
RoomMemberPayload
)
from wechaty_puppet_mock.exceptions import WechatyPuppetMockError
from wechaty_puppet_mock.mock.mocker import Mocker, MockerResponse
log = get_logger('PuppetMock')
@dataclass
class PuppetMockOptions(PuppetOptions):
    """options for puppet mock"""
    # Mocker driving the fake Wechaty environment; PuppetMock requires it.
    mocker: Optional[Mocker] = None
# pylint: disable=too-many-public-methods
class PuppetMock(Puppet):
    """In-memory Puppet implementation backed by a Mocker environment.

    Contacts, rooms and messages live in ``self.mocker.environment``; events
    are re-emitted through an AsyncIOEventEmitter. Many puppet APIs that the
    mock does not need are intentionally left as ``pass`` stubs.
    """
    def __init__(self, options: PuppetMockOptions, name: str = 'puppet-mock'):
        super().__init__(options, name)
        # The mocker is mandatory: every data-access method delegates to it.
        if not options.mocker:
            raise WechatyPuppetMockError('mocker in options is required')
        self.mocker: Mocker = options.mocker
        self.started: bool = False
        self.emitter = AsyncIOEventEmitter()

    async def message_image(self, message_id: str,
                            image_type: ImageType) -> FileBox:
        """get image from message"""

    async def ding(self, data: Optional[str] = None):
        pass

    def on(self, event_name: str, caller):
        """listen event"""
        self.emitter.on(event_name, caller)

    def listener_count(self, event_name: str) -> int:
        """get the event count of the specific event"""
        listeners = self.emitter.listeners(event=event_name)
        return len(listeners)

    async def start(self) -> None:
        """star the account"""
        self.started = True
        if not self.mocker:
            raise WechatyPuppetMockError(
                'PuppetMock should not start without mocker'
            )

        def _emit_events(response: MockerResponse):
            """emit the events from the mocker"""
            # Mocker payloads arrive as JSON strings; only message events are
            # translated and re-emitted here.
            payload_data = json.loads(response.payload)
            if response.type == int(EventType.EVENT_TYPE_MESSAGE):
                log.debug('receive message info <%s>', payload_data)
                event_message_payload = EventMessagePayload(
                    message_id=payload_data['messageId'])
                self.emitter.emit('message', event_message_payload)

        self.mocker.on('stream', _emit_events)

    async def stop(self):
        """stop the account"""
        self.started = False

    async def contact_list(self) -> List[str]:
        """get all of the contact"""
        return self.mocker.get_contact_ids()

    async def tag_contact_delete(self, tag_id: str) -> None:
        pass

    async def tag_favorite_delete(self, tag_id: str) -> None:
        pass

    async def tag_contact_add(self, tag_id: str, contact_id: str):
        pass

    async def tag_favorite_add(self, tag_id: str, contact_id: str):
        pass

    async def tag_contact_remove(self, tag_id: str, contact_id: str):
        pass

    async def tag_contact_list(self,
                               contact_id: Optional[str] = None) -> List[str]:
        pass

    async def message_send_text(self, conversation_id: str, message: str,
                                mention_ids: List[str] = None) -> str:
        """send the text message to the specific contact/room"""
        conversation: Union[Room, Contact]
        # Room ids are prefixed with 'room-'; anything else is a contact.
        if conversation_id.startswith('room-'):
            conversation = self.mocker.Room.load(conversation_id)
        else:
            conversation = self.mocker.Contact.load(conversation_id)

        message_id = self.mocker.send_message(
            talker=self.mocker.login_user,
            conversation=conversation,
            msg=message
        )
        return message_id

    async def message_send_contact(self, contact_id: str,
                                   conversation_id: str) -> str:
        pass

    async def message_send_file(self, conversation_id: str,
                                file: FileBox) -> str:
        pass

    async def message_send_url(self, conversation_id: str, url: str) -> str:
        pass

    async def message_send_mini_program(self,
                                        conversation_id: str,
                                        mini_program: MiniProgramPayload
                                        ) -> str:
        pass

    async def message_search(self, query: Optional[MessageQueryFilter] = None
                             ) -> List[str]:
        pass

    async def message_recall(self, message_id: str) -> bool:
        pass

    async def message_payload(self, message_id: str) -> MessagePayload:
        """get the message payload"""
        return self.mocker.environment.get_message_payload(
            message_id=message_id)

    async def message_forward(self, to_id: str, message_id: str):
        pass

    async def message_file(self, message_id: str) -> FileBox:
        """get the file-box from message instance

        save the file-box data in message_payload.text field to avoid creating a
        new structure to support this feature
        """
        message_payload = self.mocker.environment.get_message_payload(
            message_id=message_id
        )
        return FileBox.from_json(message_payload.text)

    async def message_contact(self, message_id: str) -> str:
        """get the message Contact id info

        text field save the message contact_id info
        """
        message_payload = self.mocker.environment.get_message_payload(
            message_id=message_id
        )
        return message_payload.text

    async def message_url(self, message_id: str) -> UrlLinkPayload:
        """get the url link """

    async def message_mini_program(self, message_id: str) -> MiniProgramPayload:
        pass

    async def contact_alias(self, contact_id: str,
                            alias: Optional[str] = None) -> str:
        """get/save the contact alias"""
        contact_payload = self.mocker.environment.\
            get_contact_payload(contact_id)
        # No alias argument -> getter; otherwise setter that persists and echoes it.
        if not alias:
            return contact_payload.alias

        contact_payload.alias = alias
        self.mocker.environment.update_contact_payload(contact_payload)
        return alias

    async def contact_payload_dirty(self, contact_id: str):
        pass

    async def contact_payload(self, contact_id: str) -> ContactPayload:
        """get the contact payload"""
        return self.mocker.environment.get_contact_payload(contact_id)

    async def contact_avatar(self, contact_id: str,
                             file_box: Optional[FileBox] = None) -> FileBox:
        """get the contact avatar"""
        contact_payload = self.mocker.environment.\
            get_contact_payload(contact_id)
        # Avatars are stored as base64 on the contact payload; getter returns a
        # FileBox, setter stores the new image. Note the setter path returns None.
        if not file_box:
            return FileBox.from_base64(
                contact_payload.avatar,
                name=f'{contact_payload.name}.png'
            )
        contact_payload.avatar = file_box.base64
        self.mocker.environment.update_contact_payload(contact_payload)

    async def contact_tag_ids(self, contact_id: str) -> List[str]:
        pass

    def self_id(self) -> str:
        # Id of the currently logged-in mock user.
        return self.mocker.login_user.contact_id

    async def friendship_search(self, weixin: Optional[str] = None,
                                phone: Optional[str] = None) -> Optional[str]:
        pass

    async def friendship_add(self, contact_id: str, hello: str):
        pass

    async def friendship_payload(self, friendship_id: str,
                                 payload: Optional[FriendshipPayload] = None
                                 ) -> FriendshipPayload:
        pass

    async def friendship_accept(self, friendship_id: str):
        pass

    async def room_list(self) -> List[str]:
        """get the room id list"""
        rooms = self.mocker.environment.get_room_payloads()
        return [room.id for room in rooms]

    async def room_create(self, contact_ids: List[str],
                          topic: str = None) -> str:
        """create the room"""
        room_payload = self.mocker.environment.new_room_payload(
            member_ids=contact_ids,
            topic=topic
        )
        return room_payload.id

    async def room_search(self, query: RoomQueryFilter = None) -> List[str]:
        pass

    async def room_invitation_payload(self,
                                      room_invitation_id: str,
                                      payload: Optional[
                                          RoomInvitationPayload] = None
                                      ) -> RoomInvitationPayload:
        pass

    async def room_invitation_accept(self, room_invitation_id: str):
        pass

    async def contact_self_qr_code(self) -> str:
        pass

    async def contact_self_name(self, name: str):
        pass

    async def contact_signature(self, signature: str):
        pass

    async def room_payload(self, room_id: str) -> RoomPayload:
        """get the room payload"""
        return self.mocker.environment.get_room_payload(room_id)

    async def room_members(self, room_id: str) -> List[str]:
        """get the room member ids from environment

        Args:
            room_id (str): the union identification for room

        Returns:
            List[str]: room member ids
        """
        room_payload: RoomPayload = self.mocker.environment.get_room_payload(
            room_id)
        return room_payload.member_ids

    async def room_add(self, room_id: str, contact_id: str):
        """add a contact to a room"""
        self.mocker.add_contact_to_room(
            contact_ids=[contact_id],
            room_id=room_id
        )

    async def room_delete(self, room_id: str, contact_id: str):
        pass

    async def room_quit(self, room_id: str):
        pass

    async def room_topic(self, room_id: str, new_topic: str):
        pass

    async def room_announce(self, room_id: str,
                            announcement: str = None) -> str:
        pass

    async def room_qr_code(self, room_id: str) -> str:
        pass

    async def room_member_payload(self, room_id: str,
                                  contact_id: str) -> RoomMemberPayload:
        pass

    async def room_avatar(self, room_id: str) -> FileBox:
        pass

    async def logout(self):
        pass

    async def login(self, user_id: str):
        """login the user data"""
        self.mocker.login(user_id=user_id)
1796884 | <reponame>ryanapfel/clustering
from src.utils import Cluster, Scenes, Users, User
import pandas as pd
import numpy as np
import math
import random
class K_Mean:
    def __init__(self, _df, _K):
        """Build per-user and per-scene statistics for K-means-style clustering.

        Args:
            _df: response dataframe with columns user, emotion, sceneIdx, contentIdx.
            _K: number of clusters.
        """
        self.df = _df
        self.emotions = self.df.emotion.unique()
        self.emotionMap = {e: idx for idx, e in enumerate(self.emotions )}
        self.M = self.emotions.shape[0]
        self.K = _K
        self.uniqueUsers = self.df.user.unique()

        # create hash values for contents and scenes and assign as index
        self.df['hash'] = self.df.apply(lambda x: hash((x.sceneIdx, x.contentIdx)), axis=1)

        userInfo = Users(self.df, self.uniqueUsers)
        self.globalScene = Scenes(self.df, self.uniqueUsers)
        # One User wrapper per raw user id, keyed by that id.
        self.users = {}
        for ithU in self.uniqueUsers:
            self.users[ithU] = User(self.K, self.emotions, userInfo[ithU], ithU)

        self.clusters = []
        # Set of user ids that currently have a cluster assignment.
        self.uInClust = set()
    def oneVeresusAll(self,epochs, n=40, multiplier=3):
        """Run the full clustering loop: initialize, then alternate assign/update.

        NOTE(review): the name presumably means "one versus all"; the typo is
        kept because callers may depend on it.
        """
        self.initializeClusters(n=n, multiplier=multiplier, debug=False)
        for i in range(epochs):
            self.cUsers(self.uniqueUsers, scenes='all')
            self.assignClusters(method='all', debug=False)
            self.updateCentroid2()
    def initializeClusters(self, n=40, multiplier=3, debug=False):
        """Pick K maximally-heterogeneous seed users and bootstrap the clusters."""
        # get users w/lots of responses and high entropy
        usersRemain = self.getKUsers_entropy(n, self.K * multiplier)
        #get jp matrices for users
        self.calculate1vAll(usersRemain)
        #calculate the K most heterogeneous centers
        points = self.getTopCenters(usersRemain)
        # assign K users to cluster
        for k in range(self.K):
            self.clusters.append(Cluster(self.K, self.emotions, self.df, [points[k]], k ))
            self.users[points[k]].setCluster(k)
            self.uInClust.add(points[k])
        # update centroid so user is now centroid of cluster
        self.updateCentroid()

        # get the scenes and corresponding users that the K users currently in cluster have all responded to
        usersToUpdate, sceneToUpdate = self.clustSceneIntersection()
        #calculate all users that have responded to scenes in the cluster
        self.cUsers(users=usersToUpdate, scenes=sceneToUpdate)
        self.cUsers(users=points, scenes=sceneToUpdate)

        self.assignClusters(method='selection', debug=debug, users=usersToUpdate)
        self.updateCentroid()
    def calculate1vAll(self, users ):
        """Reset and refill each user's struct against the global scene responses."""
        for ithUser in users:
            _u = self.users[ithUser]
            _u.struct.reset()
            scenes = _u.scenes
            u_responses = _u.emotions
            for idx, ithScene in enumerate(scenes):
                u_response = u_responses[idx]
                # globalScene[scene][1] holds everyone's responses to that scene.
                o_responses = self.globalScene[ithScene][1]
                for k in range(self.K):
                    _u.updateStruct(k, u_response, o_responses)
    def setUserClusters(self,_u, u_emotion, scene):
        """Update _u's struct for one (scene, emotion) pair against every cluster."""
        for k in self.clusters:
            users_k = k.users
            # Last element of a cluster's sceneInfo entry is its members' responses.
            responses_k = k.sceneInfo[scene][-1]
            _u.updateStruct( k.clusterId , u_emotion, responses_k)
# def pcUsers(self, users, scenes='all', debug=False, parallel=False ):
# p = Pool(processes=2)
# keys, values= zip(*self.users.items())
# processed_values = p.map( self.pcessUser, values)
# p.close()
# p.join()
# return processed_values
def pcessUser(self, _u):
_u.struct.reset()
for sceneIdx, scene in enumerate(_u.scenes):
u_emotion = _u.emotions[sceneIdx]
if type(scenes) != list:
self.setUserClusters(_u, u_emotion, scene)
elif scene in scenes:
self.setUserClusters(_u, u_emotion, scene)
    def cUsers(self, users, scenes='all', debug=False, parallel=False ):
        """Recompute each listed user's struct against the current clusters.

        scenes: 'all' (any non-list) processes every scene; a list restricts
        processing to scenes in that list. `debug` and `parallel` are unused.
        """
        for ithUser in users:
            _u = self.users[ithUser]
            _u.struct.reset()
            for sceneIdx, scene in enumerate(_u.scenes):
                u_emotion = _u.emotions[sceneIdx]
                if type(scenes) != list:
                    self.setUserClusters(_u, u_emotion, scene)
                elif scene in scenes:
                    self.setUserClusters(_u, u_emotion, scene)
    def calculateUsers(self, method='inCluster', debug=False, **kwargs):
        """Dispatch user recalculation by method name.

        NOTE(review): this method looks unfinished — `self.cAll` is not defined
        anywhere in this class (method='all' would raise AttributeError), and
        the 'sceneAndUser' branch binds `users`/`scenes` without using them.
        """
        if method == 'all':
            self.cAll(debug=debug, parallel=False)
        elif method == 'sceneAndUser' and 'users' in kwargs and 'scenes' in kwargs:
            users = kwargs['users']
            scenes = kwargs['scenes']
        else:
            print("Calculate Users Error")
        print("Done", method)
    def assignClusters(self, method='all', debug=False, **kwargs):
        """Assign each user to the cluster whose centroid struct is nearest (min L1).

        NOTE(review): if `method` is neither 'all' nor 'selection'-with-users,
        `users` is unbound and the loop below raises NameError.
        """
        if method == 'all':
            users = self.uniqueUsers
        elif method == 'selection' and 'users' in kwargs:
            users = kwargs['users']

        newUsersInCluster = [[] for i in range(self.K)]
        # update user cluster assignment and keep track of new users in cluster
        for user in users:
            _u = self.users[user]
            _u.struct.getProb()
            clustToUserNorms = np.zeros(self.K)
            for idx, k in enumerate(self.clusters):
                clustToUserNorms[idx] = k.struct.AvgL1_Norm(_u.struct)

            # find the cluster index with lowest norm
            kAssignIdx = np.argmin(clustToUserNorms)
            # set cluster for user _u to cluster with min norm
            _u.setCluster(kAssignIdx)
            # assign user to cluster at [kAssignIdx]
            newUsersInCluster[kAssignIdx].append(user)
            # update set (keeps track of users that need a cluster)
            self.uInClust.add(_u.id)

        for i,_k in enumerate(self.clusters):
            _k.setUsers( newUsersInCluster[i])
def updateCentroid(self, debug=False):
for clust in self.clusters:
clust.updateCentroid(self.users)
def updateCentroid2(self):
for c1 in self.clusters:
for c2 in self.clusters:
c1.updateCentroid2(c2, self.users)
    def getTopCenters(self, usersRemain):
        """Pick self.K seed users via farthest-point (k-means++-like) selection.

        The first seed is chosen uniformly at random; each subsequent seed
        maximises the summed avg-L1 distance to the seeds picked so far.

        Args:
            usersRemain: list of candidate user ids.  NOTE(review): mutated
                in place — every chosen seed is popped from this list; confirm
                callers expect the side effect.

        Returns:
            List of self.K chosen user ids.
        """
        # randomly choose one from users to be k=1 cluster
        k1idx, k1 = random.choice(list(enumerate(usersRemain)))
        points = [k1]
        usersRemain.pop(k1idx)
        # find remaining clusters by maximizing distance
        for kth in range(self.K - 1):
            maxForK = []
            for uIdx, user in enumerate(usersRemain):
                # maximize distance from those in points
                collectiveDist = 0
                for p in points:
                    collectiveDist += self.users[user].struct.AvgL1_Norm(self.users[p].struct)
                maxForK.append(collectiveDist)
            argMaxU = np.argmax(np.array(maxForK))
            points.append(usersRemain[argMaxU])
            usersRemain.pop(argMaxU)
        return points
    def clustSceneIntersection(self):
        """Find scenes common to all clusters and the users who answered them.

        Uses the first user of each cluster as that cluster's representative
        scene list.  NOTE(review): assumes every cluster has at least one
        user and that the intersection is non-empty — confirm.

        Returns:
            (union, intersection): union — user ids that responded to at
            least one scene in the intersection; intersection — scene ids
            shared by all K clusters' representative users.
        """
        listofScenes = []
        for i in range(self.K):
            listofScenes.append(self.users[self.clusters[i].users[0]].scenes)
        intersection = listofScenes[0]
        for i in range(1, self.K):
            intersection = np.intersect1d(intersection, listofScenes[i])
        totUsers = []
        for scenes in intersection:
            totUsers.append(np.unique(self.globalScene[scenes][0]))
        totUsers = np.array(totUsers)
        union = totUsers[0]
        for i in range(1, len(intersection)):
            union = np.union1d(union, totUsers[i])
        # return users (union) that have responded to scenes in intersection
        return union, intersection
def getKUsers_entropy(self, minScenes, nUsers):
def Sort_Tuple(tup):
tup.sort(key = lambda x: x[1])
return tup
# gets most responded to scenes to ensure every user selected shares at least one scene
highestScenes = self.df.groupby('hash').agg({'user':pd.Series.nunique}).sort_values(by='user', ascending=False).reset_index().loc[:5, 'hash'].to_numpy()
kk12 = self.df[self.df['user'].isin(np.unique(self.globalScene[highestScenes[0]][0]))]
filterUsers = filterSceneCount(kk12, minScenes)
ar = []
for us in filterUsers:
t = np.unique(self.users[us].emotions, return_counts=True)
s_e = sum([(i / sum(t[1])) * math.log2((i / sum(t[1]))) for i in t[1]]) * -1
ar.append((us, s_e))
# get users w. highest entropy
sortedEntrop = Sort_Tuple(ar)[-nUsers:]
r = []
for i in sortedEntrop:
r.append(i[0])
return r
    def load_weights(self, path):
        """Intentional no-op stub: model persistence is not implemented."""
        pass
    def save_weights(self, path):
        """Intentional no-op stub: model persistence is not implemented."""
        pass
def filterSceneCount(df, minScenes):
    """Return the users in `df` that responded to at least `minScenes` scenes.

    Args:
        df: DataFrame with at least a 'user' column and a 'hash' (scene id)
            column; one row per (user, scene) response.
        minScenes: minimum number of *distinct* scene hashes per user.

    Returns:
        numpy array of the unique user ids meeting the threshold.
    """
    # Distinct scene count per user.
    scene_counts = df.groupby(df.user).agg({"hash": pd.Series.nunique})
    # Per-user boolean flag, flattened back to a (user, bb) frame for merging.
    mask = scene_counts.apply(lambda row: row.hash >= minScenes, axis=1).reset_index(name='bb')
    merged = df.merge(mask, on='user', how='left')
    # Boolean indexing directly on the flag column (was `kk.bb == True`).
    kept = merged.loc[merged.bb, :]
    return kept.user.unique()
| StarcoderdataPython |
3303845 | #!/usr/bin/env python
import npyscreen, curses
class MyTestApp(npyscreen.NPSAppManaged):
    """Three-screen demo application managed by npyscreen's form lifecycle."""
    def on_start(self):
        # When Application starts, set up the Forms that will be used.
        # These two forms are persistent between each edit.
        self.add_form("MAIN", MainForm, name="Screen 1", color="IMPORTANT", )
        self.add_form("SECOND", MainForm, name="Screen 2", color="WARNING", )
        # This one will be re-created each time it is edited.
        self.add_form_class("THIRD", MainForm, name="Screen 3", color="CRITICAL", )
    def on_clean_exit(self):
        # Called by the framework after a normal shutdown.
        npyscreen.notify_wait("Goodbye!")
    def change_form(self, name):
        # Switch forms.  NB. Do *not* call the .edit() method directly (which
        # would lead to a memory leak and ultimately a recursion error).
        # Instead, use the method .switchForm to change forms.
        self.switch_form(name)
        # By default the application keeps track of every form visited.
        # There's no harm in this, but we don't need it so:
        self.reset_history()
class MainForm(npyscreen.ActionForm):
    """Form with a ^T handler that cycles between the application's screens."""
    def create(self):
        # Called by npyscreen to populate the form's widgets.
        self.add(npyscreen.TitleText, name = "Text:", value= "Press ^T to change screens" )
        self.add_handlers({"^T": self.change_forms})
    def on_ok(self):
        # Exit the application if the OK button is pressed.
        self.parentApp.switch_form(None)
    def change_forms(self, *args, **keywords):
        # Cycle MAIN -> SECOND -> THIRD -> MAIN, keyed off this form's title.
        if self.name == "Screen 1":
            change_to = "SECOND"
        elif self.name == "Screen 2":
            change_to = "THIRD"
        else:
            change_to = "MAIN"
        # Tell the MyTestApp object to change forms.
        self.parentApp.change_form(change_to)
def main():
    """Build the demo application and hand control to its event loop."""
    app = MyTestApp()
    app.run()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1769739 | """
This module implements the bulk of Bot Evolution.
"""
import numpy as np
import copy
import settings
from utility import seq_is_equal, distance_between, angle_is_between, find_angle
from neural_network import NNetwork, sigmoid, softmax
class Population:
    """
    The environment of bots and food.

    Drives the evolutionary loop: bots that eat are cloned (possibly with
    mutated colours and network weights) while the lowest-scoring bots are
    culled.
    """
    def __init__(self, size, mutation_rate):
        # `mutation_rate` doubles as the per-offspring and per-weight
        # mutation probability (see feed()).
        assert(size >= 5)
        assert(0 < mutation_rate < 1)
        self.SIZE = size
        self.mutation_rate = mutation_rate
        self.bots = []
        self.food = []
        self.time_since_last_death = 0.0
        # The neural network will have 1 neuron in the input layer, 1 hidden
        # layer with 2 neurons, and 4 neurons in the output layer. The sigmoid
        # activation function will be used on the hidden layer, and a softmax
        # activation function will be used on the output layer. Input consists
        # of the bot's direction and if there is or isn't food in the bots field
        # of vision. Output consists of whether or not to move foward, turn
        # left, turn right, or do nothing.
        for i in range(size):
            random_rgb = (np.random.randint(30, 256), np.random.randint(30, 256), np.random.randint(30, 256))
            self.bots.append(Bot(NNetwork((1, 2, 4), (sigmoid, softmax)), random_rgb, self))
            self.food.append(Food(self))
    def eliminate(self, bot, replace = False):
        """Remove `bot` from the population; optionally spawn a fresh random
        bot in its place (used to keep the population size constant)."""
        self.time_since_last_death = 0.0
        self.bots.remove(bot)
        if replace:
            random_rgb = (np.random.randint(30, 256), np.random.randint(30, 256), np.random.randint(30, 256))
            self.bots.append(Bot(NNetwork((1, 2, 4), (sigmoid, softmax)), random_rgb, self))
    def feed(self, bot, food):
        """Reward `bot` for eating `food`: respawn the food, cull the weakest
        bots, and replace them with (possibly mutated) copies of the eater."""
        bot.score = 1.0
        self.food.remove(food)
        self.food.append(Food(self))
        # Number of cull/replace slots scales with population size, minimum 2.
        num_to_replace = int(self.SIZE / 7 - 1)
        if num_to_replace < 2:
            num_to_replace = 2
        for i in range(num_to_replace):
            weakest = self.bots[0]
            for other in self.bots:
                if other.score < weakest.score:
                    weakest = other
            self.eliminate(weakest)
        for i in range(num_to_replace):
            if np.random.uniform(0, 1) <= self.mutation_rate:
                # Mutated offspring: perturb one colour channel and at least
                # one network weight.  NOTE(review): the mutated channel
                # becomes a float from uniform(30, 256) while the original
                # channels are ints — confirm the renderer accepts floats.
                new_rgb = [bot.RGB[0], bot.RGB[1], bot.RGB[2]]
                new_rgb[np.random.choice((0, 1, 2))] = np.random.uniform(30, 256)
                new_bot = Bot(bot.nnet, new_rgb, self)
                new_bot.x = bot.x + Bot.HITBOX_RADIUS * 4 * np.random.uniform(0, 1) * np.random.choice((-1, 1))
                new_bot.y = bot.y + Bot.HITBOX_RADIUS * 4 * np.random.uniform(0, 1) * np.random.choice((-1, 1))
                nb_c = new_bot.nnet.connections
                mutated = False
                # Keep sweeping the weight matrices until at least one weight
                # actually mutates.  (Loop index `i` shadows the outer loop
                # index; harmless since the outer value is unused below.)
                while not mutated:
                    for k in range(len(nb_c)):
                        for i in range(nb_c[k].FROM.SIZE):
                            for j in range(nb_c[k].TO.SIZE):
                                if np.random.uniform(0, 1) <= self.mutation_rate:
                                    nb_c[k].weights[i][j] = nb_c[k].weights[i][j] * np.random.normal(1, 0.5) + np.random.standard_normal()
                                    mutated = True
                self.bots.append(new_bot)
            else:
                # Unmutated copy of the eater, spawned nearby.
                new_bot = Bot(bot.nnet, bot.RGB, self)
                new_bot.x = bot.x + Bot.HITBOX_RADIUS * 4 * np.random.uniform(0, 1) * np.random.choice((-1, 1))
                new_bot.y = bot.y + Bot.HITBOX_RADIUS * 4 * np.random.uniform(0, 1) * np.random.choice((-1, 1))
                self.bots.append(new_bot)
    def update(self, dt):
        """
        Updates the population's internals. The bulk of event handling for all
        bots and food starts here.
        """
        self.time_since_last_death += 1.0 / settings.FPS * dt * settings.TIME_MULTIPLIER
        # Iterate over snapshots: feed()/eliminate() mutate self.food and
        # self.bots mid-loop; the membership re-check skips removed items.
        for food in self.food[:]:
            if food not in self.food:
                continue
            food.update(dt)
        for bot in self.bots[:]:
            if bot not in self.bots:
                continue
            sensory_input = []
            # This is where the bot's field of vision is put into action.
            min_theta = bot.theta - Bot.FIELD_OF_VISION_THETA / 2
            max_theta = bot.theta + Bot.FIELD_OF_VISION_THETA / 2
            food_in_sight = False
            for food in self.food:
                if angle_is_between(find_angle(bot.x, bot.y, food.x, food.y), min_theta, max_theta):
                    food_in_sight = True
                    break
            if food_in_sight:
                sensory_input.append(1.0)
            else:
                sensory_input.append(0.0)
            # Useful debugging outputs.
            #print(bot.RGB)
            #print(sensory_input)
            bot.update(dt, sensory_input)
        # Starvation pressure: if nothing has died for 5 (scaled) seconds,
        # cull the weakest bot and replace it with a fresh random one.
        if self.time_since_last_death >= 5:
            weakest = self.bots[0]
            for bot in self.bots:
                if bot.score < weakest.score:
                    weakest = bot
            self.eliminate(weakest, replace = True)
class Bot:
    """
    The representation of the circle thing with probes.
    """
    # In pixels/pixels per second/revolutions per second/radians.
    SPAWN_RADIUS = int(settings.WINDOW_WIDTH / 20) if settings.WINDOW_WIDTH <= settings.WINDOW_HEIGHT else int(settings.WINDOW_HEIGHT / 20)
    HITBOX_RADIUS = 6
    SPEED = 350.0
    TURN_RATE = 2 * np.pi
    FIELD_OF_VISION_THETA = 45 * np.pi / 180
    # These lists represent the output from the neural network. Note that the
    # output '[0, 0, 0, 1]' means "do nothing".
    MOVE_FORWARD = [1, 0, 0, 0]
    TURN_LEFT = [0, 1, 0, 0]
    TURN_RIGHT = [0, 0, 1, 0]
    def __init__(self, nnet, rgb, population):
        # Deep-copied so offspring mutate their own weights, not the parent's.
        self.nnet = copy.deepcopy(nnet)
        self.RGB = rgb
        self.pop = population
        # Spawn at a random angle and distance from the window centre.
        self.theta = np.random.uniform(0, 1) * 2 * np.pi
        self.x = settings.WINDOW_WIDTH / 2.0 + Bot.SPAWN_RADIUS * np.random.uniform(0, 1) * np.cos(self.theta)
        self.y = settings.WINDOW_HEIGHT / 2.0 + Bot.SPAWN_RADIUS * np.random.uniform(0, 1) * np.sin(self.theta)
        self.score = 0.0
    def _move_forward(self, dt):
        # Screen-space y grows downwards, hence the minus for sin().
        self.x += Bot.SPEED / settings.FPS * dt * np.cos(self.theta) * settings.TIME_MULTIPLIER
        self.y -= Bot.SPEED / settings.FPS * dt * np.sin(self.theta) * settings.TIME_MULTIPLIER
        # Wandering well past the window edge is fatal (bot is replaced).
        if self.x < -Bot.HITBOX_RADIUS * 6 or self.x > settings.WINDOW_WIDTH + Bot.HITBOX_RADIUS * 6 \
           or self.y < -Bot.HITBOX_RADIUS * 6 or self.y > settings.WINDOW_HEIGHT + Bot.HITBOX_RADIUS * 6:
            self.pop.eliminate(self, replace = True)
    def _turn_left(self, dt):
        self.theta += Bot.TURN_RATE / settings.FPS * dt * settings.TIME_MULTIPLIER
        # Keep theta normalised to [0, 2*pi).
        while self.theta >= 2 * np.pi:
            self.theta -= 2 * np.pi
    def _turn_right(self, dt):
        self.theta -= Bot.TURN_RATE / settings.FPS * dt * settings.TIME_MULTIPLIER
        while self.theta < 0:
            self.theta += 2 * np.pi
    def update(self, dt, sensory_input):
        """
        Updates the bot's internals. "Hunger" can be thought of as a score
        between '-1' and '1' where a greater value means less hungry.
        """
        self.score -= 1.0 / settings.FPS / 10.0 * dt * settings.TIME_MULTIPLIER
        if self.score < -1:
            self.score = -1.0
        self.nnet.feed_forward(sensory_input)
        output = self.nnet.output()
        # One-hot action decoding; any other pattern means "do nothing".
        if seq_is_equal(output, Bot.MOVE_FORWARD):
            self._move_forward(dt)
        elif seq_is_equal(output, Bot.TURN_LEFT):
            self._turn_left(dt)
        elif seq_is_equal(output, Bot.TURN_RIGHT):
            self._turn_right(dt)
class Food:
    """
    The representation of the red circles.
    """
    # In pixels.
    HITBOX_RADIUS = 5
    RGB = (255, 0, 0)
    def __init__(self, population):
        # Food spawns outside the bots' central spawn disc so new bots do not
        # start on top of it: x and y are each drawn from one of the two
        # bands flanking the centre.
        mid_x = int(settings.WINDOW_WIDTH / 2)
        mid_y = int(settings.WINDOW_HEIGHT / 2)
        max_left_x = mid_x - (Bot.SPAWN_RADIUS + Bot.HITBOX_RADIUS + 5)
        min_right_x = mid_x + (Bot.SPAWN_RADIUS + Bot.HITBOX_RADIUS + 5)
        max_top_y = mid_y - (Bot.SPAWN_RADIUS + Bot.HITBOX_RADIUS + 5)
        min_bottom_y = mid_y + (Bot.SPAWN_RADIUS + Bot.HITBOX_RADIUS + 5)
        self.x = np.random.choice((np.random.uniform(0, max_left_x), np.random.uniform(min_right_x, settings.WINDOW_WIDTH)))
        self.y = np.random.choice((np.random.uniform(0, max_top_y), np.random.uniform(min_bottom_y, settings.WINDOW_HEIGHT)))
        self.pop = population
    def update(self, dt):
        """
        Updates the food's internals and handles bot<->food collision.
        """
        for bot in self.pop.bots:
            if distance_between(self.x, self.y, bot.x, bot.y) <= Bot.HITBOX_RADIUS + Food.HITBOX_RADIUS:
                self.pop.feed(bot, self)
                break
| StarcoderdataPython |
96603 | <gh_stars>1-10
'''This module implement deepracer boto client'''
import abc
import time
import random
import logging
import botocore
import boto3
from markov.log_handler.logger import Logger
from markov.constants import (NUM_RETRIES, CONNECT_TIMEOUT)
from markov.boto.constants import BOTO_ERROR_MSG_FORMAT
LOG = Logger(__name__, logging.INFO).get_logger()
class DeepRacerBotoClient(object):
    """Deepracer boto client class

    Wraps boto3 client creation with a retrying botocore config and provides
    a generic exponential-backoff helper for client calls.
    """
    def __init__(self, region_name='us-east-1', max_retry_attempts=5,
                 backoff_time_sec=1.0, boto_client_name=None,
                 session=None):
        """Deepracer boto client class

        Args:
            region_name (str): aws region name
            max_retry_attempts (int): max retry attempts for client call
            backoff_time_sec (float): exp back off time between call
            boto_client_name (str): boto client name
            session (boto3.Session): An alternative session to use.
                Defaults to None.
        """
        self._region_name = region_name
        self._max_retry_attempts = max_retry_attempts
        self._backoff_time_sec = backoff_time_sec
        self._boto_client_name = boto_client_name
        self._session = session
    def _get_boto_config(self):
        """Returns a botocore config object which specifies the number of times to retry"""
        return botocore.config.Config(retries=dict(max_attempts=NUM_RETRIES),
                                      connect_timeout=CONNECT_TIMEOUT)
    def get_client(self):
        """Return boto client"""
        if self._session:
            # auto refresh session
            s3_client = self._session.client(self._boto_client_name,
                                             region_name=self._region_name,
                                             config=self._get_boto_config())
        else:
            # new session per get client call
            s3_client = boto3.Session().client(self._boto_client_name,
                                               region_name=self._region_name,
                                               config=self._get_boto_config())
        return s3_client
    def exp_backoff(self, action_method, **kwargs):
        """retry on action_method

        Args:
            action_method (method) : specific action method
            **kwargs: argument for action_method

        Returns:
            whatever action_method(**kwargs) returns on the first success.

        Raises:
            the last exception raised by action_method once
            max_retry_attempts is exceeded.
        """
        # download with retry
        try_count = 0
        while True:
            try:
                return action_method(**kwargs)
            except Exception as e:
                try_count += 1
                if try_count > self._max_retry_attempts:
                    raise e
                # use exponential backoff
                # (quadratic-plus-jitter: (try^2 + U[0,1)) * base seconds)
                backoff_time = (pow(try_count, 2) + random.random()) * self._backoff_time_sec
                error_message = BOTO_ERROR_MSG_FORMAT.format(self._boto_client_name,
                                                             backoff_time,
                                                             str(try_count),
                                                             str(self._max_retry_attempts),
                                                             e)
                LOG.info(error_message)
                time.sleep(backoff_time)
| StarcoderdataPython |
43021 | """Views for the node settings page."""
# -*- coding: utf-8 -*-
import logging
import httplib as http
from dropbox.rest import ErrorResponse
from dropbox.client import DropboxClient
from urllib3.exceptions import MaxRetryError
from framework.exceptions import HTTPError
from website.addons.dropbox.serializer import DropboxSerializer
from website.addons.base import generic_views
logger = logging.getLogger(__name__)
debug = logger.debug
SHORT_NAME = 'dropbox'
FULL_NAME = 'Dropbox'
# View: list the Dropbox external accounts linked to the current user.
dropbox_account_list = generic_views.account_list(
    SHORT_NAME,
    DropboxSerializer
)
# View: import an existing Dropbox authorization into the current node.
dropbox_import_auth = generic_views.import_auth(
    SHORT_NAME,
    DropboxSerializer
)
def _get_folders(node_addon, folder_id):
    """Return the child folders of `folder_id` for the folder picker.

    Args:
        node_addon: the node's Dropbox addon settings (provides the owner
            node and the OAuth credentials).
        folder_id: Dropbox path of the folder to list, or None to return the
            synthetic root ("Full Dropbox") entry.

    Returns:
        List of folder dicts shaped for the addon folder-picker API.

    Raises:
        HTTPError: 404 if the folder is missing or deleted; 408 if Dropbox
            cannot be reached.
    """
    node = node_addon.owner
    # No folder requested yet: present a single root entry whose `folders`
    # URL triggers a listing of '/'.
    if folder_id is None:
        return [{
            'id': '/',
            'path': '/',
            'addon': 'dropbox',
            'kind': 'folder',
            'name': '/ (Full Dropbox)',
            'urls': {
                'folders': node.api_url_for('dropbox_folder_list', folderId='/'),
            }
        }]
    client = DropboxClient(node_addon.external_account.oauth_key)
    # Pre-built errors reused below for the several failure paths.
    file_not_found = HTTPError(http.NOT_FOUND, data=dict(message_short='File not found',
                                                         message_long='The Dropbox file '
                                                         'you requested could not be found.'))
    max_retry_error = HTTPError(http.REQUEST_TIMEOUT, data=dict(message_short='Request Timeout',
                                                                message_long='Dropbox could not be reached '
                                                                'at this time.'))
    try:
        metadata = client.metadata(folder_id)
    except ErrorResponse:
        raise file_not_found
    except MaxRetryError:
        raise max_retry_error
    # Raise error if folder was deleted
    if metadata.get('is_deleted'):
        raise file_not_found
    return [
        {
            'addon': 'dropbox',
            'kind': 'folder',
            'id': item['path'],
            'name': item['path'].split('/')[-1],
            'path': item['path'],
            'urls': {
                'folders': node.api_url_for('dropbox_folder_list', folderId=item['path']),
            }
        }
        for item in metadata['contents']
        if item['is_dir']
    ]
# View: list folders via the _get_folders helper above.
dropbox_folder_list = generic_views.folder_list(
    SHORT_NAME,
    FULL_NAME,
    _get_folders
)
# View: return the node's current Dropbox configuration.
dropbox_get_config = generic_views.get_config(
    SHORT_NAME,
    DropboxSerializer
)
def _set_folder(node_addon, folder, auth):
uid = folder['id']
node_addon.set_folder(uid, auth=auth)
node_addon.save()
# View: set the node's linked folder via the _set_folder helper above.
dropbox_set_config = generic_views.set_config(
    SHORT_NAME,
    FULL_NAME,
    DropboxSerializer,
    _set_folder
)
# View: remove the Dropbox authorization from the node.
dropbox_deauthorize_node = generic_views.deauthorize_node(
    SHORT_NAME
)
# View: return the node's root folder for file rendering.
dropbox_root_folder = generic_views.root_folder(
    SHORT_NAME
)
| StarcoderdataPython |
1669588 | <gh_stars>10-100
import torch
import numpy as np
from PIL import Image
import torchvision.transforms as transforms
from data.augmentations import Augmentation
from data import BaseTransform
import cv2
class LoadImage(object):
    """Callable wrapper around cv2.imread."""
    def __init__(self, space='BGR'):
        # NOTE(review): `space` is stored but never used — cv2.imread always
        # returns BGR. Confirm whether an RGB conversion was intended.
        self.space = space
    def __call__(self, path_img):
        # Returns the image at `path_img` as an HxWxC uint8 array (BGR), or
        # None if cv2 cannot read the file.
        return cv2.imread(path_img)
# Debug script: compare the cv2-based BaseTransform pipeline against the
# PIL/torchvision Compose pipeline on a single image.
# ImageNet channel means/stds used by both normalisation paths.
means = [0.485, 0.456, 0.406]
stds = [0.229, 0.224, 0.225]
# means = [0.485, 0.456, 0.485]
# stds = [0.229, 0.224, 0.225]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))
])
image_name = '/home/gurkirt/pic.jpg'
input_dim = 300
img_loader = LoadImage
# OpenCV path: load (BGR, uint8) and wrap in a batch dimension.
imgs = np.asarray([cv2.imread(image_name)])
print(type(imgs), imgs.shape, '55555')
# imgs = Image.open(image_name)
# if imgs.mode != 'RGB':
# imgs = imgs.convert('RGB')
# imgs = np.asarray([np.asarray(imgs)])
ssd_transform = BaseTransform(input_dim, means, stds)
# Dummy targets; only the transform's image output is inspected below.
targets = np.asarray([[0,0,1,1,1], [0,0,1,1,1]])
print(np.min(imgs), np.max(imgs), np.std(imgs), np.mean(imgs))
imgs, boxes, labels = ssd_transform(imgs, targets[:, :4], targets[:, 4], 1)
# print(imgs[0].size(), ' 44444 ')
cvimage = imgs[0] #
# cvimage = torch.FloatTensor(torch.from_numpy(imgs[0]).permute(2, 0, 1))
# PIL/torchvision path for comparison.
img = Image.open(image_name)
if img.mode != 'RGB':
    img = img.convert('RGB')
img = img.resize((input_dim, input_dim), Image.BILINEAR)
plimg = transform(img)
# Compare the two pipelines' statistics and per-pixel differences.
print(plimg.size(), plimg.type(), cvimage.size(), cvimage.type())
print(torch.std(plimg), torch.std(cvimage))
print(torch.mean(plimg), torch.mean(cvimage))
print(torch.sum(torch.abs(plimg-cvimage)))
print(cvimage[:,:1,:1])
print(plimg[:,:1,:1])
print(torch.max(plimg[:,:,:]-cvimage[:,:,:]))
3306903 | <reponame>oswald-pro/LocatePhoneNumber
import phonenumbers
import folium
from PhoneNumbers import number
from phonenumbers import geocoder
# Get your API key from https://opencagedata.com/
Api_key = '<YOUR API KEY>'
oswaldNumber = phonenumbers.parse(number)
# Get Country Location of the number
# ('fr' selects French for the human-readable region description).
yourLacation = geocoder.description_for_number(oswaldNumber, 'fr')
print(yourLacation)
# Get service provider
from phonenumbers import carrier
service_provider = phonenumbers.parse(number)
print(carrier.name_for_number(service_provider, "fr"))
# Get latitude and longitude
from opencage.geocoder import OpenCageGeocode
# NOTE(review): this rebinds `geocoder`, shadowing the phonenumbers.geocoder
# module imported above — any later use of that module would break.
geocoder = OpenCageGeocode(Api_key)
query = str(yourLacation)
results = geocoder.geocode(query)
#print(results)
lat = results[0]['geometry']['lat']
lng = results[0]['geometry']['lng']
print(lat,lng)
# Render a map centred on the geocoded region and drop a marker there.
myMap = folium.Map(location=[lat, lng], zoom_start=13)
folium.Marker([lat, lng], popup=yourLacation).add_to(myMap)
# Save Map in HTML file
myMap.save("myLocation.html")
| StarcoderdataPython |
36366 | import argparse
import os
import torch
import matplotlib.pyplot as plt
from torch.utils.data.distributed import DistributedSampler
from torch import distributed as dist
from torch import optim
from tqdm import tqdm
from torch_ema import ExponentialMovingAverage
from cifr.core.config import Config
from cifr.models.builder import build_architecture, build_optimizer, build_dataset
from cifr.models.builder import build_discriminator
from cifr.models.losses.contextual_loss import ContextualLoss, ContextualBilateralLoss
from cifr.models.losses.gradient_norm import normalize_gradient
from cifr.models.losses.gan_loss import d_logistic_loss
from cifr.models.losses.gan_loss import g_nonsaturating_loss
from tools.utils import query_all_pixels
from tools.utils import requires_grad
from tools.utils import save_pred_img
WORK_DIR = './work_dir'
def synchronize():
    """Barrier across all distributed workers; a no-op when torch.distributed
    is unavailable, uninitialized, or running with a single process."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    if dist.get_world_size() > 1:
        dist.barrier()
def get_world_size():
    """Number of distributed workers, or 1 when not running distributed."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def reduce_loss_dict(loss_dict):
    """Sum the loss tensors in `loss_dict` onto rank 0 and average them there.

    In single-process runs the input dict is returned unmodified.  Keys are
    sorted so every rank stacks tensors in the same order before the
    collective reduce.

    NOTE(review): only rank 0 divides by world_size, so non-zero ranks get
    the *summed* (not averaged) values back — presumably fine because only
    rank 0 logs them; confirm before using the result elsewhere.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        keys = []
        losses = []
        for k in sorted(loss_dict.keys()):
            keys.append(k)
            losses.append(loss_dict[k])
        losses = torch.stack(losses, 0)
        dist.reduce(losses, dst=0)
        if dist.get_rank() == 0:
            losses /= world_size
        reduced_losses = {k: v for k, v in zip(keys, losses)}
    return reduced_losses
def train(args, config, device):
    """Adversarial training loop for the CIFR encoder/model pair.

    Each iteration performs a generator step (contextual + non-saturating GAN
    loss on the GAN batch, plus an L1 query loss on the paired batch) and a
    discriminator step (logistic loss).  Rank 0 additionally checkpoints and
    renders EMA predictions once per epoch.

    Args:
        args: parsed CLI namespace (uses .config and .name).
        config: parsed Config object describing models, datasets, optimizer.
        device: per-process CUDA device (one process per GPU under DDP).
    """
    # Build the three networks, convert their BN layers for multi-GPU use,
    # and wrap each in DistributedDataParallel.
    model = build_architecture(config.model).to(device)
    model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    encoder = build_architecture(config.encoder).to(device)
    encoder = torch.nn.SyncBatchNorm.convert_sync_batchnorm(encoder)
    disc = build_discriminator(config.discriminator).to(device)
    disc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(disc)
    model = torch.nn.parallel.DistributedDataParallel(
        model,
        device_ids=[dist.get_rank()],
        output_device=dist.get_rank(),
        broadcast_buffers=False
    )
    encoder = torch.nn.parallel.DistributedDataParallel(
        encoder,
        device_ids=[dist.get_rank()],
        output_device=dist.get_rank(),
        broadcast_buffers=False
    )
    disc = torch.nn.parallel.DistributedDataParallel(
        disc,
        device_ids=[dist.get_rank()],
        output_device=dist.get_rank(),
        broadcast_buffers=False
    )
    # One optimizer over encoder+model (generator side), one over the
    # discriminator.  The shared config.optimizer dict is mutated in place
    # between the two build calls.
    config.optimizer.update({'params': [
        {'params': encoder.parameters()},
        {'params': model.parameters()}
    ]})
    optim_g = build_optimizer(config.optimizer)
    config.optimizer.update({'params': disc.parameters()})
    optim_d = build_optimizer(config.optimizer)
    scheduler_g = optim.lr_scheduler.StepLR(optim_g, step_size=50, gamma=0.5)
    scheduler_d = optim.lr_scheduler.StepLR(optim_d, step_size=50, gamma=0.5)
    # Exponential moving averages of the generator weights, used for eval.
    model_ema = ExponentialMovingAverage(model.parameters(), decay=0.995)
    encoder_ema = ExponentialMovingAverage(encoder.parameters(), decay=0.995)
    train_set_gan = build_dataset(config.train_dataset_gan)
    train_set = build_dataset(config.train_dataset)
    test_set = build_dataset(config.test_dataset)
    train_loader_gan = torch.utils.data.DataLoader(
        train_set_gan,
        batch_size=config.batch_size,
        num_workers=6,
        drop_last=True,
        sampler=DistributedSampler(train_set_gan, shuffle=True),
    )
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=config.batch_size,
        num_workers=6,
        drop_last=True,
        sampler=DistributedSampler(train_set, shuffle=True),
    )
    test_loader = torch.utils.data.DataLoader(
        test_set,
        batch_size=1,
        num_workers=1
    )
    contextual_loss = ContextualLoss(use_vgg=True, vgg_layer="conv5_4").to(device)
    loss_fn = torch.nn.L1Loss()
    # Optionally wrap discriminator calls in gradient normalization.
    grad_norm_fn = normalize_gradient if config.discriminator_gradient_norm else lambda fn, x: fn(x)
    config_name = os.path.splitext(os.path.basename(args.config))[0] if args.name is None else args.name
    os.makedirs(f'{WORK_DIR}/{config_name}/images', exist_ok=True)
    os.makedirs(f'{WORK_DIR}/{config_name}/checkpoints', exist_ok=True)
    # config.dump(f'{WORK_DIR}/{config_name}/{config_name}.py')
    rows = 20
    cols = 3
    fig = plt.figure(figsize=(15, rows*6))
    total_iter = len(train_set) // config.batch_size // dist.get_world_size()
    # Progress bars render only on rank 0.
    epoch_pbar = tqdm(
        range(config.epoch),
        total=config.epoch,
        desc='Epoch',
        position=0,
        ncols=0,
        disable=dist.get_rank()!=0
    )
    for epoch in epoch_pbar:
        iter_pbar = tqdm(
            enumerate(zip(train_loader, train_loader_gan)),
            total=total_iter,
            leave=False,
            position=1,
            ncols=0,
            disable=dist.get_rank()!=0
        )
        for n, (batch, batch_gan) in iter_pbar:
            encoder.train()
            model.train()
            disc.train()
            lr = batch_gan['lr'].to(device)
            coord = batch_gan['coord'].to(device)
            cell = batch_gan['cell'].to(device)
            real = batch_gan['real'].to(device)
            #
            # Generator Step
            #
            # Freeze the discriminator so only generator grads accumulate.
            requires_grad(disc, False)
            optim_g.zero_grad()
            fake = query_all_pixels(encoder, model, lr, coord, cell, 1024)
            fake_pred = grad_norm_fn(disc, fake)
            ctx_loss = contextual_loss(fake, real)
            loss_fake = g_nonsaturating_loss(fake_pred)
            loss_g = ctx_loss + loss_fake
            loss_g.backward()
            # Supervised L1 loss on the paired query batch, accumulated into
            # the same generator gradients before a single optimizer step.
            query_inp = batch['inp'].to(device)
            query_coord = batch['coord'].to(device)
            query_cell = batch['cell'].to(device)
            query_gt = batch['gt'].to(device)
            feature = encoder(query_inp)
            query_pred = model(query_inp, feature, query_coord, query_cell)
            query_l1_loss = loss_fn(query_pred, query_gt)
            query_l1_loss.backward()
            optim_g.step()
            encoder_ema.update()
            model_ema.update()
            #
            # Discriminator Step
            #
            requires_grad(disc, True)
            optim_d.zero_grad()
            # detach(): no gradients flow back into the generator here.
            fake_pred = grad_norm_fn(disc, fake.detach())
            real_pred = grad_norm_fn(disc, real)
            loss_d = d_logistic_loss(real_pred, fake_pred)
            loss_d.backward()
            optim_d.step()
            loss_dict = {
                'd': loss_d,
                'g': loss_g,
                'g_ctx': ctx_loss,
                'query_l1': query_l1_loss
            }
            reduced_loss = reduce_loss_dict(loss_dict)
            if dist.get_rank() == 0:
                loss_d = reduced_loss['d']
                loss_g = reduced_loss['g']
                ctx_loss = reduced_loss['g_ctx']
                query_l1_loss = reduced_loss['query_l1']
                loss_str = f'd: {loss_d:.4f};'
                loss_str += f' g: {loss_g:.4f};'
                loss_str += f' g_ctx: {ctx_loss:.4f}'
                loss_str += f' query_l1: {query_l1_loss:.4f}'
                iter_pbar.set_description(loss_str)
        scheduler_g.step()
        scheduler_d.step()
        if dist.get_rank() == 0:
            # Checkpoint raw + EMA weights, then render test predictions with
            # the EMA weights temporarily swapped in (store/copy_to/restore).
            torch.save(
                {
                    'encoder': encoder.module.state_dict(),
                    'model': model.module.state_dict(),
                    'encoder_ema': encoder_ema.state_dict(),
                    'model_ema': model_ema.state_dict(),
                    'discriminator': disc.module.state_dict(),
                },
                f'{WORK_DIR}/{config_name}/checkpoints/{epoch+1:0>6}.pth'
            )
            encoder_ema.store(encoder.parameters())
            model_ema.store(model.parameters())
            encoder_ema.copy_to(encoder.parameters())
            model_ema.copy_to(model.parameters())
            encoder.eval()
            model.eval()
            img_path = f'{WORK_DIR}/{config_name}/images/train_{epoch+1:0>6}.jpg'
            save_pred_img(encoder, model, test_loader, img_path, fig, rows, cols)
            encoder_ema.restore(encoder.parameters())
            model_ema.restore(model.parameters())
        iter_pbar.close()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True)
    parser.add_argument('--name', type=str, default=None)
    args = parser.parse_args()
    # Launched under torch.distributed (one process per GPU); the local GPU
    # index arrives through the LOCAL_RANK environment variable.
    local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(local_rank)
    torch.distributed.init_process_group(backend="nccl", init_method="env://")
    device = torch.device(f'cuda:{local_rank}')
    # Make sure every process has finished initialisation before training.
    synchronize()
    cfg = Config.fromfile(args.config)
    train(args, cfg, device)
| StarcoderdataPython |
1776549 | #!/usr/bin/env python3
import sys
from readers.read_ape import ApeReader
# Read an APE results file (path given as argv[1]) and print summary
# statistics about the correction suggestions.
FILE_PATH = sys.argv[1]
ape_reader = ApeReader(FILE_PATH)
# Total number of error occurrences found in the input file.
print('Ocorrencias do erro: {}'.format(len(ape_reader.error_lines)))
# `cores` ("colors"): one flat list of verdict colors per occurrence; each
# correction entry is a pair whose second element is the color verdict.
cores = list()
for k in ape_reader.corrections:
    flat = [sub[1] for sub in k]
    cores.append(flat)
# NOTE(review): fewer than 2 items counts as "no suggestion" — presumably
# index 0 holds the original token; confirm against ApeReader.
print('Nenhuma sugestao de correcao: {}'.format(
    len([x for x in cores if len(x) < 2])))
print('Efetivamente avaliadas: {}'.format(
    len([x for x in cores if 'red' in x or 'green' in x or 'yellow' in x])))
print('Pelo menos uma sugestao correta: {}'.format(
    len([x for x in cores if 'green' in x])))
print('Pelo menos uma sugestao parcialmente correta: {}'.format(
    len([x for x in cores if 'yellow' in x])))
print('Pelo menos uma sugestao parcialmente correta e nenhuma correta: {}'.format(
    len([x for x in cores if 'yellow' in x and 'green' not in x])))
print('Pelo menos uma sugestao errada: {}'.format(
    len([x for x in cores if 'red' in x])))
print('Todas as sugestoes erradas: {}'.format(
    len([x for x in cores if 'red' in x and 'green' not in x and 'yellow' not in x])))
42361 | #!/usr/bin/python2.6
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the ibmperf module.
If the IBM Performance Inspector tools are installed at "C:\ibmperf\bin" it
will run some tests using the actual tools. However, if the tools are not
installed it still runs a suite of tests using mocked versions of the tools.
"""
__author__ = "<EMAIL> (<NAME>)"
import ibmperf
import logging
import os
import random
import unittest
class MockPopen(object):
  """A mock subprocess.Popen object.

  Implements "returncode" and "communicate", the only attributes/routines
  used by the ibmperf module.

  Attributes:
    returncode: The return code of the mocked sub-process.
  """

  def __init__(self, stdout="", stderr="", returncode=0,
               raise_on_init=None, raise_on_communicate=None):
    """Initializes this mock Popen object with the given output and returncode.

    Args:
      stdout: The data to return for stdout in "communicate".
      stderr: The data to return for stderr in "communicate".
      returncode: The return code to expose via the "returncode" attribute.
      raise_on_init: If this is not None, will cause the constructor to raise
          an error. Expected to be a 2-tuple, containing (type, args), and will
          call "raise type(args)".
      raise_on_communicate: Similar to raise_on_init, but will cause the error
          to be raised on calls to "communicate".
    """
    if raise_on_init:
      raise raise_on_init[0](*raise_on_init[1])
    self._stdout = stdout
    self._stderr = stderr
    self.returncode = returncode
    self._raise_on_communicate = raise_on_communicate

  def communicate(self):
    """Simulates running the command, returning its stdout and stderr.

    Raises an exception if raise_on_communicate was specified in the
    constructor.
    """
    if self._raise_on_communicate:
      # BUG FIX: this previously used "return", which merely constructed the
      # exception object and handed it back as the communicate() result
      # instead of raising it as documented.
      raise self._raise_on_communicate[0](*self._raise_on_communicate[1])
    return (self._stdout, self._stderr)
class MockHardwarePerformanceCounter(ibmperf.HardwarePerformanceCounter):
  """A mocked ibmperf.HardwarePerformanceCounter object.

  Replaces the _Popen member function with one that returns canned results.
  """

  def __init__(self, popen_results, *args, **kwargs):
    """Initializes the mock HardwarePerformanceCounter object.

    All positional and keyword arguments besides popen_results are forwarded
    unchanged to the ibmperf.HardwarePerformanceCounter initializer.

    Args:
      popen_results: A list of (type, args, kwargs) 3-tuples that will be
          returned from calls to _Popen, in order.
    """
    # Copied so later AddPopenResult calls never mutate the caller's list.
    self._popen_results = list(popen_results)
    super(MockHardwarePerformanceCounter, self).__init__(*args, **kwargs)

  def AddPopenResult(self, result_tuple):
    """Queues result_tuple, a (type, args, kwargs) triplet, for _Popen."""
    self._popen_results.append(result_tuple)

  def _Popen(self, dummy_command_line):
    """Overrides _Popen: builds and returns the next queued mock object."""
    factory, factory_args, factory_kwargs = self._popen_results.pop(0)
    return factory(*factory_args, **factory_kwargs)
# A few specific metric names.
_CYCLES = "CYCLES"
_UOPS = "UOPS"
# A list of metrics that we will simulate supporting.
_METRICS = {
    _CYCLES: None,
    "NONHALTED_CYCLES": ("Number of cycles during which the processor is not "
                         "halted (and not in Thermal Trip on Pentium Ms)"),
    "INSTR": "Number of instructions retired",
    _UOPS: "Number of uOps retired",
    "BRANCH": "Number of branch instruction retired",
    "MISPRED_BRANCH": "Number of mispredicted branch instructions retired"}
# A generic command that is successful outputs nothing and returns the default
# error code of 0.
_GENERIC_SUCCESS = (MockPopen, [], {})
# Simulates a successful run of "ddq", indicating that the toolkit is
# installed.
_DDQ_INSTALLED = _GENERIC_SUCCESS
# The simulated output of a successful call to "ptt".
_PTT_OUTPUT = "\n".join([" - %s" % _metric for _metric in _METRICS])
_PTT_SUCCESS = (MockPopen, [], {"stdout": _PTT_OUTPUT})
# The simulated output of a successful call to "mpevt -ld".
# Only metrics with a description get an id row (ids start at 100).
_MPEVT_OUTPUT = "Id Name Description\n-- ---- -----------"
for i, _metric in enumerate(_METRICS):
  desc = _METRICS[_metric]
  if desc:
    _MPEVT_OUTPUT += "\n%d %s %s" % (100 + i, _metric, desc)
# NOTE(review): returncode -1 here while the fixture is named "SUCCESS" —
# presumably mpevt exits nonzero by design in the mocked flow; confirm
# against the ibmperf module's handling of mpevt.
_MPEVT_SUCCESS = (MockPopen, [], {"stdout": _MPEVT_OUTPUT, "returncode": -1})
# This is a set of MockPopen results that imitates a successful initialization
# of the toolkit.
_SUCCESSFUL_INIT = [_DDQ_INSTALLED, _PTT_SUCCESS, _MPEVT_SUCCESS]
def _CreateQueryResults(metrics):
"""Returns a set of made up results for the given metrics.
Args:
metrics: An iterable collection of metric names.
"""
results = {}
pids = [1015, 1016]
for metric in metrics:
pid_results = {}
for pid in pids:
pid_results[pid] = random.randint(100000, 1000000)
results[metric] = pid_results
return results
def _CreateQueryStdout(results):
"""Returns a "ptt dump" stdout for the given dict of results.
See ibmperf.py for a full listing of sample output.
Args:
results: A dict of results as returned by
ibmperf.HardwarePerformanceCounters.Query.
"""
stdout = "***** ptt v2.0.8 for x86 ***** pid=1944/0x798 *****\n"
stdout += "\n"
pids = results[results.keys()[0]].keys()
for pid in pids:
stdout += " PID %d is foo\n" % pid
stdout += "\n"
stdout += "PTT Facility Per-Thread Information\n"
stdout += "-----------------------------------\n"
stdout += "\n"
stdout += " PID TID Disp Intr"
for metric in results:
stdout += " %s" % metric
stdout += "\n"
stdout += " --- --- ---- ----"
for metric in results:
stdout += " %s" % ("-" * len(metric))
stdout += "\n"
for pid in pids:
tid = random.randint(100, 1000)
disp = random.randint(1, 10000)
intr = random.randint(1, 10000)
metric_values = ""
for metric in results:
metric_values += " %d" % results[metric][pid]
stdout += " %d %d %d %d%s\n" % (pid, tid, disp, intr, metric_values)
stdout += " "
stdout += "-".join("%s" % ("-" * len(metric)) for metric in results)
stdout += "\n"
stdout += " "
stdout += metric_values
stdout += "\n\n"
stdout += "Execution ended: 1 iterations.\n"
return stdout
class TestHardwarePerformanceCounter(unittest.TestCase):
  """Unittests for ibmperf.HardwarePerformanceCounter."""

  def setUp(self):
    # By default we create a mock HardwarePerformanceCounter object that
    # successfully initializes the toolkit.
    self._hpc = MockHardwarePerformanceCounter(
        _SUCCESSFUL_INIT)

  def _TestStart(self, metrics):
    """Utility function for starting data collection.

    Queues successful results for the three ptt commands that Start is
    expected to issue, then starts collection.

    Args:
      metrics: Iterable collection of metrics to be started.
    """
    self._hpc.AddPopenResult(_GENERIC_SUCCESS)  # ptt term
    self._hpc.AddPopenResult(_GENERIC_SUCCESS)  # ptt noautoterm
    self._hpc.AddPopenResult(_GENERIC_SUCCESS)  # ptt init
    self._hpc.Start(metrics)

  def _TestStop(self):
    """Utility function for stopping data collection."""
    self._hpc.AddPopenResult(_GENERIC_SUCCESS)  # ptt term
    self._hpc.Stop()

  # Pylint complains that this need not be a member function, but the
  # unittest machinery requires this.
  # pylint: disable=R0201
  def testInstallsIfNotInstalled(self):
    # Construction must succeed: a failed "ddq" probe is followed by a
    # successful "tinstall", after which initialization proceeds normally.
    MockHardwarePerformanceCounter(
        [(MockPopen, [], {"returncode": -1}),  # ddq failure.
         (MockPopen, [], {"returncode": 0}),  # tinstall success.
         _PTT_SUCCESS, _MPEVT_SUCCESS])

  def testFailedInstall(self):
    self.assertRaises(ibmperf.ExecutionFailed,
                      MockHardwarePerformanceCounter,
                      [(MockPopen, [], {"returncode": -1}),  # ddq failure.
                       (MockPopen, [], {"returncode": -1})])  # tinstall failure.

  def testHaveMetrics(self):
    self.assertEqual(set(_METRICS.keys()), set(self._hpc.metrics.keys()))

  def testQueryFailsWhenNotRunning(self):
    self.assertRaises(ibmperf.NotRunning, self._hpc.Query, "foo")

  def testStopFailsWhenNotRunning(self):
    self.assertRaises(ibmperf.NotRunning, self._hpc.Stop)

  def testStartFailsOnInvalidMetric(self):
    self.assertRaises(ibmperf.InvalidMetric,
                      self._TestStart,
                      ["INVALID_METRIC_NAME"])

  def testAllMetricsCanBeStartedIndividually(self):
    for name in self._hpc.metrics:
      self._TestStart([name])
      self._TestStop()

  def testDumpFails(self):
    self._TestStart([_CYCLES])
    # ptt returns 210 when it fails.
    self._hpc.AddPopenResult((MockPopen, [], {"returncode": 210}))
    self.assertRaises(ibmperf.ExecutionFailed,
                      MockHardwarePerformanceCounter.Query,
                      self._hpc,
                      "foo")

  def testUnexpectedDumpOutput(self):
    self._TestStart([_CYCLES])
    stdout = "This is garbage, and is not parsable."
    self._hpc.AddPopenResult((MockPopen, [], {"stdout": stdout}))
    self.assertRaises(ibmperf.UnexpectedOutput,
                      MockHardwarePerformanceCounter.Query,
                      self._hpc,
                      "foo")

  def testWrongMetricsDumped(self):
    self._TestStart([_CYCLES])
    # Hand back a dump for UOPS when CYCLES was started; Query must notice
    # the mismatch.
    results = _CreateQueryResults([_UOPS])
    stdout = _CreateQueryStdout(results)
    self._hpc.AddPopenResult((MockPopen, [], {"stdout": stdout}))
    self.assertRaises(ibmperf.UnexpectedOutput,
                      MockHardwarePerformanceCounter.Query,
                      self._hpc,
                      "foo")

  def _TestMetricsFully(self, metrics):
    """Collects the provided metrics for an imaginary process 'foo'.

    This helper function starts the metrics, queries them and finally stops
    them. It ensures that the reported metrics match those that were
    requested to be collected.

    Args:
      metrics: Iterable collection of metrics to be started.
    """
    self._TestStart(metrics)
    expected_results = _CreateQueryResults(metrics)
    query_stdout = _CreateQueryStdout(expected_results)
    self._hpc.AddPopenResult((MockPopen, [], {"stdout": query_stdout}))
    results = self._hpc.Query("foo")
    self.assertTrue(isinstance(results, dict))
    self.assertEqual(expected_results, results)
    self._TestStop()

  def testOneMetricFully(self):
    # next(iter(...)) rather than .keys()[0]: dict views are not indexable
    # on Python 3, and next(iter(...)) behaves identically on Python 2.
    name = next(iter(self._hpc.metrics))
    self._TestMetricsFully([name])

  def _GetMaximalMetrics(self):
    """Helper function that returns a set of maximal metrics.

    This returns all free metrics, plus max_counters non-free metrics.
    """
    metrics = list(self._hpc.free_metrics)
    metrics += list(self._hpc.non_free_metrics)[0:self._hpc.max_counters]
    return metrics

  def testMaximalMetricsFully(self):
    metrics = self._GetMaximalMetrics()
    self._TestMetricsFully(metrics)

  def testMaximalMetricsFullyForReal(self):
    # Only run this test if the toolkit is actually present at the
    # default path.
    if (not os.path.isdir(ibmperf.DEFAULT_DIR) or
        not os.path.exists(os.path.join(ibmperf.DEFAULT_DIR, 'ddq.exe'))):
      return
    # Exercise the real (non-mocked) toolkit end to end.
    self._hpc = ibmperf.HardwarePerformanceCounter()
    metrics = self._GetMaximalMetrics()
    self._hpc.Start(metrics)
    try:
      results = self._hpc.Query("python")
      self.assertTrue(isinstance(results, dict))
      self.assertEqual(set(metrics), set(results))
    except ibmperf.ExecutionFailed:
      # We swallow this error, as it can happen if the local machine doesn't
      # actually support per-thread metrics. Some versions of Windows don't.
      pass
    self._hpc.Stop()
if __name__ == "__main__":
  # Verbose logging makes the mocked toolkit interactions visible when the
  # tests are run directly.
  logging.basicConfig(level=logging.DEBUG)
  unittest.main()
# (Removed trailing dataset-extraction residue that was not part of the
# original source file.)