text stringlengths 38 1.54M |
|---|
import csv


def sum_animal_water_needs(csv_path='zoo.csv'):
    """Sum the water need (column 2) per animal (column 0) from a CSV file.

    The first row is treated as a header and skipped.  Returns a dict
    mapping animal name -> total water need.

    Fixes in this revision: `animal_dict` was never initialized, the
    Python 2 `print` statement and the `or key,values` typo (for `for`)
    made the original a syntax error, and `import csv` was missing.
    """
    animal_dict = {}
    with open(csv_path) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        for row in csv_reader:
            # dict.get with a default replaces the explicit key-membership test
            animal_dict[row[0]] = animal_dict.get(row[0], 0) + int(row[2])
    return animal_dict


if __name__ == '__main__':
    animal_dict = sum_animal_water_needs()
    print("all animals water needs are \n", animal_dict)
    for key, values in animal_dict.items():
        print(key, values)
|
import unittest
from flask import url_for
from flask_testing import TestCase
from application import app, db
from application.models import Ingredients, Method, Recipe
class TestBase(TestCase):
    """Shared fixture: in-memory SQLite DB seeded with one ingredient,
    one method and one recipe before every test."""

    def create_app(self):
        """Configure the Flask app for testing (in-memory database)."""
        app.config.update(
            SQLALCHEMY_DATABASE_URI="sqlite:///",
            SECRET_KEY='TEST_SECRET_KEY',
            DEBUG=True
        )
        return app

    def setUp(self):
        """Create all tables and seed one row in each."""
        db.create_all()
        # BUGFIX: the original created `ingedient1` but then added and
        # referenced an undefined name `sample1`, raising NameError before
        # any test could run.
        ingredient1 = Ingredients(name="Milk")
        method1 = Method(steps="hello", time="40mins")
        db.session.add(ingredient1)
        db.session.add(method1)
        db.session.commit()
        recipe1 = Recipe(name="recipe", ingredient_id=ingredient1.id,
                         quantity="100g", method_id=method1.id)
        db.session.add(recipe1)
        db.session.commit()

    def tearDown(self):
        """Drop all tables so each test starts from a clean database."""
        db.session.remove()
        db.drop_all()
class TestViews(TestBase):
    """Smoke-test the GET response code of every route."""

    def test_home_get(self):
        # Home page renders successfully.
        response = self.client.get(url_for('index'))
        self.assertEqual(response.status_code, 200)

    def test_update_get(self):
        # Update form for the seeded recipe renders successfully.
        response = self.client.get(url_for('update', recipe_id=1))
        self.assertEqual(response.status_code, 200)

    def test_delete_get(self):
        # Delete is expected to redirect (302) rather than render a page.
        response = self.client.get(url_for('delete', recipe_id=1))
        self.assertEqual(response.status_code, 302)

    def test_addIn_get(self):
        # Add-ingredient form renders successfully.
        response = self.client.get(url_for('addIn'))
        self.assertEqual(response.status_code, 200)

    def test_ingredients_get(self):
        # Ingredients listing renders successfully.
        response = self.client.get(url_for('ingredients'))
        self.assertEqual(response.status_code, 200)
class TestAdd(TestBase):
    """POST tests for the add-ingredient, add-method and add-recipe routes."""

    def test_add_ingredient(self):
        # Posting a new ingredient should echo its name in the response body.
        response = self.client.post(
            url_for('addIn'),
            data = dict(name="Eggs")
        )
        self.assertIn(b'Eggs', response.data)

    def test_add_methods(self):
        # Posting a method should render both the steps and the time.
        response = self.client.post(
            url_for('methods', method_id=1),
            data = dict(steps="step 1", time="1 hour"),
            follow_redirects=True
        )
        self.assertIn(b'step 1', response.data)
        self.assertIn(b'1 hour', response.data)

    def test_add_recipe(self):
        # Posting a recipe should render its name, ids and quantity.
        response = self.client.post(
            url_for('recipes'),
            data = dict(name="Chilli Con Carne", ingredient_id=2, quantity="500g", method_id=2),
            follow_redirects=True
        )
        self.assertIn(b'Chilli Con Carne', response.data)
        self.assertIn(b'2', response.data)
        self.assertIn(b'500g', response.data)
        # NOTE(review): duplicates the b'2' assertion above — presumably one
        # was meant to check ingredient_id and the other method_id.
        self.assertIn(b'2', response.data)

    def test_addIn_not_valid(self):
        # An invalid ingredient name is expected to be rejected with 405.
        response = self.client.post(
            url_for('addIn'),
            data = dict(name="13/11/2020"),
            follow_redirects=True
        )
        self.assertEqual(response.status_code, 405)
class TestUpdate(TestBase):
    """POST test for renaming an existing recipe."""

    def test_update_recipe(self):
        # Renaming the seeded recipe should succeed and render a page.
        response = self.client.post(
            url_for('update', recipe_id=1),
            data = dict(oldname="Lasagne", newname="Lasagne serves 42"),
            follow_redirects=True
        )
        self.assertEqual(response.status_code, 200)
class TestDelete(TestBase):
    """POST test for the delete route."""

    def test_delete_recipe(self):
        # POST with a form body is expected to be rejected (405 Method Not
        # Allowed) — presumably delete only accepts GET; confirm against views.
        response = self.client.post(
            url_for('delete', recipe_id=1),
            data = dict(name="Pasta"),
            follow_redirects=True
        )
        self.assertEqual(response.status_code, 405)
|
#1. Подсчитать, сколько было выделено памяти под переменные в ранее разработанных
# программах в рамках первых трех уроков. Проанализировать результат и определить
# программы с наиболее эффективным использованием памяти.
#Вариант_1: Общая сумма занимаемого места = 392
#Вариант_2: Общая сумма занимаемого места = 22344
#Вариант_3: Общая сумма занимаемого места = 3160
#Вывод: с позиции использования памяти вариант номер 1 самый эффективный. Потому что
# информация хранится здесь везде в качестве цифр и занимает минимальное кол-во памяти.
#На 2 месте вариант номер 3 со словарём: его плюс в том, что там могут храниться одновременно
#несколько максимальных чисел и выводятся тоже несколько. В варианте номер два (рекурсивном), боюсь,
# что-то неправильно срабатывает в подсчёте памяти за счёт рекурсии. Буду благодарен, если кто подскажет.
import random, sys, re
print(f'Python 3.7.3, ОС: MacOS Mojave 10.14.6 ')
print()
# Pick one random size shared by all three variants.
# NOTE(review): n is never actually used below — every variant is called
# with the literal 10; presumably n was meant to be passed instead.
n=random.randint(10,100)
def show_size(x, result=None):
    """Append a 'type//size//object' description of x to result and return it.

    BUGFIX: the original used a mutable default (``result=[]``), so every
    call without an explicit list shared ONE accumulator — each caller's
    ``inside.extend(show_size(...))`` therefore re-copied all previous
    entries and inflated the memory totals.
    """
    if result is None:
        result = []
    result.append(f'type={x.__class__}//size={sys.getsizeof(x)}//object={x}')
    return result
print('Вариант_1:')
def biggest_loop(n):
    """Variant 1 (plain loop): find, among 50-i for i in range(n), the number
    with the largest digit sum.  Returns (biggest_sum, biggest_num, inside)
    where inside is a list of memory-size descriptions from show_size.

    NOTE(review): indentation was reconstructed; the ``while True`` wrapper
    and the ``if (i+1)==n: break`` are both redundant because the return
    directly follows the for loop — confirm against the original layout.
    """
    biggest_sum=0
    while True:
        for i in range(n):
            #number=int(input("Введите натуральное число: "))
            #number = random.randint(1, 1000000)
            number=50-i  # fixed values so all three variants compare equal numbers
            num=number
            total_sum=0
            # NOTE(review): inside is re-created every iteration, so only the
            # final iteration's extend survives; the printed totals relied on
            # show_size's shared default list accumulating across calls.
            inside=[]
            inside.extend(show_size(num))  # memory of the variable
            while number>0:
                digit = number % 10
                total_sum=total_sum+digit
                number //= 10
            if total_sum>biggest_sum:
                biggest_sum=total_sum
                biggest_num=num
                inside.extend(show_size(biggest_sum))  # memory of the variable
                inside.extend(show_size(biggest_num))  # memory of the variable
            if (i+1)==n:
                break
        return biggest_sum, biggest_num, inside
# Run variant 1 and report the winner plus the memory accounting.
x=biggest_loop(10)
print(f'Наибольшее число по сумме цифр является {x[1]}. Сумма цифр {x[0]}.')
print()
# Memory accounting: the first number in each show_size record is the
# getsizeof figure (the 'type=' prefix contains no digits).
sum_1=0
for y in x[2]:
    print(y)
    size=re.search(r'(\d+)',y)
    sum_1+=int(size.group())
print(f'Общая сумма занимаего места = {sum_1}')
print()
print('Вариант_2:')
def biggest_rec(n, biggest_sum=0, biggest_num=0, inside=[]):
    """Variant 2 (recursive): same computation as biggest_loop, recursing
    once per n.  Returns (biggest_sum, biggest_num, inside).

    NOTE(review): ``inside=[]`` is a mutable default shared across top-level
    calls, and show_size's own shared list compounds the duplication — this
    is very likely why the author's variant-2 memory total (22344) is
    inflated.  Indentation below is reconstructed; confirm against original.
    """
    for i in reversed(range(n)):
        #number = int(input("Введите натуральное число: "))
        #number=random.randint(1,1000000)
        number=50-i  # fixed values so all three variants compare equal numbers
        saved_number=number
        total_sum=0
        inside.extend(show_size(saved_number))  # memory of the variable
        while number > 0:
            digit=number % 10
            total_sum=total_sum + digit
            number//=10
        if total_sum > biggest_sum:
            biggest_sum = total_sum
            biggest_num = saved_number
            inside.extend(show_size(biggest_sum))  # memory of the variable
            inside.extend(show_size(biggest_num))  # memory of the variable
        if i==0:
            return biggest_sum, biggest_num, inside
        # Recurse on n-1, carrying the running maximum forward.
        biggest_sum, biggest_num,inside = biggest_rec(n-1,biggest_sum, biggest_num, inside)
        return biggest_sum, biggest_num, inside
# Run variant 2 and report the winner plus the memory accounting.
x=biggest_rec(10)
print(f'Наибольшее число по сумме цифр является {x[1]}. Сумма цифр {x[0]}.')
# Memory accounting: first digit group in each record is the getsizeof figure.
sum_2=0
for y in x[2]:
    print(y)
    size=re.search(r'(\d+)',y)
    sum_2+=int(size.group())
print(f'Общая сумма занимаего места = {sum_2}')
print()
print('Вариант_3:')
def biggest_dict(n, biggest_sum=0, biggest_num=0):
    """Variant 3 (dict): build number->digit-sum pairs, then collect ALL
    numbers that share the maximum digit sum.  Returns (l_max, inside)
    where l_max is a list of (number, digit_sum) tuples.

    The biggest_sum/biggest_num parameters are unused here — kept for
    signature parity with the other variants.  Indentation reconstructed.
    """
    numbers = []
    sums = []
    for i in reversed(range(n)):
        # number = int(input("Введите натуральное число: "))
        #number=random.randint(1,1000000)
        number = 50-i  # fixed values so all three variants compare equal numbers
        numbers.append(number)
        total_sum = 0
        inside = []
        while number > 0:
            digit = number % 10
            total_sum = total_sum + digit
            number //= 10
        sums.append(total_sum)
        if i == 0:
            # Last iteration: pair numbers with sums and extract the maxima.
            l_max=[]
            d = dict(zip(numbers, sums))
            inside.extend(show_size(d))  # memory of the variable
            for key, val in d.items():
                if not (l_max):
                    l_max.append((key, val))
                elif l_max[0][1] < val:
                    l_max = [(key, val)]      # new strict maximum resets the list
                elif l_max[0][1] == val:
                    l_max.append((key, val))  # tie: keep all co-maxima
            inside.extend(show_size(l_max))  # memory of the variable
            return l_max, inside
# Run variant 3 and report all co-maximal numbers plus the memory accounting.
x=biggest_dict(10)
print(f'Наибольшее число по сумме цифр является {[i[0] for i in x[0]]}. Сумма цифр {x[0][0][1]}.')
sum_3=0
for y in x[1]:
    print(y)
    size=re.search(r'(\d+)',y)
    sum_3+=int(size.group())
print(f'Общая сумма занимаего места = {sum_3}')
print()
def display_hangman(tries):
    """Return the ASCII gallows drawing for the given number of tries left.

    stages[0] is the fully-drawn (lost) figure and stages[6] the empty
    gallows, so the index counts REMAINING tries down from 6 to 0.
    NOTE(review): the art's internal alignment was lost in extraction.
    """
    stages = [ # final state: head, torso, both arms, and both legs
        """
--------
| |
| O
| \\|/
| |
| / \\
-
""",
        # head, torso, both arms, and one leg
        """
--------
| |
| O
| \\|/
| |
| /
-
""",
        # head, torso, and both arms
        """
--------
| |
| O
| \\|/
| |
|
-
""",
        # head, torso, and one arm
        """
--------
| |
| O
| \\|
| |
|
-
""",
        # head and torso
        """
--------
| |
| O
| |
| |
|
-
""",
        # head
        """
--------
| |
| O
|
|
|
-
""",
        # initial empty state
        """
--------
| |
|
|
|
|
-
"""
    ]
    return stages[tries]
def get_guess(guessed_letters, guessed_words):
    """Prompt until the player enters a letter or word not guessed before.

    A single-character entry is checked against guessed_letters, anything
    longer against guessed_words.  Returns the accepted guess (unmodified).
    Rewritten with ``while True`` + early return instead of the
    ``keep_guessing == True`` flag-variable pattern.
    """
    while True:
        guess = input("Enter a letter or a word: ")
        if len(guess) == 1:
            # guessed a letter
            if guess not in guessed_letters:
                return guess
            print("you have already guessed that letter")
        else:
            # we guessed a word
            if guess not in guessed_words:
                return guess
            print("you have already guessed that word")
def display_word_completion(word_completion):
    """Print the current guess state on one line, each character padded
    with a space on both sides; no trailing newline is emitted."""
    rendered = ''.join(' {} '.format(ch) for ch in word_completion)
    print(rendered, end='')
def game(word, tries):
    """Run one game of hangman for `word` with `tries` wrong guesses allowed.

    Loops: draw the gallows, show progress, take a guess; correct letters
    are revealed, wrong letters/words cost one try.  Ends on a full-word
    match (win) or when tries drops below zero (loss).
    Indentation reconstructed from the flattened source.
    """
    # setup
    keep_playing = True
    guessed_letters = []
    guessed_words= []
    word = word.lower()
    word_completion = '-'*len(word)   # one dash per still-hidden letter
    while keep_playing == True:
        hangman_graphic = display_hangman(tries)
        print(hangman_graphic)
        display_word_completion(word_completion)
        print("\n")
        guess = get_guess(guessed_letters,guessed_words)
        guess= guess.lower()
        if len(guess) == 1:
            guessed_letters.append(guess)
            #if they guessed a correct letter
            if guess in word:
                # collect every position of the guessed letter ...
                word_indecies = []
                for i in range(len(word)):
                    if guess == word[i]:
                        word_indecies.append(i)
                # ... and rebuild the progress string revealing them
                temp_word_completion = ""
                for i in range(len(word_completion)):
                    if i in word_indecies:
                        temp_word_completion = temp_word_completion + word[i]
                    else:
                        temp_word_completion = temp_word_completion + word_completion[i]
                word_completion = temp_word_completion
            # if the letter isnt in the word
            else:
                print("was not in the word")
                tries = tries - 1
        # a problem for later
        else:
            guessed_words.append(guess)
            if guess == word:
                word_completion = word
                print(hangman_graphic)
                display_word_completion(word_completion)
                print("\n")
                print("YOU WIN")
            else:
                print("That was not the word")
                tries = tries - 1
        #if you lose
        if tries <0:
            print("you lose")
            break
        if word_completion == word:
            print(hangman_graphic)
            display_word_completion(word_completion)
            print("YOU WIN")
            break
def main():
    # Entry point: greet the player and run one 6-try game with a fixed word.
    print("WELCOME to hangman")
    game("Toronto",6)
main()
from os import system
# Batch driver: run the H.264 reference encoder (lencod) over a grid of
# test videos x butterfly sizes x ALF flags x QPs.
exe = "lencod.exe"
cfg = "encoder_ldp.cfg"
# Each entry: [name, frames-to-encode, frame rate, width, height].
videos = [["BasketballDrive_1920x1080_50","5","50","1920","1080"],["BQTerrace_1920x1080_60","5","60","1920","1080"],["Cactus_1920x1080_50","5","50","1920","1080"],["Jockey_3840x2160","5","120","3840","2160"],["ShakeNDry_3840x2160","5","120","3840","2160"],["HoneyBee_3840x2160","5","120","3840","2160"]]
#videos = [["Jockey_3840x2160","5","120","3840","2160"]]
#videos = [["BasketballDrive_1920x1080_50","5","50","1920","1080"]]
sizes_btrfly = ["0","4","8","16","32"]
alfs = ["0"]
qps = ["32"]
# Map frame rate (as string) to the encoder's FrameRate enum code.
fps = {}
fps["24"] = "2"
fps["25"] = "3"
fps["30"] = "5"
fps["50"] = "6"
fps["60"] = "8"
fps["120"] = "9"
for video in videos:
    for size_btrfly in sizes_btrfly:
        for alf in alfs:
            for qp in qps:
                # Skip invalid combinations: butterfly size and ALF are
                # mutually exclusive in this experiment.
                if (alf == "0" and size_btrfly == "0") or (alf == "1" and size_btrfly != "0"):
                    continue
                name_video = video[0].split("_")[0]
                cmd = "./%s -f %s -p QPPFrame=%s -p InputFile= ~/origCfP/%s.yuv -p SizePrint=%s -p PrintALF=%s -p FramesToBeEncoded=%s -p SourceWidth=%s -p SourceHeight=%s -p ReconFile=enc_%s.yuv -p OutputFile=video_%s.avs -p FrameRate=%s" % (exe, cfg, qp, video[0], size_btrfly, alf, video[1], video[3], video[4], name_video, name_video, fps[video[2]])
                #print cmd
                system(cmd)
|
## Amanda Roberts 2017/08/01
#This code utilizes machine learning to make a prediction of LAI values at ORNL
#.tif files from the BRDF flight performed there are used to "train" the Random Forest Regressor
#The regressor then gets data from the full site and predicts LAI
#Import packages needed
import numpy as np
import gdal, osr
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
def array2raster(newRasterfn, rasterOrigin, pixelWidth, pixelHeight, array, epsg):
    """Write a 2-D array to a single-band Float32 GeoTIFF.

    newRasterfn  -- name/path of the output geotiff
    rasterOrigin -- (x, y) map coordinates of the raster's starting corner
    pixelWidth   -- pixel size / direction in x (-1 left, 1 right)
    pixelHeight  -- pixel size / direction in y (-1 down/South, 1 up/North)
    array        -- 2-D array to be turned into a raster
    epsg         -- EPSG code of the output spatial reference
    """
    cols = array.shape[1]
    rows = array.shape[0]
    originX = rasterOrigin[0]
    originY = rasterOrigin[1]
    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
    # BUGFIX: the original computed the origin and pixel sizes but never
    # applied them, so the GeoTIFF was written without any georeferencing.
    outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(array)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromEPSG(epsg)
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()
#Create string variables for the location of the .tif files
uncertaintyLAI = 'S:/Users/aroberts/ORNL/Spectrometer/Deblur/DerivedIndices/LAI_Results_Normal/ORNL_std.tif'
meanNDWI = 'S:/Users/aroberts/ORNL/Spectrometer/Deblur/WaterIndices/NDWI_Results_Normal/ORNL_mean.tif'
meanNDVI = 'S:/Users/aroberts/ORNL/Spectrometer/Deblur/VegetationIndices/NDVI_Results_Normal/ORNL_mean.tif'
meanLAI = 'S:/Users/aroberts/ORNL/Spectrometer/Deblur/DerivedIndices/LAI_Results_Normal/ORNL_mean.tif'
meanAlbedo = 'S:/Users/aroberts/ORNL/Spectrometer/Deblur/DerivedIndices/Albedo_Results_Normal/ORNL_mean.tif'
#Open the files with GDAL
uncertaintyLAI_dataset = gdal.Open(uncertaintyLAI)
meanNDWI_dataset = gdal.Open(meanNDWI)
meanNDVI_dataset = gdal.Open(meanNDVI)
meanLAI_dataset = gdal.Open(meanLAI)
meanAlbedo_dataset = gdal.Open(meanAlbedo)
#Get the needed metadata from the file
cols_uncertaintyLAI = uncertaintyLAI_dataset.RasterXSize
rows_uncertaintyLAI = uncertaintyLAI_dataset.RasterYSize
#Get the raster band object
uncertaintyLAI_raster = uncertaintyLAI_dataset.GetRasterBand(1)
meanNDWI_raster = meanNDWI_dataset.GetRasterBand(1)
meanNDVI_raster = meanNDVI_dataset.GetRasterBand(1)
meanLAI_raster = meanLAI_dataset.GetRasterBand(1)
meanAlbedo_raster = meanAlbedo_dataset.GetRasterBand(1)
#Get no data value
noDataVal = uncertaintyLAI_raster.GetNoDataValue()
#Make an array from the raster
# NOTE(review): np.float is deprecated (removed in NumPy 1.24) — these
# astype(np.float) calls should become astype(float) when next touched.
uncertaintyLAI_array = uncertaintyLAI_raster.ReadAsArray(0, 0, cols_uncertaintyLAI, rows_uncertaintyLAI).astype(np.float)
meanNDWI_array = meanNDWI_raster.ReadAsArray(0, 0, cols_uncertaintyLAI, rows_uncertaintyLAI).astype(np.float)
meanNDVI_array = meanNDVI_raster.ReadAsArray(0, 0, cols_uncertaintyLAI, rows_uncertaintyLAI).astype(np.float)
meanLAI_array = meanLAI_raster.ReadAsArray(0, 0, cols_uncertaintyLAI, rows_uncertaintyLAI).astype(np.float)
meanAlbedo_array = meanAlbedo_raster.ReadAsArray(0, 0, cols_uncertaintyLAI, rows_uncertaintyLAI).astype(np.float)
#Get the shape
shape = uncertaintyLAI_array.shape
#Get how many rows are in the array
rows_for_training = int(shape[0])
#Make training data (here: ALL rows of the BRDF-flight rasters)
predictor_data = uncertaintyLAI_array[:rows_for_training]
training_data_1 = meanNDWI_array[:rows_for_training]
training_data_2 = meanNDVI_array[:rows_for_training]
training_data_3 = meanLAI_array[:rows_for_training]
training_data_7 = meanAlbedo_array[:rows_for_training]
#Turn into vectors, which are needed for processing
predictor_data_vec = np.reshape(predictor_data, (rows_for_training * shape[1], 1))
training_data_1_vec = np.reshape(training_data_1, (rows_for_training * shape[1], 1))
training_data_2_vec = np.reshape(training_data_2, (rows_for_training * shape[1], 1))
training_data_3_vec = np.reshape(training_data_3, (rows_for_training * shape[1], 1))
training_data_7_vec = np.reshape(training_data_7, (rows_for_training * shape[1], 1))
#Set the values that are unrealistically high and 0 (indicitive of values too high for the equation used to derive LAI) to the 99th percentile
training_data_3_vec[training_data_3_vec == 0] = np.percentile(meanLAI_array[meanLAI_array != -9999], 99)
training_data_3_vec[training_data_3_vec > np.percentile(meanLAI_array[meanLAI_array != -9999], 99)] = \
    np.percentile(meanLAI_array[meanLAI_array != -9999], 99)
#Get all the values that aren't -9999
goodValues_predictor = np.where(predictor_data_vec != noDataVal)
goodValues_training1 = np.where(training_data_1_vec != noDataVal)
goodValues_training2 = np.where(training_data_2_vec != noDataVal)
goodValues_training3 = np.where(training_data_3_vec != noDataVal)
goodValues_training7 = np.where(training_data_7_vec != noDataVal)
#Print out how many good values it has
print('Predictor good values ', np.count_nonzero(predictor_data_vec != noDataVal))
print('Training 1 good values: ', np.count_nonzero(training_data_1_vec != noDataVal))
print('Training 2 good values: ', np.count_nonzero(training_data_2_vec != noDataVal))
print('Training 3 good values: ', np.count_nonzero(training_data_3_vec != noDataVal))
print('Training 7 good values: ', np.count_nonzero(training_data_7_vec != noDataVal))
#Create a variable that records where the good values overlap
intersect = np.intersect1d(goodValues_predictor[0], goodValues_training1[0])
intersect = np.intersect1d(intersect, goodValues_training2[0])
intersect = np.intersect1d(intersect, goodValues_training3[0])
intersect = np.intersect1d(intersect, goodValues_training7[0])
# NOTE(review): count_nonzero on an array of INDICES undercounts by one
# whenever index 0 is present; len(intersect) is what was probably meant.
print(np.count_nonzero(intersect))
#Combine all the overlapping data into one array
all_training_data = np.concatenate([training_data_1_vec[intersect], training_data_2_vec[intersect], \
    training_data_3_vec[intersect],training_data_7_vec[intersect]], axis=1)
predictor_data_vec = predictor_data_vec[intersect]
#Define parameters for Random Forest Regressor; the depth is the max number of branches the forest can have
max_depth = 10
#Define regressor rules
regr_rf = RandomForestRegressor(max_depth = max_depth, random_state=2)
#Fit the data to regressor variables
regr_rf.fit(all_training_data, predictor_data_vec.flatten())
##Start getting information for the whole site files
#String variable that stores the location of the tif file
meanFullSiteNDWI = 'S:/Users/aroberts/ORNL/FullMosiac/Spectrometer/NDWI.tif'
meanFullSiteNDVI = 'S:/Users/aroberts/ORNL/FullMosiac/Spectrometer/NDVI.tif'
meanFullSiteLAI = 'S:/Users/aroberts/ORNL/FullMosiac/Spectrometer/LAI/LAI.tif'
meanFullSiteAlbedo = 'S:/Users/aroberts/ORNL/FullMosiac/Spectrometer/ALBD.tif'
#Open the files with GDAL
meanFullSiteNDWI_dataset = gdal.Open(meanFullSiteNDWI)
meanFullSiteNDVI_dataset = gdal.Open(meanFullSiteNDVI)
meanFullSiteLAI_dataset = gdal.Open(meanFullSiteLAI)
meanFullSiteAlbedo_dataset = gdal.Open(meanFullSiteAlbedo)
#Get the raster from the file
meanFullSiteNDWI_raster = meanFullSiteNDWI_dataset.GetRasterBand(1)
meanFullSiteNDVI_raster = meanFullSiteNDVI_dataset.GetRasterBand(1)
meanFullSiteLAI_raster = meanFullSiteLAI_dataset.GetRasterBand(1)
meanFullSiteAlbedo_raster = meanFullSiteAlbedo_dataset.GetRasterBand(1)
#Get the needed metadata
cols_fullSite = meanFullSiteNDWI_dataset.RasterXSize
rows_fullSite = meanFullSiteNDWI_dataset.RasterYSize
#Create an array from the raster
meanFullSiteNDWI_array = meanFullSiteNDWI_raster.ReadAsArray(0, 0, cols_fullSite, rows_fullSite).astype(np.float)
meanFullSiteNDVI_array = meanFullSiteNDVI_raster.ReadAsArray(0, 0, cols_fullSite, rows_fullSite).astype(np.float)
meanFullSiteLAI_array = meanFullSiteLAI_raster.ReadAsArray(0, 0, cols_fullSite, rows_fullSite).astype(np.float)
meanFullSiteAlbedo_array = meanFullSiteAlbedo_raster.ReadAsArray(0, 0, cols_fullSite, rows_fullSite).astype(np.float)
#Get the shape of the array
shapeFull = meanFullSiteNDWI_array.shape
#Get the number of rows in the array
fullSize = int(shapeFull[0])
#Make validation data
#validation_predictor = uncertaintyLAI_array[rows_for_training+1:]
validation_data_1 = meanFullSiteNDWI_array[:fullSize]
validation_data_2 = meanFullSiteNDVI_array[:fullSize]
validation_data_3 = meanFullSiteLAI_array[:fullSize]
validation_data_7 = meanFullSiteAlbedo_array[:fullSize]
#Turn into vectors
#validation_predictor_vec = np.reshape(validation_predictor, ((shape[0] - rows_for_training - 1) * shape[1], 1))
validation_data_1_vec = np.reshape(validation_data_1, ((shapeFull[0]) * shapeFull[1], 1))
validation_data_2_vec = np.reshape(validation_data_2, ((shapeFull[0]) * shapeFull[1], 1))
validation_data_3_vec = np.reshape(validation_data_3, ((shapeFull[0]) * shapeFull[1], 1))
validation_data_7_vec = np.reshape(validation_data_7, ((shapeFull[0]) * shapeFull[1], 1))
#Get good data points (full-site rasters use NaN rather than -9999)
#goodValues_validation_predictor = np.where(np.isnan(validation_predictor_vec != noDataVal)
goodValues_validation1 = np.where(np.isfinite(validation_data_1_vec))
goodValues_validation2 = np.where(np.isfinite(validation_data_2_vec))
goodValues_validation3 = np.where(np.isfinite(validation_data_3_vec))
goodValues_validation7 = np.where(np.isfinite(validation_data_7_vec))
#print('validation predictor good values ', np.count_nonzero(goodValues_validation_predictor != noDataVal))
#print('validation 1 good values ', np.count_nonzero(np.isfinite(validation_data_1_vec))
#print('validation 2 good values ', np.count_nonzero(np.isfinite(validation_data_2_vec))
#print('validation 3 good values ', np.count_nonzero(np.isfinite(validation_data_3_vec))
#print('validation 7 good values ', np.count_nonzero(np.isfinite(validation_data_7_vec))
#Get the locations of all
intersectVal = np.intersect1d(goodValues_validation1[0], goodValues_validation7[0])
intersectVal = np.intersect1d(intersectVal, goodValues_validation2[0])
intersectVal = np.intersect1d(intersectVal, goodValues_validation3[0])
#Concatinate all the data into one array
all_validation_data = np.concatenate([validation_data_1_vec[intersectVal], validation_data_2_vec[intersectVal], \
    validation_data_3_vec[intersectVal], validation_data_7_vec[intersectVal]],axis=1)
#validation_predictor_vec = validation_predictor_vec[intersectVal]
#Get a vector of predicted values for the whole site
pred_uncertaintyLAI_vec = regr_rf.predict(all_validation_data)
#difference_uncertaintyLAI_vec = validation_predictor_vec.flatten() - pred_uncertaintyLAI_vec
#Create a histogram of the predicted values
plt.hist(pred_uncertaintyLAI_vec,bins=250,histtype='step')
#plt.hist(difference_uncertaintyLAI_vec,bins=250,histtype='step')
#Create a new array of -9999 and replace them with the predicted values where the data existed before
outArray = np.zeros(len(validation_data_3_vec))
outArray = outArray - 9999
for counter in range (0, len(intersectVal)):
    outArray[intersectVal[counter]] = pred_uncertaintyLAI_vec[counter]
#outDifArray = np.zeros(len(validation_data_3_vec))
#outDifArray = outDifArray - 9999
#for counter in range (0, len(intersectVal)):
#    outDifArray[intersectVal[counter]] = difference_uncertaintyLAI_vec[counter]
#Make the array the right size
# NOTE(review): outArray has shapeFull[0]*shapeFull[1] elements, but this
# reshape targets (shapeFull[0] - rows_for_training - 1, shape[1]) — the
# element counts only match by coincidence of raster sizes; the intended
# target was presumably (shapeFull[0], shapeFull[1]).  Verify before reuse.
outArray = np.reshape(outArray, ((shapeFull[0] - rows_for_training - 1), shape[1]))
#outDifArray = np.reshape(outDifArray, ((shape[0] - rows_for_training - 1), shape[1]))
#Get he needed metadata
mapinfo = meanLAI_dataset.GetGeoTransform()
xMin = mapinfo[0]
yMax = mapinfo[3]
#Make geotiff files
array2raster('predict_LAIuncertainty_20170724_NDVI_LAI_Alb_NDWI.tif', (xMin, yMax), 1, -1, outArray, 26916) #predictions
#array2raster('predict_LAIuncertainty_difference_20170724_NDVI_lai_alb_ndwi_weight_lai_1.tif', (xMin, yMax), 1, -1, outDifArray, 26916) #difference
#Print out how important each feature was in the regressor
importances = regr_rf.feature_importances_
print(importances)
#Get the r2 value (on the training data itself — not an independent check)
r2_predictor = regr_rf.score(all_training_data, predictor_data_vec.flatten())
import os
def loadfile(name, save_path='/Users/zzw_e/Desktop/Research/Recommender game/game-level-recommender-system/examples/label'):
    """Index every character of a grid-level file by scan position.

    Reads `name`, assigns each cell a running row-major index, and writes
    "char : i, j, ..." lines to save_path/label_<name>.

    save_path is now a parameter (defaulting to the original hard-coded
    location) so the function is reusable and testable.  Files are opened
    with `with` so handles are closed even on error (the original leaked
    both handles if a write failed).
    """
    count = 0
    mapKey = {}
    with open(name, "r") as src:
        rows = [line.rstrip() for line in src]
    for row in rows:
        for ch in row:
            if ch not in mapKey:
                mapKey[ch] = str(count)
            else:
                mapKey[ch] += ", " + str(count)
            count += 1
    output_name = os.path.join(save_path, "label_" + name)
    with open(output_name, "w") as output:
        output.write("object : Index\n\n")
        for ch in mapKey:
            output.write(ch + " : " + mapKey[ch] + "\n\n")
def main():
    # Collect every .txt/.py file in the (hard-coded) gridphysics directory,
    # then run loadfile over each level file ("lvl" in its name).
    path = "/Users/zzw_e/Desktop/Research/Recommender game/game-level-recommender-system/examples/gridphysics"
    directory = os.fsencode(path)
    fLst = []
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        if filename.endswith(".txt") or filename.endswith(".py"):
            # print(os.path.join(directory, filename))
            fLst.append(filename)
        else:
            continue
    fLst.sort()
    print(fLst)
    for file in fLst:
        if "lvl" in file:
            # NOTE(review): passes the bare filename, so loadfile opens it
            # relative to the CWD, not the directory that was listed — this
            # only works when the script is run from `path`.
            loadfile(file)
main()
loadfile("aliens_lvl0.txt")
import argparse
from yacs.config import CfgNode as CN
from ..config import get_cfg
def default_argument_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the segmentation pipeline.

    Returns the parser itself.  (BUGFIX: the annotation previously claimed
    ``argparse.Namespace``, but no ``parse_args()`` call is made here —
    callers receive the parser and parse themselves.)
    """
    parser = argparse.ArgumentParser(description='Segmentation Pipeline')
    parser.add_argument("--config-file", default="", metavar="FILE",
                        help="path to config file")
    # Trailing free-form KEY VALUE pairs are collected for cfg.merge_from_list.
    parser.add_argument("opts", default=None, nargs=argparse.REMAINDER,
                        help="Modify config options using command-line")
    return parser
def load_config(args: argparse.Namespace) -> CN:
    """Build the config: start from defaults, then overlay the config file
    (if given) and finally any KEY VALUE overrides from the command line."""
    cfg = get_cfg()
    if args.config_file:
        cfg.merge_from_file(args.config_file)
    if args.opts:
        cfg.merge_from_list(args.opts)
    return cfg
|
import pymysql

# Mark a student present for a given day by setting column d<day> to 1.
# Keyword connect args: positional form is deprecated in PyMySQL.
db = pymysql.connect(host='localhost', user='root', password='root', database='student')
cursor = db.cursor()
sid = int(input('Enter student id: '))
daytomarkattendance = int(input('Enter day to mark attendance: '))
try:
    # The day picks a COLUMN name and cannot be a bound parameter; it is safe
    # to interpolate because int() already validated it.  The student id IS
    # bound, replacing the string-concatenated SQL of the original.
    r = cursor.execute(
        'UPDATE attendance SET d%d = 1 WHERE id = %%s' % daytomarkattendance,
        (sid,))
    if r:
        print('Success')
    else:
        print('Failure')
    db.commit()
except Exception as e:
    print(e)
finally:
    db.close()  # always release the connection
|
#https://www.hackerrank.com/contests/projecteuler/challenges/euler244/problem
letter_code = {"L": 76, "R": 82, "U": 85, "D": 68}


def get_checksum(seq, i=0, checksum=0):
    """Fold a move sequence (L/R/U/D) into the Project Euler 244 checksum.

    Iterates from position ``i`` (the ``i``/``checksum`` parameters preserve
    the old recursive interface).  BUGFIX: the original recursed once per
    character, hitting Python's recursion limit on long sequences, and
    raised IndexError on an empty sequence — an empty tail now simply
    returns the running checksum.
    """
    for ch in seq[i:]:
        checksum = (checksum * 243 + letter_code[ch]) % 100000007
    return checksum


print(get_checksum("LULUR"))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Singleton(object):
    """Classic singleton: every instantiation returns the same instance."""
    # The one shared instance, created lazily on first construction.
    _instance = None

    def __new__(cls, *args, **kwargs):
        if not cls._instance:
            # BUGFIX: object.__new__ must be called with the class only —
            # forwarding *args/**kwargs raises TypeError on Python 3 as soon
            # as the constructor is given any argument.
            cls._instance = super(Singleton, cls).__new__(cls)
        return cls._instance
if __name__ == '__main__':
    # Demonstrate that two constructions yield the very same object.
    test_1 = Singleton()
    test_2 = Singleton()
    # BUGFIX: the original used Python 2 `print` statements, a syntax error
    # on Python 3; print is now called as a function.
    if id(test_1) == id(test_2):
        print("Same")
    else:
        print("Different")
|
from django.urls import path
from website.views import LandindPageView
# NOTE(review): "Landind" looks like a typo for "Landing", but the name must
# match website.views — rename it there first if correcting.
app_name = 'website'
# Route the site root to the landing page view.
urlpatterns = [
    path('', LandindPageView.as_view(), name='landind_page')
]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# URL where the image crawl starts (normally the site's main page)
START_URL = 'http://www.juemei.com/mm/'
# Regex matching further page URLs to scan
URL_TO_SCAN_PATT = r'/mm/[^"\']+'
# Regex matching image URLs to download
IMG_TO_DOWN_PATT = (r'http://img\.juemei\.com/\w+/'
                    r'\d{4}-\d{2}-\d{2}/\w{13}\.jpg')
|
from __future__ import print_function
import os
# Pin the visible GPU before torch is imported.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import torch, cv2
import torch.utils.data
from torch.autograd import Variable
import numpy as np
from data.kth import KTH_LIST_img_dyan
import skimage
# Evaluation setup: load the pretrained DYAN decoder plus the Encoder/Generator
# checkpoints for one experiment, and a KTH test loader (batch size 1).
rootDir = '/data/huaiyu/data/kth_action_full_exp/e3d_action_full_64'
dyan_model = torch.load('/data/huaiyu/DYAN/DYAN_kth/weight/e3d_action_full_64/64_Model_3.pth').cuda()
dyan_model.eval()
experiment_index = 'kth_action_full_64_v7_1'
testingData = torch.utils.data.DataLoader(KTH_LIST_img_dyan(rootDir, 10, 'test'), batch_size=1, shuffle=False)
model_id = '195'
Encoder = torch.load('./checkpoint/' + experiment_index + '/Encoder_' + model_id + '.pt').cuda()
Generator = torch.load('./checkpoint/' + experiment_index + '/Generator_' + model_id + '.pt').cuda()
saveDir = './results/' + experiment_index
def reparameterization(mu, logvar):
    """VAE reparameterization trick: z = mu + std * eps with eps ~ N(0, 1).

    BUGFIX: the original computed z but returned the raw noise
    ``sampled_z``, silently discarding the encoder's mu/logvar — the
    generator then received pure noise instead of the latent code.
    """
    std = torch.exp(logvar / 2)
    sampled_z = Variable(
        torch.cuda.FloatTensor(np.random.normal(0, 1, (mu.size(0), 128))))
    z = sampled_z * std + mu
    return z
all_sum_psnr = 0
with torch.no_grad():
    for i, sample in enumerate(testingData):
        Encoder.eval()
        Generator.eval()
        real_A = sample['real_A'].cuda()
        real_B = sample['real_B'].cuda()
        # Folder name: <sequence>_<clip index zero-padded to 5 digits>.
        folderName = sample['Name'][0].split('/')[-2] + '_%05d' % int(
            sample['Name'][0].split('/')[-1][-9:-5])
        imgPath = os.path.join(saveDir, folderName)
        if not os.path.exists(imgPath):
            os.makedirs(imgPath)
        # Encode the conditioning clip, sample a latent, generate, then let
        # the DYAN model decode both generated and real codes into frames.
        real_A = torch.reshape(real_A, (1, 64, 128, 128))
        mus, logvar = Encoder(real_A)
        encoded_z = reparameterization(mus, logvar)
        fake_B = Generator(real_A, encoded_z)
        fake_frames_B = dyan_model.forward3(fake_B)
        frames_B = dyan_model.forward3(real_B)
        fake_frames_B = fake_frames_B.cpu().numpy().reshape(10, 128, 128)
        frames_B = frames_B.cpu().numpy().reshape(10, 128, 128)
        sum_psnr = 0
        for j in range(10):
            # Min-max normalize each real/fake frame to [0, 255] before
            # saving and scoring.
            img_r = frames_B[j, :, :]
            amin_r, amax_r = img_r.min(), img_r.max()
            nor_r = (img_r - amin_r) / (amax_r - amin_r)
            nor_r = nor_r * 255
            cv2.imwrite(imgPath + '/real_B_{:d}.jpg'.format(j), nor_r)
            img_f = fake_frames_B[j, :, :]
            amin_f, amax_f = img_f.min(), img_f.max()
            nor_f = (img_f - amin_f) / (amax_f - amin_f)
            nor_f = nor_f * 255
            cv2.imwrite(imgPath + '/fake_B_{:d}.jpg'.format(j), nor_f)
            # NOTE(review): skimage.measure.compare_psnr was removed in
            # skimage 0.18 (now skimage.metrics.peak_signal_noise_ratio).
            psnr = skimage.measure.compare_psnr(nor_r, nor_f, 255)
            sum_psnr += psnr
        sum_psnr = sum_psnr / 10
        all_sum_psnr += sum_psnr
        psnr_log = 'clips:{}, psnr:{}, avg_psnr:{}\n'.format(i, sum_psnr, all_sum_psnr / (i + 1))
        print(psnr_log)
        file = open('./results/log_' + experiment_index + '.txt', mode='a+')
        file.write(psnr_log)
        file.close()
|
"""
Simplistic non-optimized, native Python implementation showing the mechanics
of TimSort.
This code is designed to show how TimSort uses Insertion Sort and Merge Sort
as its constituent building blocks. It is not the actual sorting algorithm,
because of extra complexities that optimize this base algorithm even further.
Full details on the sorting algorithm are in the actual CPython code base,
but Tim Peters has provided documentation explaining reasons behind many
of the choices in Tim Sort.
https://hg.python.org/cpython/file/tip/Objects/listsort.txt
"""
import timeit
from algs.table import DataTable
def merge(A, lo, mid, hi, aux):
    """Merge the adjacent sorted runs A[lo..mid] and A[mid+1..hi] in place,
    using aux as scratch space (aux must be at least hi+1 long)."""
    aux[lo:hi+1] = A[lo:hi+1]
    take_left = lo       # next unconsumed element of the left run
    take_right = mid + 1  # next unconsumed element of the right run
    for out in range(lo, hi + 1):
        if take_left > mid:            # left run exhausted
            A[out] = aux[take_right]
            take_right += 1
        elif take_right > hi:          # right run exhausted
            A[out] = aux[take_left]
            take_left += 1
        elif aux[take_right] < aux[take_left]:
            # strictly-less keeps equal keys in left-run order (stability)
            A[out] = aux[take_right]
            take_right += 1
        else:
            A[out] = aux[take_left]
            take_left += 1
# https://hg.python.org/cpython/file/tip/Objects/listsort.txt
# Instead we pick a minrun in range(32, 65) such that N/minrun is exactly a
# power of 2, or if that isn't possible, is close to, but strictly less than,
# a power of 2. This is easier to do than it may sound: take the first 6
# bits of N, and add 1 if any of the remaining bits are set. In fact, that
# rule covers every case in this section, including small N and exact powers
# of 2; merge_compute_minrun() is a deceptively simple function.
def compute_min_run(n):
    """Compute min_run to use when sorting n total values.

    Takes the top six bits of n, adding 1 if any of the discarded low
    bits were set (per the CPython listsort.txt rule).
    """
    add_one = 0
    while n >= 64:
        if n % 2:
            add_one = 1
        n //= 2
    return n + add_one
def insertion_sort(A, lo, hi):
    """Sort A[lo..hi] in place with a stable insertion sort: each element
    sinks left only past strictly greater neighbours."""
    for pos in range(lo + 1, hi + 1):
        k = pos
        while k > lo and A[k] < A[k - 1]:
            A[k - 1], A[k] = A[k], A[k - 1]
            k -= 1
def tim_sort(A):
    """Apply simplistic Tim Sort implementation on A."""
    # Small arrays are sorted using insertion sort
    N = len(A)
    if N < 64:
        insertion_sort(A,0,N-1)
        return
    # Insertion sort in strips of 'size' to create the initial sorted runs
    size = compute_min_run(N)
    for lo in range(0, N, size):
        insertion_sort(A, lo, min(lo+size-1, N-1))
    aux = [None]*N
    # Bottom-up merging: each pass merges adjacent runs of length `size`,
    # doubling the run length until one run covers the whole array.
    while size < N:
        # Merge all doubled ranges, taking care with last one
        for lo in range(0, N, 2*size):
            mid = min(lo + size - 1, N-1)
            hi = min(lo + 2*size - 1, N-1)
            # When the right run is empty (mid == hi), merge degenerates
            # to copying the left run back unchanged.
            merge(A, lo, mid, hi, aux)
        size = 2 * size
def timing_nlogn_sorting_real_world(max_k=18, output=True):
    """
    Confirm N Log N performance of Merge Sort, Heap Sort and Python's built-in sort
    for n in 2**k for k up to (but not including) max_k=18.
    Represents real-world case where Tim Sort shines, namely, where you are
    adding random data to an already sorted set.

    Each trial builds an 80%-sorted list (sorted prefix + shuffled tail)
    and takes the minimum of 10 timed runs.  Returns the DataTable.
    """
    # Build model
    tbl = DataTable([12,10,10,10,10],['N','MergeSort', 'Quicksort', 'TimSort', 'PythonSort'],
                    output=output)
    for n in [2**k for k in range(8, max_k)]:
        t_ms = min(timeit.repeat(stmt='merge_sort(A)', setup='''
import random
from ch05.merge import merge_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        t_qs = min(timeit.repeat(stmt='quick_sort(A)', setup='''
import random
from ch05.sorting import quick_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        t_ps = min(timeit.repeat(stmt='A.sort()', setup='''
import random
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        t_ts = min(timeit.repeat(stmt='tim_sort(A)', setup='''
import random
from ch05.timsort import tim_sort
A=list(range(int({0}*.8)))
B=list(range({0}-len(A)))
random.shuffle(B)
A.extend(B)'''.format(n), repeat=10, number=1))
        # Column order is N, MergeSort, Quicksort, TimSort, PythonSort.
        tbl.row([n, t_ms, t_qs, t_ts, t_ps])
    return tbl
|
from numpy import *
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import os
import pdb
from copy import deepcopy
# For heritage use of PyXFocus.
import PyXFocus.sources as src
import PyXFocus.transformations as trans
import PyXFocus.surfaces as surf
import PyXFocus.analyses as anal
import PyXFocus.conicsolve as conic
# For getting chromatic aberration implemented based on the actual line shape of Mg Kalpha.
import zp_design.emission_line_sources as emlines
# For the zone plates.
import zp_design.define_zp as dzp
import zp_design.zp_tolerancing as zpt
# Throwing in a zone plate so that we can test functionality.
# Default number of rays used by the smoke tests / diagnostics below.
n_rays = 10**5
#############################################################################################
# Defining the ray object class. This is identical to the ray class from arcusTrace.
class ray_obj:
    """Pair a PyXFocus ray list with per-ray wavelength, index and weight.

    Layout is identical to the ray class from arcusTrace. PyXFocusRays is
    the standard 10-element list [opd, x, y, z, vx, vy, vz, nx, ny, nz];
    the attributes alias those arrays (no copy is made).
    """
    def __init__(self, PyXFocusRays, wave):
        self.opd = PyXFocusRays[0]
        self.x = PyXFocusRays[1]
        self.y = PyXFocusRays[2]
        self.z = PyXFocusRays[3]
        self.vx = PyXFocusRays[4]
        self.vy = PyXFocusRays[5]
        self.vz = PyXFocusRays[6]
        self.nx = PyXFocusRays[7]
        self.ny = PyXFocusRays[8]
        self.nz = PyXFocusRays[9]
        self.wave = wave
        # Per-ray bookkeeping: a stable id and a statistical weight of 1.
        self.index = arange(len(PyXFocusRays[0]))
        self.weight = ones(len(PyXFocusRays[0]))

    def set_prays(self, PyXFocusRays, ind=None):
        """Overwrite the stored ray components, optionally only at indices ind."""
        if ind is not None:
            self.opd[ind] = PyXFocusRays[0]
            self.x[ind] = PyXFocusRays[1]
            self.y[ind] = PyXFocusRays[2]
            self.z[ind] = PyXFocusRays[3]
            self.vx[ind] = PyXFocusRays[4]
            self.vy[ind] = PyXFocusRays[5]
            self.vz[ind] = PyXFocusRays[6]
            self.nx[ind] = PyXFocusRays[7]
            self.ny[ind] = PyXFocusRays[8]
            self.nz[ind] = PyXFocusRays[9]
        else:
            self.opd = PyXFocusRays[0]
            self.x = PyXFocusRays[1]
            self.y = PyXFocusRays[2]
            self.z = PyXFocusRays[3]
            self.vx = PyXFocusRays[4]
            self.vy = PyXFocusRays[5]
            self.vz = PyXFocusRays[6]
            self.nx = PyXFocusRays[7]
            self.ny = PyXFocusRays[8]
            self.nz = PyXFocusRays[9]

    def yield_prays(self, ind=None):
        """Return the PyXFocus-style 10-element list (views of the stored arrays)."""
        if ind is not None:
            return [self.opd[ind],self.x[ind],self.y[ind],self.z[ind],self.vx[ind],self.vy[ind],self.vz[ind],self.nx[ind],self.ny[ind],self.nz[ind]]
        else:
            return [self.opd,self.x,self.y,self.z,self.vx,self.vy,self.vz,self.nx,self.ny,self.nz]

    def yield_object_indices(self, ind):
        """Return a deep copy holding only the rays selected by ind
        (every attribute, including wave/index/weight, is filtered)."""
        new_object = deepcopy(self)
        for key in self.__dict__.keys():
            new_object.__dict__[key] = self.__dict__[key][ind]
        return new_object

    def pickle_me(self, pickle_file):
        """Pickle [member_names, member_values] of a deep copy to pickle_file."""
        # BUG FIX: the original called copy.deepcopy and cPickle, but neither
        # 'copy' nor 'cPickle' is imported at module level (only deepcopy is).
        try:
            import cPickle as pickle_mod  # Python 2 (this module's vintage)
        except ImportError:
            import pickle as pickle_mod  # Python 3 fallback
        new_object = deepcopy(self)
        keys = list(new_object.__dict__.keys())
        attribs = []
        attribs.append(keys)
        attribs.append([new_object.__dict__[key] for key in keys])
        # Context manager guarantees the file is closed even on error.
        with open(pickle_file, 'wb') as f:
            pickle_mod.dump(attribs, f)
class wolterI:
    """Geometry container for a Wolter-I optic (no tracing logic here).

    Lengths appear to be millimeters (see the 50 mm note below) -- confirm
    against the rest of the package. conic.primrad evaluates the primary
    mirror radius at a given axial position.
    """
    def __init__(self,r0,z0,z_ps = 2.5, z_ss = 2.5,mirror_length = 100.):
        # Radius of curvature of the Wolter-I optic.
        self.r0 = r0
        # Focal length of the Wolter-I optic.
        self.z0 = z0
        # Z separation between node point and primary.
        self.z_ps = z_ps
        # Z separation between node point and secondary.
        self.z_ss = z_ss
        # Z length of the mirror
        self.mirror_length = mirror_length
        # Distance in Z between the node point and the zone plate collimator.
        # The first number means that the ZP is 50 mm away from the Wolter-I optic
        # nominally.
        self.zp_wolter_sep = 50. + mirror_length + z_ps
        # Primary-mirror radii at its far (rmax) and near (rmin) axial edges;
        # used later to build the zeroth-order blocking mask.
        self.primary_rmax = conic.primrad(self.z0 + self.z_ps + self.mirror_length,self.r0,self.z0)
        self.primary_rmin = conic.primrad(self.z0 + self.z_ps,self.r0,self.z0)
#############################################################################################
# Defining the functions needed to raytrace a zone plate.
def illum_zp(xsource, ysource, zsource, rin, rout, tmin, tmax, num, source_size):
    '''
    Sub-apertured annulus beam confined to inner and outer radius
    that converges to a location xsource, ysource, zsource.

    Parameters
    ----------
    xsource, ysource, zsource : int / float
        Location of the source.
    rin, rout : int / float
        Inner / outer radius of the sub-apertured annulus beam.
    tmin, tmax : int / float
        Minimum / maximum angular extent of the sub-apertured annulus beam.
    num : int
        Number of rays to create.
    source_size : int / float
        Radial dimension of the source.

    Returns
    -------
    rays : list
        List of ray parameters (opd, x, y, z, l, m, n, ux, uy, uz).
    '''
    # Radii drawn so rays are uniform per unit area over the annulus.
    radius = sqrt(rin**2 + random.rand(num) * (rout**2 - rin**2))
    azimuth = tmin + random.rand(num) * (tmax - tmin)
    x = radius * cos(azimuth)
    y = radius * sin(azimuth)
    z = zeros(num)
    # Random offset within the finite source footprint (uniform in radius).
    src_rho = random.rand(num) * source_size
    src_theta = random.rand(num) * 2 * pi
    # Direction vectors from the (offset) source point to each annulus point.
    vx = x - xsource + src_rho * cos(src_theta)
    vy = y - ysource + src_rho * sin(src_theta)
    vz = z - zsource
    inv_len = 1. / sqrt(vx**2 + vy**2 + vz**2)
    vx, vy, vz = vx * inv_len, vy * inv_len, vz * inv_len
    # Surface-normal slots are unused here; opd doubles as a ray index.
    ux = repeat(0., num)
    uy = repeat(0., num)
    uz = repeat(0., num)
    opd = arange(num)
    return [opd, x, y, z, vx, vy, vz, ux, uy, uz]
def trace_zp(zp,src_dist,n_rays = 10**6,tols = None,order = 1,wave = emlines.mgk_limited,source_size = 0.0):
    '''
    Generalized raytracing function for a zone plate. We create a point source at the defined
    source distance, trace to the zone plate specified in zp, diffract based on order, and return the
    rays immediately after they leave the zone plate. The zone plate is misaligned to the source based
    on the tolerances given in "tols".
    The zone plate is symmetrically placed about the +x axis.

    Parameters:
    -----------
    zp: zone plate object as defined from the zone plate design module.
    src_dist: distance between the zone plate and the source. This should be negative for forward propagation of the rays.
    n_rays: number of rays to create as propagating from the source
    tols: a list of the misalignment tolerances in the order:
        [dz (defocus), offaxis_ang (theta in Menz dissertation), phi (mixing angle between misalignment in y vs. x), dd (error in groove density)].
        Note that dx and dy are resulted to the off-axis angle via offaxis_ang = arctan(sqrt(dx**2 + dy**2)/(source_dist + dz)) and phi = arcsin(dy/sqrt(dx**2 + dy**2))
    order: diffraction order
    wave: emission line instance from emlines to create the wavelength distribution of the source rays. If a single number, makes
        the beam monochromatic.
    source_size: the size of the X-ray source. Note that due to implementation issue in PyXFocus, this doesn't work without a
        modified PyXFocus.sources package.
    '''
    if tols is not None:
        [dz,offaxis_ang,phi,dd] = tols
    else:
        [dz,offaxis_ang,phi,dd] = [0.,0.,0.,0.]
    # NOTE(review): dd (groove-density error) is unpacked but never used here --
    # presumably it is carried by the zp object into trace_rays; confirm.
    # Source placement implementing defocus (dz) and off-axis angle/direction.
    norm = -(src_dist - dz)*tan(offaxis_ang)
    xsource,ysource = cos(phi)*norm,sin(phi)*norm
    # Creates the rays already vignetted by the zone plate. A segmented zone plate is handled by having a nonzero
    # r_min and a theta_seg less than 2*pi defined in the zp instance.
    pyxrays = illum_zp(xsource,ysource,src_dist - dz,zp.r_min,zp.r_max,-zp.theta_seg/2.,zp.theta_seg/2.,n_rays,source_size)
    # Assign each ray a wavelength: monochromatic for a plain number
    # (isinstance also accepts ints, unlike the original float-only check),
    # otherwise drawn from the emission-line distribution.
    if isinstance(wave, (int, float)):
        waves = ones(n_rays)*wave
    else:
        try:
            waves = wave.draw_waves(n_rays)
        except AttributeError:
            # Narrowed from a bare except so genuine errors inside
            # draw_waves propagate instead of being masked as TypeError.
            raise TypeError('parameter "wave" is an incompatible format. Please specify a float or an emlines instance.' )
    # Wrap in a ray object for use with zp.trace_rays. (The unused
    # 'ideal_pyxrays' deep copy of the original was removed.)
    rays = ray_obj(pyxrays,waves)
    # Finally, diffracting through the zone plate.
    zp.trace_rays(rays,order)
    return rays
def focus_zp_rays(rays):
    '''
    Quick handle function for diagnostics: move a ray object's rays to the
    analytically determined best-focus plane.

    Returns (rays, pyxrays, dz). pyxrays are the arrays stored inside rays
    (yield_prays returns them by reference), so -- assuming the PyXFocus
    transform/surface calls mutate in place -- the ray object is updated too.
    '''
    pyxrays = rays.yield_prays()
    # Best-focus offset along z from the current plane.
    dz = anal.analyticImagePlane(pyxrays)
    trans.transform(pyxrays,0.,0.,dz,0.,0.,0.)
    surf.flat(pyxrays)
    return rays,pyxrays,dz
#############################################################################################
# This set of functions enables the tracing of a zone plate / Wolter-I optical system, as well
# as facilitating a direct comparison between being directly illuminated by the beamline as
# being illuminated by a collimating zone plate.
def wolterItrace(rays,wolterIoptic,scatter = False):
    '''
    A raytracing function taking a ray object centered on a Wolter-I optic defined
    by the WolterI class and tracing through this optic handling vignetting.
    Rays need to already be at the common Wolter-I focus.

    Returns (primary_rays, secondary_rays, focused_rays, dz) where dz is the
    offset of the best-focus plane from the nominal focus.
    '''
    # First, getting the PyXFocus rays out of the ray object.
    pyxrays = rays.yield_prays()
    # Find which rays intersect with primary mirror surface.
    surf.wolterprimary(pyxrays, wolterIoptic.r0, wolterIoptic.z0)
    mask1 = logical_and(pyxrays[3] < wolterIoptic.z_ps + wolterIoptic.mirror_length + wolterIoptic.z0, pyxrays[3] > wolterIoptic.z_ps + wolterIoptic.z0)
    # Filter out photons which would not strike primary surface and propagate good photons.
    pyxrays = [x[mask1] for x in pyxrays]
    new_waves = rays.wave[mask1]
    trans.reflect(pyxrays)
    # Creating a new ray object on the primary mirror.
    primary_pyxrays = deepcopy(pyxrays)
    primary_rays = ray_obj(primary_pyxrays,wave = new_waves)
    # Performing the same set of steps for the secondary mirror.
    surf.woltersecondary(pyxrays, wolterIoptic.r0, wolterIoptic.z0)
    # BUG FIX: the original passed three positional args to logical_and --
    # logical_and(cond1, wolterIoptic.z0, cond2) -- which numpy reads as
    # logical_and(x1=cond1, x2=z0, out=cond2), silently discarding the
    # intended upper bound. The intersection must lie within the secondary
    # mirror's axial extent.
    mask2 = logical_and(pyxrays[3] > wolterIoptic.z0 - wolterIoptic.z_ss - wolterIoptic.mirror_length, pyxrays[3] < wolterIoptic.z0 - wolterIoptic.z_ss)
    pyxrays = [x[mask2] for x in pyxrays]
    new_waves = new_waves[mask2]
    trans.reflect(pyxrays)
    # Creating a new ray object on the secondary mirror.
    secondary_pyxrays = deepcopy(pyxrays)
    secondary_rays = ray_obj(secondary_pyxrays,wave = new_waves)
    # Add scatter (direction cosines perturbed; vz re-normalized to keep
    # a unit vector pointing toward -z).
    if scatter is True:
        pyxrays[4] = pyxrays[4] + random.normal(scale=1.5e-10, size=len(pyxrays[4]))
        pyxrays[5] = pyxrays[5] + random.normal(scale=1.5e-6, size=len(pyxrays[5]))
        pyxrays[6] = -sqrt(1.-pyxrays[5]**2-pyxrays[4]**2)
    # Propagating rays to the nominal focus location, computing the optimal
    # focus location, and returning the computed rays.
    surf.flat(pyxrays)
    dz = anal.analyticImagePlane(pyxrays)
    pyxrays[3] = ones(len(pyxrays[3]))*dz # Tracking the z-displacement relative to the nominal focus at zero.
    focused_rays = ray_obj(pyxrays,wave = new_waves)
    return primary_rays,secondary_rays,focused_rays,dz
def zp_before_wolter_trace(tol_budget,wolterIoptic,wave = emlines.mgk_limited,order = 1., zp_wolter_sep = 152.5):
    '''
    Calculates the radius of curvature resulting from a CZP with a given tolerance budget.

    Traces source -> collimating zone plate (CZP) -> zeroth-order blocking
    mask, moving everything into the Wolter-I coordinate frame. Returns
    (zone_plate_rays, mask_rays): the rays on the CZP and the rays that
    survive the mask. The actual Wolter-I trace is currently commented out.
    '''
    tols = [tol_budget.dz,tol_budget.offaxis_ang,tol_budget.phi,tol_budget.dr]
    #############################
    # Tracing the zone plate first.
    rays = trace_zp(tol_budget.zp,-tol_budget.zp.f,tols = tols,order = order,wave = wave,source_size = tol_budget.source_size)
    # NOTE: yield_prays returns the arrays by reference, so the transforms
    # below also update rays.x / rays.y in place (assuming PyXFocus mutates
    # in place) -- the mask test further down relies on this aliasing.
    pyxrays = rays.yield_prays()
    #############################
    # Handling the transforms necessary for the zone plate rays to interact with the WolterI optic correctly.
    # Next, translates the zone plate to be at the center of the ZP coordinates in XY,
    # moves back to intersection point of the Wolter-I pair, and brings the rays there.
    trans.transform(pyxrays,0.,0.,zp_wolter_sep, 0.,0.,0.) # mean(pyxrays[1]),mean(pyxrays[2])
    # Rotating the rays so that +z can point towards the mirrors and back towards the source.
    trans.transform(pyxrays,0.,0.,0.,0.,pi,pi/2)
    # Moving everything into the common coodinate system of the Wolter-I optic.
    trans.transform(pyxrays,0.,-wolterIoptic.r0,-wolterIoptic.z0,0.,0.,0.)
    # Putting rays on the zone plate into a new "zone_plate_rays" object so that everything is in the same coordinate system.
    zp_rays = deepcopy(pyxrays)
    zone_plate_rays = ray_obj(zp_rays,wave = rays.wave)
    ##############################
    # Next, adding an aperture mask to ensure that we're eliminating the zeroth order contribution.
    # This happens immediately after (100 mm) the collimating zone plate.
    mask_czp_sep = 100.
    # Radial clearance of the mask annulus beyond the primary's aperture.
    mask_tolerance = 0.5
    # Moving forward to CZP, then back by the mask separation.
    trans.transform(pyxrays,0.,0.,zp_wolter_sep + wolterIoptic.z0 - mask_czp_sep,0.,0.,0.)
    surf.flat(pyxrays)
    # Calculating which rays make it through this mask (rays.x/rays.y are now
    # the mask-plane positions via the aliasing noted above). The annulus is
    # the primary aperture padded by mask_tolerance on both sides.
    mask_condition = logical_and(sqrt(rays.x**2 + rays.y**2) < wolterIoptic.primary_rmax + mask_tolerance, sqrt(rays.x**2 + rays.y**2) > wolterIoptic.primary_rmin - mask_tolerance)
    # Now moving the rays back to the WolterI coordinates, and the rays should now be on the mask.
    trans.transform(pyxrays,0.,0.,-zp_wolter_sep - wolterIoptic.z0 + mask_czp_sep,0.,0.,0.)
    # And finally, applying the cut condition so that we get out only rays that pass through the mask.
    mask_rays = rays.yield_object_indices(ind = mask_condition)
    #if sum(mask_condition)
    ## Passing to the WolterI raytracing function, which handles everything else.
    #primary_rays,secondary_rays,focused_rays,dz = wolterItrace(mask_rays,wolterIoptic)
    return zone_plate_rays, mask_rays # , primary_rays, secondary_rays, focused_rays, dz
def compare_beamline_vs_zp(tol_budget,wolterIoptic,wave = emlines.mgk_limited,source_size = 0.0):
    """Trace the Wolter-I optic as illuminated by the CZP (first order) vs.
    by the bare beamline (zeroth order through the same geometry), returning
    the ray bundles and best-focus offsets for both cases.

    NOTE(review): zp_wolter_system_trace is not defined in this module as
    shown (zp_before_wolter_trace above returns only two values and has its
    Wolter trace commented out) -- confirm it is provided elsewhere.
    """
    # Doing the zp trace with all the other functions that we built.
    zp_rays, zp_wolter_rays, zp_dz = zp_wolter_system_trace(tol_budget, wolterIoptic, wave = wave, source_size = source_size)
    # Doing the beamline-only trace by tracing to a ZP but setting the response to be that of zeroth order.
    beamline_null_tols = zpt.tol_allocation(zp = tol_budget.zp, dz = 0., dr = 0.0, offaxis_ang = 0.0, phi = 0.0)
    beamline_rays,beamline_wolter_rays,beamline_zp = zp_wolter_system_trace(beamline_null_tols, wolterIoptic, order = 0.0, wave = wave, source_size = source_size)
    return zp_rays,beamline_rays,zp_wolter_rays,beamline_wolter_rays,zp_dz,beamline_zp
def phi_sample(zp,wolterIoptic,dzs,offaxis_angs,phis = linspace(0,9./5*pi,10)):
    """Grid-sample best-focus offsets over defocus, off-axis angle and phi.

    Runs compare_beamline_vs_zp at every (dz, offaxis_ang, phi) combination
    and returns (zp_grid, beam_grid), two arrays of shape
    (len(dzs), len(offaxis_angs), len(phis)) holding the CZP-illuminated and
    beamline-only focus offsets respectively.
    """
    zp_grid = zeros((len(dzs),len(offaxis_angs),len(phis)))
    beam_grid = zeros((len(dzs),len(offaxis_angs),len(phis)))
    for i, dz in enumerate(dzs):
        for j, ang in enumerate(offaxis_angs):
            for k, phi in enumerate(phis):
                czp_tols = zpt.tol_allocation(zp,dz = dz,dr = 0.0,offaxis_ang = ang,phi = phi)
                zp_rays,beamline_rays,zp_wolter_rays,beamline_wolter_rays,zp_dz,beamline_zp = compare_beamline_vs_zp(czp_tols,wolterIoptic,wave = emlines.mgk_limited,source_size = 0.060)
                zp_grid[i,j,k] = zp_dz
                beam_grid[i,j,k] = beamline_zp
                # Progress report. format() keeps the output identical under
                # both Python 2 and 3 (the original used a py2 print statement).
                print('{0} {1} {2}'.format(dz, ang, phi))
    return zp_grid,beam_grid
|
import math
"""program pro vypocet polohy jednotlivych poledniku a rovnobezek v danem zobrazeni"""
def zadani_osetreni_polomeru():
    """Prompt the user for a radius in km and return it scaled by 10000.

    Entering 0 selects the default Earth radius of 6371.11 km. Negative
    and non-numeric input re-prompts (the original crashed with an
    uncaught ValueError on non-numeric input, unlike its sibling
    zadani_osetreni_meritka).
    """
    while True:
        polomerstr = input("zadej polomer v km:")
        try:
            R = float(polomerstr)
        except ValueError:
            # Re-prompt on non-numeric input, mirroring the scale prompt.
            print("chybne zadany polomer - zadejte kladne cislo")
            continue
        if R < 0:
            print("chybne zadany polomer - zadejte kladne cislo")
        elif R == 0:
            # Default Earth radius.
            return 6371.11 * 10000
        else:
            return R * 10000
def zadani_osetreni_meritka():
    """Prompt the user for a map scale (positive integer); return scale / 10.

    Re-prompts on non-integer or non-positive input.
    """
    while True:
        meritkostr = input("zadej meritko:")
        try:
            m = int(meritkostr)
        except ValueError:
            # Narrowed from a bare except: only bad numeric input re-prompts.
            print("chybne zadane meritko - zadejte cele cislo")
            continue
        if m <= 0:
            print("chybne zadane meritko - zadejte kladne cislo")
        else:
            return m / 10
def zadani_osetreni_pismeneproZobr():
    """Prompt the user for the projection letter (A, B, M or L) and return it."""
    valid_letters = ("A", "B", "M", "L")
    while True:
        zobrazeni = input("zadej pismeno A,B,M nebo L: ")
        if zobrazeni in valid_letters:
            return zobrazeni
        print("nespravne zadane pismeno")
def vypocet_poledniku (R,m):
    """Given radius R and scale m, return the plotted x coordinates of the
    meridians every 10 degrees from -180 to 180 (37 values).

    Values with magnitude >= 100 (more than one metre on the map) are
    replaced by the placeholder string "-".
    """
    def _coord(deg):
        x = round((R * deg * math.pi / 180) / m, 1)
        return "-" if x <= -100 or x >= 100 else x
    return [_coord(deg) for deg in range(-180, 190, 10)]
# Parallels computation.
def vypocet_rovnobezek(R,m,zobrazeni):
    """Given radius R, scale m and projection letter zobrazeni, return the
    plotted y coordinates of the parallels every 10 degrees from -90 to 90
    (19 values).

    Projections: "L" orthographic (sin), "A" equidistant (linear),
    "B" stereographic (2*tan(phi/2)), "M" Mercator (log-tan; the poles map
    to infinity and are therefore not drawn). Values with magnitude >= 100
    (more than one metre on the map) become the placeholder "-".

    NOTE: an unrecognized letter raises NameError (y never assigned);
    callers validate the letter via zadani_osetreni_pismeneproZobr.
    """
    rovnobezky = []
    for i in range(-90, 100, 10):
        if zobrazeni=="L":
            y=(R*math.sin(i*math.pi/180))/m
        elif zobrazeni=="A":
            y=(R*i*math.pi/180)/m
        elif zobrazeni=="B":
            y=(2*R*math.tan(i/2*math.pi/180))/m
        elif zobrazeni=="M":
            if abs(i)==90:
                y=float("inf") # the pole is not drawn
            else:
                y=(R*math.log(math.tan(((i/2)+45)* math.pi / 180)))/m
        # Replace values larger than one metre (including inf) by "-".
        y=round(y,1)
        if -100 >= y or y >=100:
            y="-"
        rovnobezky.append(y)
    return rovnobezky
# Gather user input and compute the projection tables.
R = zadani_osetreni_polomeru()
m=zadani_osetreni_meritka()
zobrazeni=zadani_osetreni_pismeneproZobr()
poledniky=vypocet_poledniku (R,m)
rovnobezky=vypocet_rovnobezek(R,m,zobrazeni)
# Print the parallels and meridians.
print ("Rovnobezky:",rovnobezky)
print ("Poledniky:",poledniky)
|
from loader import bot, storage
import os
import redis
import sqlite3
# Start a local redis server (used by the bot's FSM storage) at import time.
os.system('redis-server /etc/redis/6379.conf')
redis_control = redis.Redis()
# SQLite connection to the words database, shared by the handlers.
db = sqlite3.connect('Words_Data_Base.db')
# "on_shutdown" function that called at the end of bot
async def on_shutdown(dp):
await bot.close()
await storage.close()
os.system('redis-cli shutdown')
db.close()
if __name__ == '__main__':
    from aiogram import executor
    # Importing handlers registers them on the dispatcher before polling starts.
    from handlers import dp
    executor.start_polling(dp, on_shutdown=on_shutdown)
|
import sys
import textwrap
def wrap(text):
    """Print *text* wrapped to 10-column lines (existing newlines are kept)."""
    wrapped_lines = textwrap.wrap(text, width=10, replace_whitespace=False)
    for line in wrapped_lines:
        print(line)
if __name__ == '__main__':
    # CLI entry point: wrap the first command-line argument.
    wrap(sys.argv[1])
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def levelOrderBottom(self, root):
        """
        Return the node values level by level, deepest level first, with
        values left-to-right within each level.
        :type root: TreeNode
        :rtype: List[List[int]]
        """
        if not root:
            return []
        levels = []
        frontier = [root]
        # Breadth-first sweep: collect each level, then advance to its children.
        while frontier:
            levels.append([node.val for node in frontier])
            next_frontier = []
            for node in frontier:
                if node.left:
                    next_frontier.append(node.left)
                if node.right:
                    next_frontier.append(node.right)
            frontier = next_frontier
        # Bottom-up order is simply the reversed top-down traversal.
        levels.reverse()
        return levels
|
# Demonstration: string methods return a NEW string (strings are immutable),
# while list.sort() mutates the list in place and returns None.
# (The variable was renamed from `str` to `s` -- shadowing the builtin
# `str` type is a bug waiting to happen.)
s = ' adfsdfsdf'
print(s)
s.strip()            # return value discarded -- s itself is unchanged
print(s)             # still has the leading space
print(s.strip())     # prints the stripped copy
nstr = s.strip()
print(nstr)

lst = ['d', 'e', 'a', 'c']
print(lst)
lst.sort()           # sorts in place, returns None
print(lst)           # prints the sorted list
print(lst.sort())    # prints None
nlst = lst.sort()
print(nlst)          # prints None
nlst = sorted(lst)
print(nlst)          # prints the sorted copy
print(lst.sort() == sorted(lst))   # False: None != sorted list
# (The original comment claimed this prints False -- it prints True,
# because None is never equal to a list.)
print(lst.sort() != sorted(lst))   # True
"""This module manages the views of the categories app."""
from rest_framework import generics, permissions, status
from rest_framework.response import Response
from rest_framework.views import APIView
from providers.models import Provider
from providers.serializers import ProviderListSerializer, ProviderSerializer
class ProviderView(APIView):
    """
    This class manages the view to create and list the providers.
    Attributes:
        permission_classes (list(Permissions)): The options to access at this resource.
    """
    permission_classes = [permissions.IsAuthenticated]

    def get(self, request, format=None):
        """
        Retrieve the providers list.
        Attributes:
            request (Request): The request sent to the api.
            format (NoneType): Always none, pass by Accept header.
        Returns:
            200: The list of providers.
            401: The user must be connected to access this resource.
            406: The response format is not acceptable by the server.
            500: An error was occured in the treatment of the request.
        """
        owned_providers = Provider.objects.filter(owner=request.user)  # pylint: disable=no-member
        return Response(ProviderListSerializer(owned_providers, many=True).data)

    def post(self, request, format=None):
        """
        Create a provider.
        Attributes:
            request (Request): The request sent to the api.
            format (NoneType): Always none, pass by Accept header.
        Returns:
            201: The provider is created.
            400: An error is detected on the request data.
            401: The user must be connected to access this resource.
            406: The response format is not acceptable by the server.
            500: An error was occured in the treatment of the request.
        """
        serializer = ProviderSerializer(data=request.data)
        # Guard clause: reject invalid payloads before touching the database.
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save(owner=request.user)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class ProviderDetail(generics.RetrieveUpdateDestroyAPIView):
    """
    This class manages the view to retrieve, update and delete a provider.
    Attributes:
        permission_classes (list(Permissions)): The options to access at this resource.
        serializer_class (Serializer): The serializer to bind the request and the response object.
    Returns:
        200: The provider is updated.
        204: The provider is deleted.
        400: An error is detected on the request data.
        401: The user must be connected to access this resource.
        406: The response format is not acceptable by the server.
        500: An error was occured in the treatment of the request.
    """
    permission_classes = [permissions.IsAuthenticated]
    serializer_class = ProviderSerializer
    def get_queryset(self):
        """Restrict lookup to the requested provider, owned by the requester
        (a foreign pk therefore yields 404 rather than 403)."""
        return Provider.objects.filter(owner=self.request.user, pk=self.kwargs['pk']) # pylint: disable=no-member
    def perform_update(self, serializer):
        """Force the owner to the requesting user on update."""
        serializer.save(owner=self.request.user)
|
# importing the dependencies
import os
import sys
# from './build/account' import Account # isort: skip
# setup accounts
# NOTE(review): Account is never brought into scope -- the import above is
# commented out and is not valid Python syntax anyway -- so this script
# raises NameError here. Restore a working `from ... import Account` first.
account1 = Account()
account2 = Account()
# Account 1 Transactions
account1.deposit(100.0)
account1.deposit(100.0)
account1.withdraw(50.0)
# Account 2 Transactions
account2.deposit(200.0)
account2.deposit(200.0)
# Smoke-test the balances (NOTE: bare asserts are stripped under `python -O`).
assert account1.get_balance() == 150.0
assert account2.get_balance() == 400.0
from django.shortcuts import get_object_or_404, render, redirect, HttpResponse, render_to_response
from django.template import RequestContext
from models import *
import spotipy
from spotify_util import *
from django.contrib.auth.decorators import login_required
def index(request):
    """Render the front page with the four most-liked albums."""
    context = {'hot_albums': Album.objects.order_by('-likes')[:4]}
    return render(request, 'musicool/index.html', context)
def about(request):
    """Render the static about page (no context data needed)."""
    return render(request, 'musicool/about.html', {})
def album(request, album_id):
    """Render the detail page for one album and its tracks (404 if missing).

    (Removed the unused `pages` local from the original.)
    """
    album = get_object_or_404(Album, id=album_id)
    tracks = Track.objects.filter(album=album)
    context_dict = {'album': album, 'tracks': tracks}
    return render(request, 'musicool/album.html', context_dict)
@login_required
def spotify_ask_authorized(request):
    """Send the user through the Spotify OAuth prompt, then to our login view."""
    prompt_user_login(username=request.user.username)
    return redirect('/musicool/spotify_login/')
@login_required
def spotify_login(request):
    """Exchange the OAuth code (or cached credentials) for a Spotify token.

    NOTE(review): when no token is obtained this view falls through and
    returns None, which Django rejects -- confirm intended behavior.
    """
    # Get token
    if 'code' in request.GET: # Retrieve token for the first time (No Cache)
        code = request.GET['code']
        token = exchange_token_with_code(code, username=request.user.username)
    else: # Retrieve token from cache
        token = exchange_token_with_code(username=request.user.username)
    if token: # Check if user gets token
        # The client is constructed but unused beyond the commented debug code.
        sp = spotipy.Spotify(auth=token)
        #user = sp.current_user()
        #print user['id']
        return redirect('/musicool/')
def search_spotify(request):
    """Handle the Spotify search form; render results for POSTed queries."""
    result_list = []
    context_dict = {}
    if request.method == 'POST':
        query = request.POST['query']
        if query:
            # BUG FIX: the helper's return value was previously discarded,
            # so the template always received an empty list. Assumes the
            # helper returns the result list -- confirm its signature.
            result_list = search_spotify_helper(query)
    context_dict['result_list'] = result_list
    return render(request, 'musicool/search_spotify.html', context_dict)
def socialauth(request):
    """Render the social-auth page, exposing the user and the raw request."""
    return render(request, 'musicool/socialauth.html',
                  {'user': request.user, 'request': request})
def complete_google_auth(request):
    """Post-Google-auth landing: bounce straight back to the front page."""
    return redirect('/musicool/')
import assertions
#####################################
# DTO
#####################################
class DTO(object):
    """Base class for Data Transfer Objects built from a dict of members.

    Subclasses pass a dict (typically locals() or collected keyword args)
    to __init__; each entry becomes an attribute, and the member names are
    recorded (sorted) in _DTO_members, which drives str/repr, comparison
    and hashing.

    NOTE: Python 2 code -- relies on list-returning dict.keys(),
    dict.iteritems() and the cmp() builtin.
    """
    def __init__(self,kw):
        # Record the class-level schema version on the instance (used by
        # upgrade machinery via code_version/instance_version below).
        self._instance_version = self.code_version
        d = dict(kw) # make (shallow) copy of d before changing it
        d.pop('self', None)
        clsname = type(self).__name__
        if clsname in d and d[clsname]==type(self): # side effect of using super(A,self).__init__(locals()) in subclass - A shows up in locals (ironpython only?)
            d.pop(clsname)
        self._DTO_members = d.keys()
        self._DTO_members.sort()
        for name,val in d.iteritems():
            setattr(self,name,val)
    @property
    def code_version(self):
        """Schema version declared on the class (0 when none is declared)."""
        return getattr(type(self), '_code_version', 0)
    @property
    def instance_version(self):
        """Schema version stored on the instance (0 for pre-versioning data)."""
        return getattr(self, '_instance_version', 0)
    @instance_version.setter
    def instance_version(self,val): #PyFlakesIgnore
        self._instance_version = val
    def _add_DTO_member(self,name):
        """Used internally. deprecated for use in upgrade scripts. use DTO_add_field instead"""
        assertions.fail_if(name in self._DTO_members,'name already in _DTO_members',name=name,members=self._DTO_members)
        self._DTO_members.append(name)
        self._DTO_members.sort()
    def _remove_DTO_member(self,name):
        """Used internally. deprecated for use in upgrade scripts. use DTO_remove_field instead"""
        assertions.fail_unless(name in self._DTO_members,'name not in _DTO_members',name=name,members=self._DTO_members)
        self._DTO_members = [x for x in self._DTO_members if x != name]
    def DTO_add_field(self,name,value):
        """Register `name` as a member and set it; returns self for chaining."""
        self._add_DTO_member(name)
        setattr(self,name,value)
        return self
    def DTO_remove_field(self,name,field_names=None):
        """Unregister member `name`, deleting the attributes listed in
        field_names (defaulting to [name]); returns self for chaining."""
        if field_names is None:
            field_names = [name]
        for fname in field_names:
            delattr(self,fname)
        self._remove_DTO_member(name)
        return self
    def _to_dct(self):
        # Snapshot of the registered members as a plain dict.
        return dict(self.itermembers())
    def itermembers(self):
        """Iterate (name, value) pairs of the registered members, name-sorted."""
        return ((k, getattr(self, k)) for k in self._DTO_members)
    def __str__(self):
        contents = ['%s=%s' % (k,getattr(self,k)) for k in self._DTO_members]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(contents))
    def __repr__(self):
        contents = ['%s=%r' % (k,getattr(self,k)) for k in self._DTO_members]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(contents))
    def __cmp__(self,other):
        # Python 2 total ordering: by type name, then member names, then values.
        if type(self) != type(other):
            return cmp(type(self).__name__, type(other).__name__)
        if self._DTO_members != other._DTO_members:
            return cmp(self._DTO_members, other._DTO_members)
        for k in self._DTO_members:
            x = cmp(getattr(self,k),getattr(other,k))
            if x != 0:
                return x
        return 0
    def _eq_with_diff(self, other, logger=None):
        """Equality test that optionally logs the first difference it finds."""
        if type(self).__name__ != type(other).__name__:
            if logger is not None:
                logger.Debug('DTO.__eq__ - types are different type(self)=%s, type(other)=%s' % (type(self),type(other)))
            return False
        if self._DTO_members != other._DTO_members:
            if logger is not None:
                logger.Debug('DTO.__eq__ - members are different self=%s, other=%s' % (self._DTO_members,other._DTO_members))
            return False
        for k in self._DTO_members:
            self_k = getattr(self,k)
            other_k = getattr(other,k)
            if self_k != other_k:
                if logger is not None:
                    logger.Debug('DTO.__eq__ - member %s is different. self=%s, other=%s' % (k,self_k,other_k))
                return False
        return True
    def __eq__(self,other):
        """Provide __eq__ and __ne__ when members don't support cmp (e.g, for sets)"""
        return self._eq_with_diff(other)
    def __ne__(self,other):
        return not self.__eq__(other)
    def __hash__(self):
        # XOR of the hashable member values; unhashable members are skipped,
        # yielding a weaker (but still consistent) hash for e.g. set members.
        res = 0
        for k in self._DTO_members:
            val = getattr(self,k)
            try:
                res = res ^ hash(val)
            except:
                pass # ignore members which aren't hashable
        return res
#####################################
# Bunch
#####################################
class Bunch(DTO):
    """DTO built directly from keyword arguments: Bunch(a=1, b=2)."""
    _code_version = 0
    def __init__(self, **kw):
        # DTO.__init__ pops the 'self' entry again, so injecting it is safe.
        super(Bunch, self).__init__(dict(kw, self=self))
class BunchKw(DTO):
    """DTO built from an explicit dict (e.g. locals()): BunchKw(d).

    NOTE: mutates the caller's dict by inserting a 'self' entry.
    """
    _code_version = 0
    def __init__(self,kw):
        kw['self'] = self
        super(BunchKw,self).__init__(kw)
#####################################
# Anything
#####################################
class Anything(object):
    """Provides an object x which you can hang arbitrary data on:
       x.y = my_data
       x.z = some_other_data
    """
    def __init__(self, **kw):
        # NOTE: Python 2 idiom (iteritems), matching the rest of this module.
        for k, v in kw.iteritems():
            setattr(self, k, v)
    def __repr__(self):
        return 'Anything(%s)' % ', '.join('%s=%s' % (k, v) for k, v in self.__dict__.iteritems())
    def __eq__(self, other):
        """Provide __eq__ and __ne__ when members don't support cmp (e.g, for sets)"""
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # BUG FIX: under Python 2 __ne__ is NOT derived from __eq__, so
        # `a != b` silently fell back to identity comparison. Mirrors DTO.__ne__.
        return not self.__eq__(other)
|
def LiqEtlp_pT(P,T,x_N2):
    # Auto-generated surrogate-model stub: the inputs are normalized to
    # z-scores, but the polynomial appears truncated -- x, y and z are
    # computed and never used, and the output is just the intercept term,
    # so the function returns the constant -965.0863 for all inputs.
    # NOTE(review): confirm against the code generator.
    # 'T--1.77479018e+02' is T + 177.479018 (double negative).
    x = (P-5.53901951e+02)/3.71707300e-01
    y = (T--1.77479018e+02)/2.63824000e-02
    z = (x_N2-9.59063881e-01)/4.39751219e-03
    output = \
    1*-9.65086300e+02
    # Identity affine map (scale 1, offset 0) applied to the intercept.
    liq_etlp = output*1.00000000e+00+0.00000000e+00
    return liq_etlp
# This program converts JSON files to CSV, while dealing with encoding issues
import json
import csv
# Code altered from https://www.geeksforgeeks.org/convert-json-to-csv-in-python/
# and https://stackoverflow.com/questions/21058935/python-json-loads-shows-valueerror-extra-data/51830719
# One parsed JSON object per input line (JSON-Lines format).
temp_storage = []
# Replace the filename allrecipes-recipes.json with the json file to convert.
# Reading as utf-8 explicitly, consistent with this script's purpose of
# handling encoding issues; `with` guarantees the file is closed.
with open('allrecipes-recipes.json', 'r', encoding='utf-8') as json_file:
    for line in json_file:
        temp_storage.append(json.loads(line))

# newline='' is required by the csv module (prevents blank rows on Windows);
# rename recipes.csv as desired.
with open('recipes.csv', 'w', encoding='utf-8', newline='') as data_file:
    csv_writer = csv.writer(data_file)
    count = 0
    for row in temp_storage:
        # Write the header once, taken from the keys of the first record.
        if count == 0:
            csv_writer.writerow(row.keys())
            count += 1
        csv_writer.writerow(row.values())
# -*- coding: utf-8 -*-
'''
Simple RPC
Copyright (c) 2012-2013, LastSeal S.A.
'''
from simplerpc.base.SimpleRpcLogicBase import SimpleRpcLogicBase
from simplerpc.testing.exposed_api.TwinModulesManager import TwinModulesManager
from simplerpc.testing.exposed_api.ModuleUnitTestRunner import ModuleUnitTestRunner
from simplerpc.context.SimpleRpcContext import SimpleRpcContext
from simplerpc.common.FileManager import FileManager
from simplerpc.expose_api.javascript.ClassToJsUnitTest import ClassToJsUnitTest
import os
import logging
class ExposedModuleAutotester(SimpleRpcLogicBase):
    '''
    Runs the Python unit test for the exposed-API class defined in the
    executing __main__ module (the class is assumed to share the module
    file's base name), and can generate its twin JavaScript unit test.
    '''
    def __init__(self, context=None):
        # Fall back to a private debug-logging context when none is given.
        # (Fixed: identity check `is None` instead of `== None`.)
        if context is None:
            context = SimpleRpcContext(self.__class__.__name__)
            context.log.setLevel(logging.DEBUG)
        SimpleRpcLogicBase.__init__(self, context)
    def __post_init__(self):
        # Collaborators wired up by the base-class lifecycle hook.
        self.twins_manager = TwinModulesManager(self.context)
        self.module_unit_test_runner = ModuleUnitTestRunner(self.context)
        self.file_manager = FileManager(self.context)
        self.class_to_js_unittest = ClassToJsUnitTest(self.context)
    def autoTest(self):
        """Run the Python unit test for the class named after __main__'s file."""
        tested_class = self.__getTestedClass()
        self.module_unit_test_runner.runPythonTest(tested_class)
    def createJsUnitTest(self, overwrite=False):
        """Translate the tested class into its twin JavaScript unit-test file."""
        tested_class = self.__getTestedClass()
        file_path = self.twins_manager.getJsUnittest(tested_class)
        self.class_to_js_unittest.translateToFile(tested_class, file_path, overwrite)
    def __getClassName(self, module_path):
        # 'path/to/Foo.py' -> 'Foo' (module file name doubles as class name).
        return os.path.splitext(os.path.basename(module_path))[0]
    def __getTestedClass(self):
        import __main__
        return getattr(__main__, self.__getClassName(__main__.__file__))
def smokeTestModule():
    """Manual smoke test: wire the autotester to a concrete exposed class."""
    context = SimpleRpcContext('smoke test')
    ema = ExposedModuleAutotester(context)
    def getTestedClass():
        from example_rpc.exposed_api.images.ImagesBrowser import ImagesBrowser
        return ImagesBrowser
    # Override the name-mangled private hook so the autotester targets
    # ImagesBrowser instead of deriving the class from __main__'s file name.
    ema._ExposedModuleAutotester__getTestedClass = getTestedClass
    ema.autoTest()
    ema.createJsUnitTest(overwrite=False)
if __name__ == "__main__":
smokeTestModule()
|
from collections import defaultdict
from typing import List
import numpy as np
import xarray as xr
from ..utils.coding import set_time_encodings
from ..utils.log import _init_logger
# fmt: off
from .set_groups_base import SetGroupsBase
# fmt: on
logger = _init_logger(__name__)
class SetGroupsEK60(SetGroupsBase):
    """Class for saving groups to netcdf or zarr from EK60 data files."""

    # The sets beam_only_names, ping_time_only_names, and
    # beam_ping_time_names are used in set_groups_base and
    # in converting from v0.5.x to v0.6.0. The values within
    # these sets are applied to all Sonar/Beam_groupX groups.
    # 2023-07-24:
    # PRs:
    #   - https://github.com/OSOceanAcoustics/echopype/pull/1056
    #   - https://github.com/OSOceanAcoustics/echopype/pull/1083
    # The artificially added beam and ping_time dimensions at v0.6.0
    # were reverted at v0.8.0, due to concerns with efficiency and code clarity
    # (see https://github.com/OSOceanAcoustics/echopype/issues/684 and
    # https://github.com/OSOceanAcoustics/echopype/issues/978).
    # However, the mechanisms to expand these dimensions were preserved for
    # flexibility and potential later use.
    # Note such expansion is still applied on AZFP data for 2 variables
    # (see set_groups_azfp.py).

    # Variables that need only the beam dimension added to them.
    beam_only_names = set()

    # Variables that need only the ping_time dimension added to them.
    ping_time_only_names = set()

    # Variables that need beam and ping_time dimensions added to them.
    beam_ping_time_names = set()

    # EK60 files carry a single beam group holding power/angle data per channel.
    beamgroups_possible = [
        {
            "name": "Beam_group1",
            "descr": (
                "contains backscatter power (uncalibrated) and other beam or"
                " channel-specific data, including split-beam angle data when they exist."
            ),
        }
    ]
    def __init__(self, *args, **kwargs):
        """Initialize the base class, then precompute the channel ordering
        and the per-channel nominal frequencies used by every set_* method."""
        super().__init__(*args, **kwargs)

        # obtain sorted channel dict in ascending order
        channels = list(self.parser_obj.config_datagram["transceivers"].keys())
        channel_ids = {
            ch: self.parser_obj.config_datagram["transceivers"][ch]["channel_id"] for ch in channels
        }

        # example sorted_channel from a 5-channel data file for future reference:
        #   1: 'GPT  18 kHz 009072034d45 1-1 ES18-11'
        #   2: 'GPT  38 kHz 009072033fa2 2-1 ES38B'
        #   3: 'GPT  70 kHz 009072058c6c 3-1 ES70-7C'
        #   4: 'GPT 120 kHz 00907205794e 4-1 ES120-7C'
        #   5: 'GPT 200 kHz 0090720346a8 5-1 ES200-7C'

        # In some examples the channels may not be ordered, thus sorting is required
        # (sorted by channel_id string, keyed by the transceiver sequence number).
        self.sorted_channel = dict(sorted(channel_ids.items(), key=lambda item: item[1]))

        # obtain corresponding frequency dict from sorted channels
        self.freq = [
            self.parser_obj.config_datagram["transceivers"][ch]["frequency"]
            for ch in self.sorted_channel.keys()
        ]
    def set_env(self) -> xr.Dataset:
        """Set the Environment group.

        Builds one dataset per channel (absorption and sound speed on that
        channel's time1 axis), attaches the channel coordinate, then merges
        all channels into one dataset.
        """
        # Loop over channels
        ds_env = []
        for ch in self.sorted_channel.keys():
            ds_tmp = xr.Dataset(
                {
                    "absorption_indicative": (
                        ["time1"],
                        self.parser_obj.ping_data_dict["absorption_coefficient"][ch],
                        {
                            "long_name": "Indicative acoustic absorption",
                            "units": "dB/m",
                            "valid_min": 0.0,
                        },
                    ),
                    "sound_speed_indicative": (
                        ["time1"],
                        self.parser_obj.ping_data_dict["sound_velocity"][ch],
                        {
                            "long_name": "Indicative sound speed",
                            "standard_name": "speed_of_sound_in_sea_water",
                            "units": "m/s",
                            "valid_min": 0.0,
                        },
                    ),
                },
                coords={
                    "time1": (
                        ["time1"],
                        self.parser_obj.ping_time[ch],
                        {
                            "axis": "T",
                            "long_name": "Timestamps for NMEA position datagrams",
                            "standard_name": "time",
                            "comment": "Time coordinate corresponding to environmental variables.",
                        },
                    )
                },
            )
            # Attach channel dimension/coordinate
            ds_tmp = ds_tmp.expand_dims({"channel": [self.sorted_channel[ch]]})
            ds_tmp["channel"] = ds_tmp["channel"].assign_attrs(
                self._varattrs["beam_coord_default"]["channel"]
            )
            ds_tmp["frequency_nominal"] = (
                ["channel"],
                [self.parser_obj.config_datagram["transceivers"][ch]["frequency"]],
                {
                    "units": "Hz",
                    "long_name": "Transducer frequency",
                    "valid_min": 0.0,
                    "standard_name": "sound_frequency",
                },
            )
            ds_env.append(ds_tmp)

        # Merge data from all channels
        ds = xr.merge(ds_env)

        return set_time_encodings(ds)
    def set_sonar(self) -> xr.Dataset:
        """Set the Sonar group (beam-group index variables plus global attributes)."""
        # Add beam_group and beam_group_descr variables sharing a common dimension
        # (beam_group), using the information from self._beamgroups
        self._beamgroups = self.beamgroups_possible
        beam_groups_vars, beam_groups_coord = self._beam_groups_vars()
        ds = xr.Dataset(beam_groups_vars, coords=beam_groups_coord)

        # Assemble sonar group global attribute dictionary
        sonar_attr_dict = {
            "sonar_manufacturer": "Simrad",
            "sonar_model": self.sonar_model,
            # transducer (sonar) serial number is not stored in the EK60 raw data file,
            # so sonar_serial_number can't be populated from the raw datagrams
            "sonar_serial_number": "",
            "sonar_software_name": self.parser_obj.config_datagram["sounder_name"],
            "sonar_software_version": self.parser_obj.config_datagram["version"],
            "sonar_type": "echosounder",
        }
        ds = ds.assign_attrs(sonar_attr_dict)

        return ds
    def set_platform(self) -> xr.Dataset:
        """Set the Platform group.

        Combines NMEA position data (time1), motion/orientation data (time2),
        and per-channel transducer offsets into one dataset.
        """
        # Collect variables
        # Read lat/long from NMEA datagram
        time1, msg_type, lat, lon = self._extract_NMEA_latlon()

        # NMEA dataset: variables filled with np.nan if they do not exist
        platform_dict = {"platform_name": "", "platform_type": "", "platform_code_ICES": ""}

        # Values for the variables below having a channel (ch) dependence
        # are identical across channels, so only the first channel is read.
        ch = list(self.sorted_channel.keys())[0]

        ds = xr.Dataset(
            {
                "latitude": (
                    ["time1"],
                    lat,
                    self._varattrs["platform_var_default"]["latitude"],
                ),
                "longitude": (
                    ["time1"],
                    lon,
                    self._varattrs["platform_var_default"]["longitude"],
                ),
                "sentence_type": (
                    ["time1"],
                    msg_type,
                    self._varattrs["platform_var_default"]["sentence_type"],
                ),
                "pitch": (
                    ["time2"],
                    self.parser_obj.ping_data_dict["pitch"][ch],
                    self._varattrs["platform_var_default"]["pitch"],
                ),
                "roll": (
                    ["time2"],
                    self.parser_obj.ping_data_dict["roll"][ch],
                    self._varattrs["platform_var_default"]["roll"],
                ),
                "vertical_offset": (
                    ["time2"],
                    self.parser_obj.ping_data_dict["heave"][ch],
                    self._varattrs["platform_var_default"]["vertical_offset"],
                ),
                "water_level": (
                    [],
                    # a scalar, assumed to be a constant in the source transducer_depth data
                    self.parser_obj.ping_data_dict["transducer_depth"][ch][0],
                    self._varattrs["platform_var_default"]["water_level"],
                ),
                # Offsets not recorded in EK60 raw files: filled with NaN scalars.
                **{
                    var: ([], np.nan, self._varattrs["platform_var_default"][var])
                    for var in [
                        "MRU_offset_x",
                        "MRU_offset_y",
                        "MRU_offset_z",
                        "MRU_rotation_x",
                        "MRU_rotation_y",
                        "MRU_rotation_z",
                        "position_offset_x",
                        "position_offset_y",
                        "position_offset_z",
                    ]
                },
            },
            coords={
                "time1": (
                    ["time1"],
                    time1,
                    {
                        **self._varattrs["platform_coord_default"]["time1"],
                        "comment": "Time coordinate corresponding to NMEA position data.",
                    },
                ),
                "time2": (
                    ["time2"],
                    self.parser_obj.ping_time[ch],
                    {
                        "axis": "T",
                        "long_name": "Timestamps for platform motion and orientation data",
                        "standard_name": "time",
                        "comment": "Time coordinate corresponding to platform motion and "
                        "orientation data.",
                    },
                ),
            },
        )

        # Loop over channels and merge all
        ds_plat = []
        for ch in self.sorted_channel.keys():
            ds_tmp = xr.Dataset(
                {
                    "transducer_offset_x": (
                        [],
                        self.parser_obj.config_datagram["transceivers"][ch].get("pos_x", np.nan),
                        self._varattrs["platform_var_default"]["transducer_offset_x"],
                    ),
                    "transducer_offset_y": (
                        [],
                        self.parser_obj.config_datagram["transceivers"][ch].get("pos_y", np.nan),
                        self._varattrs["platform_var_default"]["transducer_offset_y"],
                    ),
                    "transducer_offset_z": (
                        [],
                        self.parser_obj.config_datagram["transceivers"][ch].get("pos_z", np.nan),
                        self._varattrs["platform_var_default"]["transducer_offset_z"],
                    ),
                },
            )
            # Attach channel dimension/coordinate
            ds_tmp = ds_tmp.expand_dims({"channel": [self.sorted_channel[ch]]})
            ds_tmp["frequency_nominal"] = (
                ["channel"],
                [self.parser_obj.config_datagram["transceivers"][ch]["frequency"]],
                {
                    "units": "Hz",
                    "long_name": "Transducer frequency",
                    "valid_min": 0.0,
                    "standard_name": "sound_frequency",
                },
            )
            ds_plat.append(ds_tmp)

        # Merge data from all channels
        # TODO: for current test data we see all
        #  pitch/roll/heave are the same for all freq channels
        #  consider only saving those from the first channel
        ds_plat = xr.merge(ds_plat)
        ds_plat["channel"] = ds_plat["channel"].assign_attrs(
            self._varattrs["beam_coord_default"]["channel"]
        )

        # Merge with NMEA data
        ds = xr.merge([ds, ds_plat], combine_attrs="override")
        ds = ds.assign_attrs(platform_dict)

        return set_time_encodings(ds)
    def _set_beam_group1_zarr_vars(self, ds: xr.Dataset) -> xr.Dataset:
        """
        Modifies ds by setting all variables associated with
        ``Beam_group1``, that were directly written to a
        temporary zarr file.

        Parameters
        ----------
        ds : xr.Dataset
            Dataset representing ``Beam_group1`` filled with
            all variables, besides those written to zarr

        Returns
        -------
        A modified version of ``ds`` with the zarr variables
        added to it.
        """
        # TODO: In the future it would be nice to have a dictionary of
        #  attributes stored in one place for all of the variables.
        #  This would reduce unnecessary code duplication in the
        #  functions below.

        # obtain DataArrays using zarr variables
        zarr_path = self.parsed2zarr_obj.zarr_file_name
        backscatter_r = self._get_power_dataarray(zarr_path)
        angle_athwartship, angle_alongship = self._get_angle_dataarrays(zarr_path)

        # append DataArrays created from zarr file
        ds = ds.assign(
            backscatter_r=backscatter_r,
            angle_athwartship=angle_athwartship,
            angle_alongship=angle_alongship,
        )

        return ds
def set_beam(self) -> List[xr.Dataset]:
"""Set the /Sonar/Beam_group1 group."""
# Channel-specific variables
params = [
"beam_type",
"beamwidth_alongship",
"beamwidth_athwartship",
"dir_x",
"dir_y",
"dir_z",
"angle_offset_alongship",
"angle_offset_athwartship",
"angle_sensitivity_alongship",
"angle_sensitivity_athwartship",
"pos_x",
"pos_y",
"pos_z",
"equivalent_beam_angle",
"gpt_software_version",
"gain",
]
beam_params = defaultdict()
for param in params:
beam_params[param] = [
self.parser_obj.config_datagram["transceivers"][ch_seq].get(param, np.nan)
for ch_seq in self.sorted_channel.keys()
]
for i, ch in enumerate(self.sorted_channel.keys()):
if (
np.isclose(beam_params["dir_x"][i], 0.00)
and np.isclose(beam_params["dir_y"][i], 0.00)
and np.isclose(beam_params["dir_z"][i], 0.00)
):
beam_params["dir_x"][i] = np.nan
beam_params["dir_y"][i] = np.nan
beam_params["dir_z"][i] = np.nan
# TODO: Need to discuss if to remove INDEX2POWER factor from the backscatter_r
# currently this factor is multiplied to the raw data before backscatter_r is saved.
# This is if we are encoding only raw data to the .nc/zarr file.
# Need discussion since then the units won't match
# with convention (though it didn't match already...).
# Assemble variables into a dataset
ds = xr.Dataset(
{
"frequency_nominal": (
["channel"],
self.freq,
{
"units": "Hz",
"long_name": "Transducer frequency",
"valid_min": 0.0,
"standard_name": "sound_frequency",
},
),
"beam_type": (
"channel",
beam_params["beam_type"],
{"long_name": "type of transducer (0-single, 1-split)"},
),
"beamwidth_twoway_alongship": (
["channel"],
beam_params["beamwidth_alongship"],
{
"long_name": "Half power two-way beam width along alongship axis of beam", # noqa
"units": "arc_degree",
"valid_range": (0.0, 360.0),
"comment": (
"Introduced in echopype for Simrad echosounders to avoid potential confusion with convention definitions. " # noqa
"The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa
"The convention defines one-way transmit or receive beamwidth (beamwidth_receive_minor and beamwidth_transmit_minor), but Simrad echosounders record two-way beamwidth in the data." # noqa
),
},
),
"beamwidth_twoway_athwartship": (
["channel"],
beam_params["beamwidth_athwartship"],
{
"long_name": "Half power two-way beam width along athwartship axis of beam", # noqa
"units": "arc_degree",
"valid_range": (0.0, 360.0),
"comment": (
"Introduced in echopype for Simrad echosounders to avoid potential confusion with convention definitions. " # noqa
"The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa
"The convention defines one-way transmit or receive beamwidth (beamwidth_receive_major and beamwidth_transmit_major), but Simrad echosounders record two-way beamwidth in the data." # noqa
),
},
),
"beam_direction_x": (
["channel"],
beam_params["dir_x"],
{
"long_name": "x-component of the vector that gives the pointing "
"direction of the beam, in sonar beam coordinate "
"system",
"units": "1",
"valid_range": (-1.0, 1.0),
},
),
"beam_direction_y": (
["channel"],
beam_params["dir_y"],
{
"long_name": "y-component of the vector that gives the pointing "
"direction of the beam, in sonar beam coordinate "
"system",
"units": "1",
"valid_range": (-1.0, 1.0),
},
),
"beam_direction_z": (
["channel"],
beam_params["dir_z"],
{
"long_name": "z-component of the vector that gives the pointing "
"direction of the beam, in sonar beam coordinate "
"system",
"units": "1",
"valid_range": (-1.0, 1.0),
},
),
"angle_offset_alongship": (
["channel"],
beam_params["angle_offset_alongship"],
{
"long_name": "electrical alongship angle offset of the transducer",
"comment": (
"Introduced in echopype for Simrad echosounders. " # noqa
"The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa
),
},
),
"angle_offset_athwartship": (
["channel"],
beam_params["angle_offset_athwartship"],
{
"long_name": "electrical athwartship angle offset of the transducer",
"comment": (
"Introduced in echopype for Simrad echosounders. " # noqa
"The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa
),
},
),
"angle_sensitivity_alongship": (
["channel"],
beam_params["angle_sensitivity_alongship"],
{
"long_name": "alongship angle sensitivity of the transducer",
"comment": (
"Introduced in echopype for Simrad echosounders. " # noqa
"The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa
),
},
),
"angle_sensitivity_athwartship": (
["channel"],
beam_params["angle_sensitivity_athwartship"],
{
"long_name": "athwartship angle sensitivity of the transducer",
"comment": (
"Introduced in echopype for Simrad echosounders. " # noqa
"The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa
),
},
),
"equivalent_beam_angle": (
["channel"],
beam_params["equivalent_beam_angle"],
{
"long_name": "Equivalent beam angle",
"units": "sr",
"valid_range": (0.0, 4 * np.pi),
},
),
"gain_correction": (
["channel"],
beam_params["gain"],
{"long_name": "Gain correction", "units": "dB"},
),
"gpt_software_version": (
["channel"],
beam_params["gpt_software_version"],
),
"transmit_frequency_start": (
["channel"],
self.freq,
self._varattrs["beam_var_default"]["transmit_frequency_start"],
),
"transmit_frequency_stop": (
["channel"],
self.freq,
self._varattrs["beam_var_default"]["transmit_frequency_stop"],
),
"transmit_type": (
[],
"CW",
{
"long_name": "Type of transmitted pulse",
"flag_values": ["CW"],
"flag_meanings": [
"Continuous Wave – a pulse nominally of one frequency",
],
},
),
"beam_stabilisation": (
[],
np.array(0, np.byte),
{
"long_name": "Beam stabilisation applied (or not)",
"flag_values": [0, 1],
"flag_meanings": ["not stabilised", "stabilised"],
},
),
"non_quantitative_processing": (
[],
np.array(0, np.int16),
{
"long_name": "Presence or not of non-quantitative processing applied"
" to the backscattering data (sonar specific)",
"flag_values": [0],
"flag_meanings": ["None"],
},
),
},
coords={
"channel": (
["channel"],
list(self.sorted_channel.values()),
self._varattrs["beam_coord_default"]["channel"],
),
},
attrs={"beam_mode": "vertical", "conversion_equation_t": "type_3"},
)
# Construct Dataset with ping-by-ping data from all channels
ds_backscatter = []
for ch in self.sorted_channel.keys():
var_dict = {
"sample_interval": (
["ping_time"],
self.parser_obj.ping_data_dict["sample_interval"][ch],
{
"long_name": "Interval between recorded raw data samples",
"units": "s",
"valid_min": 0.0,
},
),
"transmit_bandwidth": (
["ping_time"],
self.parser_obj.ping_data_dict["bandwidth"][ch],
{
"long_name": "Nominal bandwidth of transmitted pulse",
"units": "Hz",
"valid_min": 0.0,
},
),
"transmit_duration_nominal": (
["ping_time"],
self.parser_obj.ping_data_dict["pulse_length"][ch],
{
"long_name": "Nominal bandwidth of transmitted pulse",
"units": "s",
"valid_min": 0.0,
},
),
"transmit_power": (
["ping_time"],
self.parser_obj.ping_data_dict["transmit_power"][ch],
{
"long_name": "Nominal transmit power",
"units": "W",
"valid_min": 0.0,
},
),
"data_type": (
["ping_time"],
np.array(self.parser_obj.ping_data_dict["mode"][ch], dtype=np.byte),
{
"long_name": "recorded data type (1=power only, 2=angle only, 3=power and angle)", # noqa
"flag_values": [1, 2, 3],
"flag_meanings": ["power only", "angle only", "power and angle"],
},
),
"sample_time_offset": (
["ping_time"],
(
np.array(self.parser_obj.ping_data_dict["offset"][ch])
* np.array(self.parser_obj.ping_data_dict["sample_interval"][ch])
),
{
"long_name": "Time offset that is subtracted from the timestamp"
" of each sample",
"units": "s",
},
),
"channel_mode": (
["ping_time"],
np.array(self.parser_obj.ping_data_dict["transmit_mode"][ch], dtype=np.byte),
{
"long_name": "Transceiver mode",
"flag_values": [-1, 0, 1, 2],
"flag_meanings": ["Unknown", "Active", "Passive", "Test"],
"comment": "From transmit_mode in the EK60 datagram",
},
),
}
if not self.parsed2zarr_obj.temp_zarr_dir:
var_dict["backscatter_r"] = (
["ping_time", "range_sample"],
self.parser_obj.ping_data_dict["power"][ch],
{
"long_name": self._varattrs["beam_var_default"]["backscatter_r"][
"long_name"
],
"units": "dB",
},
)
ds_tmp = xr.Dataset(
var_dict,
coords={
"ping_time": (
["ping_time"],
self.parser_obj.ping_time[ch],
self._varattrs["beam_coord_default"]["ping_time"],
),
"range_sample": (
["range_sample"],
np.arange(self.parser_obj.ping_data_dict["power"][ch].shape[1]),
self._varattrs["beam_coord_default"]["range_sample"],
),
},
)
else:
ds_tmp = xr.Dataset(
var_dict,
coords={
"ping_time": (
["ping_time"],
self.parser_obj.ping_time[ch],
self._varattrs["beam_coord_default"]["ping_time"],
),
},
)
if not self.parsed2zarr_obj.temp_zarr_dir:
# Save angle data if exist based on values in
# self.parser_obj.ping_data_dict['mode'][ch]
# Assume the mode of all pings are identical
# 1 = Power only, 2 = Angle only 3 = Power & Angle
if np.all(np.array(self.parser_obj.ping_data_dict["mode"][ch]) != 1):
ds_tmp = ds_tmp.assign(
{
"angle_athwartship": (
["ping_time", "range_sample"],
self.parser_obj.ping_data_dict["angle"][ch][:, :, 0],
{
"long_name": "electrical athwartship angle",
"comment": (
"Introduced in echopype for Simrad echosounders. " # noqa
+ "The athwartship angle corresponds to the major angle in SONAR-netCDF4 vers 2. " # noqa
),
},
),
"angle_alongship": (
["ping_time", "range_sample"],
self.parser_obj.ping_data_dict["angle"][ch][:, :, 1],
{
"long_name": "electrical alongship angle",
"comment": (
"Introduced in echopype for Simrad echosounders. " # noqa
+ "The alongship angle corresponds to the minor angle in SONAR-netCDF4 vers 2. " # noqa
),
},
),
}
)
# Attach frequency dimension/coordinate
ds_tmp = ds_tmp.expand_dims({"channel": [self.sorted_channel[ch]]})
ds_tmp["channel"] = ds_tmp["channel"].assign_attrs(
self._varattrs["beam_coord_default"]["channel"]
)
ds_backscatter.append(ds_tmp)
# Merge data from all channels
ds = xr.merge(
[ds, xr.merge(ds_backscatter)], combine_attrs="override"
) # override keeps the Dataset attributes
if self.parsed2zarr_obj.temp_zarr_dir:
ds = self._set_beam_group1_zarr_vars(ds)
# Manipulate some Dataset dimensions to adhere to convention
self.beam_groups_to_convention(
ds, self.beam_only_names, self.beam_ping_time_names, self.ping_time_only_names
)
return [set_time_encodings(ds)]
    def set_vendor(self) -> xr.Dataset:
        """Set the Vendor_specific group (per-channel calibration tables)."""
        # Retrieve pulse length, gain, and sa correction
        pulse_length = np.array(
            [
                self.parser_obj.config_datagram["transceivers"][ch]["pulse_length_table"]
                for ch in self.sorted_channel.keys()
            ]
        )
        gain = np.array(
            [
                self.parser_obj.config_datagram["transceivers"][ch]["gain_table"]
                for ch in self.sorted_channel.keys()
            ]
        )
        sa_correction = [
            self.parser_obj.config_datagram["transceivers"][ch]["sa_correction_table"]
            for ch in self.sorted_channel.keys()
        ]
        # Save pulse length and sa correction, one row per channel, one
        # column per pulse-length setting (pulse_length_bin).
        ds = xr.Dataset(
            {
                "frequency_nominal": (
                    ["channel"],
                    self.freq,
                    {
                        "units": "Hz",
                        "long_name": "Transducer frequency",
                        "valid_min": 0.0,
                        "standard_name": "sound_frequency",
                    },
                ),
                "sa_correction": (["channel", "pulse_length_bin"], sa_correction),
                "gain_correction": (["channel", "pulse_length_bin"], gain),
                "pulse_length": (["channel", "pulse_length_bin"], pulse_length),
            },
            coords={
                "channel": (
                    ["channel"],
                    list(self.sorted_channel.values()),
                    self._varattrs["beam_coord_default"]["channel"],
                ),
                "pulse_length_bin": (
                    ["pulse_length_bin"],
                    np.arange(pulse_length.shape[1]),
                ),
            },
        )
        return ds
|
# Solution of;
# Project Euler Problem 244: Sliders
# https://projecteuler.net/problem=244
#
# You probably know the game Fifteen Puzzle. Here, instead of numbered tiles,
# we have seven red tiles and eight blue tiles. A move is denoted by the
# uppercase initial of the direction (Left, Right, Up, Down) in which the tile
# is slid, e.g. starting from configuration (S), by the sequence LULUR we
# reach the configuration (E). For each path, its checksum is calculated by
# (pseudocode):
#   checksum = 0
#   checksum = (checksum * 243 + m1) mod 100 000 007
#   checksum = (checksum * 243 + m2) mod 100 000 007
#   ...
#   checksum = (checksum * 243 + mn) mod 100 000 007
# where mk is the ASCII value of the kth letter in the move sequence, and the
# ASCII values for the moves are: L=76, R=82, U=85, D=68. For the sequence
# LULUR given above, the checksum would be 19761398. Now, starting from
# configuration (S), find all shortest ways to reach configuration (T).
# What is the sum of all checksums for the paths having the minimal length?
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
    """No-op stand-in for the (unwritten) problem-244 solver; always returns None."""
    return None
if __name__ == '__main__':
    # Benchmark harness: time `dummy` with input n over i iterations,
    # tagged with the Project Euler problem id for reporting.
    n = 1000
    i = 10000
    prob_id = 244
    timed.caller(dummy, n, i, prob_id)
|
import numpy as np
'''
Created on 2018年9月14日
@author: DELL
'''
class KD_node:
    """A KD-tree node: a stored point, its split axis, and child links."""

    def __init__(self, elt=None, split=None, LL=None, RR=None):
        self.elt = elt        # point stored at this node
        self.split = split    # index of the dimension this node splits on
        self.LL = LL          # left subtree: points with value <= elt[split]
        self.RR = RR          # right subtree: points with value > elt[split]
def createKDTree(dataList):
    """Recursively build a KD-tree from a list of points.

    The split dimension is the axis with maximum variance; the split point
    is the median along that axis. Returns the root KD_node, or None for
    an empty input.
    """
    if len(dataList) == 0:
        return
    dimension = len(dataList[0])
    split = 0.0  # chosen split axis (always reassigned below, since variance >= 0)
    max_var = 0.0
    # Pick the dimension with the largest variance as the split axis.
    for i in range(dimension):
        ll = [example[i] for example in dataList]
        var = computeVariance(ll)
        if var >= max_var:
            max_var = var
            split = i
    # Sort along the split axis; the median element becomes this node's point.
    sortedDataList = sorted(dataList, key=lambda x: x[split], reverse=False)
    elt = sortedDataList[int(len(sortedDataList) / 2)]
    root = KD_node(elt, split)
    # Left child: points before the median; right child: points after it.
    root.LL = createKDTree(sortedDataList[0:int(len(dataList) / 2)])
    root.RR = createKDTree(sortedDataList[int(len(dataList) / 2 + 1):])
    return root
def computeVariance(list):
list = [float(example) for example in list]
array = np.array(list).reshape(-1,1)
sum = array.sum()
mean = sum/float(len(list))
varVec = (array - np.tile([mean],(len(list),1)))**2/float(len(list))
return varVec.sum()
def findNN(root, query, k=3):
    """Search the KD-tree rooted at ``root`` for neighbors of ``query``.

    Descends to the leaf region containing ``query``, then backtracks,
    visiting a sibling subtree only when its splitting hyperplane is closer
    than the best distance found so far.

    Returns a list of [point, distance] pairs -- every successive
    improvement found during the search -- sorted by distance ascending.
    NOTE(review): the ``k`` parameter is currently unused; the full
    improvement list is returned regardless of k. Confirm intent.

    Fix: removed a leftover debug print that polluted stdout on every call.
    """
    nnList = []
    NN = root.elt
    min_dist = computeDist(query, NN)
    nnList.append([NN, min_dist])
    nodeList = []
    temp_root = root
    # Phase 1: walk down to the leaf region containing `query`,
    # recording the path for backtracking.
    while temp_root:
        nodeList.append(temp_root)
        dd = computeDist(query, temp_root.elt)
        if min_dist > dd:
            NN = temp_root.elt
            min_dist = dd
            nnList.append([NN, min_dist])
        ss = temp_root.split
        if query[ss] <= temp_root.elt[ss]:
            temp_root = temp_root.LL
        else:
            temp_root = temp_root.RR
    # Phase 2: backtrack; check the opposite subtree only when the splitting
    # hyperplane is closer than the current best distance.
    while nodeList:
        back_elt = nodeList.pop()
        ss = back_elt.split
        if abs(query[ss] - back_elt.elt[ss]) < min_dist:
            if query[ss] <= back_elt.elt[ss]:
                temp_root = back_elt.RR
            else:
                temp_root = back_elt.LL
            if temp_root:
                nodeList.append(temp_root)
                curDist = computeDist(query, temp_root.elt)
                if min_dist > curDist:
                    min_dist = curDist
                    NN = temp_root.elt
                    nnList.append([NN, min_dist])
    return sorted(nnList, key=lambda item: item[1])
def computeDist(pt1, pt2):
    """Euclidean distance between two equal-length point sequences."""
    diff = np.array(pt1) - np.array(pt2)
    return np.sqrt((diff ** 2).sum())
# Smoke test: build a tree from sample 2-D points and query near (3, 4.5).
dataList = [[2, 3], [5, 4], [9, 6], [4, 7], [8, 1], [7, 2]]
print(findNN(createKDTree(dataList=dataList), [3, 4.5]))
|
"""
Configuration file. All constants should go here.
"""
from .version import __version__, __build__, __date__, __commit__
class Config:
    """
    This class contains the application configuration values
    """

    # Release metadata re-exported from the generated version module.
    version = __version__
    build = __build__
    date = __date__
    commit = __commit__
|
from rest_framework.exceptions import APIException
from apps.endpoints.models import Recommendation, Song
from unittest import mock
from django.test import TestCase
from rest_framework.test import APIClient
from .test_utils import populate_full_db, getSingleSpotifyTrackJSON, getTestSpotifyRecommendationJSON
def get_complete_user_request():
    """Return a valid recommendation request payload: song, artist, and a
    sample Spotify track object."""
    payload = {
        'song': 'cheapskate',
        'artist': 'oliver tree',
    }
    payload['spotifyObj'] = getSingleSpotifyTrackJSON()
    return payload
def get_incomplete_user_request():
    """Return a request payload that is missing the required 'spotifyObj' key."""
    payload = dict(song='cheapskate', artist='oliver tree')
    return payload
@mock.patch('apps.endpoints.services.spotify.SpotifyService.get_song_recommendations', return_value=getTestSpotifyRecommendationJSON())
class TestSpotifyRecommendations(TestCase):
    '''
    Test API response for fetching Spotify Recommendations, only one copy of a track object
    is sent in the response in order to reduce data redundancy. Hence `recommended_track_ids`
    and `recommended_tracks` are unequal because of 2 same tracks in the recommendation response
    This will practically never happen in real case because Spotify isn't stupid to recommend you
    duplicate tracks in a single response.
    '''
    url = '/api/spotify/recommendations'

    def test_recommendations_complete_request(self, get_song_recommendations):
        # A complete payload (song + artist + spotifyObj) should return 200
        # with a recommendation id and a de-duplicated tracks list.
        client = APIClient()
        payload = get_complete_user_request()
        response = client.post(self.url, payload, format='json')
        # Explicitly defining duplicates for a rare test case
        duplicates = 2
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.data['recommendation_id'], str)
        self.assertEqual(len(response.data['recommended_track_ids']), 38)
        self.assertEqual(len(response.data['recommended_tracks']), 36)
        # The ids list keeps duplicates; the tracks list is de-duplicated.
        self.assertEqual(
            len(response.data['recommended_track_ids']) - duplicates,
            len(response.data['recommended_tracks'])
        )

    def test_recommendations_incomplete_request(self, get_song_recommendations):
        # Missing 'spotifyObj' must be rejected with 400 Bad Request.
        client = APIClient()
        payload = get_incomplete_user_request()
        response = client.post(self.url, payload, format='json')
        self.assertEqual(response.status_code, 400)
class TestSpiders(TestCase):
    @classmethod
    def setUpTestData(cls):
        '''
        Setup models for all tests in the class
        '''
        cls.recommendation_id = populate_full_db()

    def test_models_created_correctly(self):
        '''
        Check the data models are populated and linked together correctly
        '''
        songs = Song.objects.all()
        recommendations = Recommendation.objects.get(pk=self.recommendation_id)
        requested_song_ids = recommendations.userRequest.selectedTracks.all().values_list('track', flat=True)
        recommended_song_ids = recommendations.selectedTracks.all().values_list('track', flat=True)
        # Fixture creates 3 songs: one requested, two recommended.
        self.assertEqual(len(songs), 3)
        self.assertEqual(songs[0].id, requested_song_ids[0])
        self.assertEqual(songs[1].id, recommended_song_ids[0])
        self.assertEqual(songs[2].id, recommended_song_ids[1])

    @mock.patch('spiders.lyricraper.initiator.crawl', return_value='test_spider_job_id1234')
    def test_spider_start(self, crawl):
        '''
        Test spider job is started successfully for the given recommendation object ID
        '''
        client = APIClient()
        payload = {
            'recommendation_id': self.recommendation_id
        }
        url = '/api/spiders/start'
        response = client.post(url, payload, format='json')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data['status'], 'spider_running')
        self.assertEqual(response.data['job_id'], 'test_spider_job_id1234')

    def test_lyrics_fetch(self):
        '''
        Test lyrics response is fetched correctly for the given recommendation object ID
        '''
        client = APIClient()
        payload = {
            'recommendation_id': self.recommendation_id
        }
        url = '/api/show_lyrics'
        response = client.post(url, payload, format='json')
        # No duplicate tracks in this fixture.
        duplicates = 0
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response.data['recommendation_id'], str)
        self.assertEqual(len(response.data['recommended_track_ids']), 2)
        self.assertEqual(len(response.data['recommended_tracks']), 2)
        self.assertEqual(
            len(response.data['recommended_track_ids']) - duplicates,
            len(response.data['recommended_tracks'])
        )
# def test_fetch_accesstoken(self):
# client = APIClient()
# url = '/api/spotify/accesstoken'
# response = client.get(url)
# self.assertEqual(response.status_code, 200)
# self.assertIn('access_token', response.data)
# def test_spider_jobs_status(self):
# client = APIClient()
# url = '/api/spiders/status'
# response = client.get(url)
# self.assertEqual(response.status_code, 200)
# self.assertIn('running', response.data)
|
# Inventory of network devices at the London office, keyed by device name.
london_co = {
    "r1": {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "4451",
        "ios": "15.4",
        "ip": "10.255.0.1"
    },
    "r2": {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "4451",
        "ios": "15.4",
        "ip": "10.255.0.2"
    },
    "sw1": {
        "location": "21 New Globe Walk",
        "vendor": "Cisco",
        "model": "3850",
        "ios": "3.6.XE",
        "ip": "10.255.0.101",
        "vlans": "10,20,30",
        "routing": True
    }
}

k = london_co.keys()
dev = input('Enter device name: ')
# BUG FIX: .format(k) was previously applied to input()'s RETURN VALUE (the
# user's typed text), so the prompt displayed a literal "{}" and never showed
# the available parameters. Format the prompt string itself instead.
param = input('Input device parametr ({}): '.format(k))
print(london_co[dev][param])
|
"""
Given a string, determine if a permutation of the string could form a palindrome.
For example,
"code" -> False, "aab" -> True, "carerac" -> True.
"""
import string
def canPermutePalindrome(s):
    """
    Return True if some permutation of `s` can form a palindrome.

    A string is permutable into a palindrome iff at most one character
    occurs an odd number of times. Comparison is case- and symbol-sensitive
    ('A' != 'a'; punctuation counts as a character).

    :type s: str
    :rtype: bool
    """
    # Counter gives a single O(n) pass; the original called s.count() once
    # per distinct character, which is O(n^2) in the worst case, and carried
    # a redundant `cnt == len(s)` special case.
    from collections import Counter
    odd_counts = sum(1 for cnt in Counter(s).values() if cnt % 2 != 0)
    return odd_counts <= 1
# Ad-hoc checks (expected output: True, False, True, True, False).
print (canPermutePalindrome("aabbccc"))
print (canPermutePalindrome("code"))
print (canPermutePalindrome("aab"))
print (canPermutePalindrome("carerac"))
print (canPermutePalindrome("AaBb//a"))
|
# Endpoint for covid data api
COVID_ENDPT = 'https://api.covidtracking.com/v1/states/'
# Path to the application's INI configuration file.
CONFIG_FILEPATH = 'config.ini'
# Terms used to identify COVID-related content.
KEYWORDS = {"covid", "coronavirus", "corona", "pandemic", "quarantine", "covid-19"}
|
"""
Plugins are defined here, because Airflow will import all modules in `plugins` dir but not packages (folders).
"""
from airflow.plugins_manager import AirflowPlugin
from .RedshiftTableConstraintOperator import RedshiftTableConstraintOperator
from .SQLTemplatedPythonOperator import SQLTemplatedPythonOperator
from .RedshiftJoinCheckOperator import RedshiftJoinCheckOperator
class CustomOperators(AirflowPlugin):
    """Registers the project's custom operators under the
    'custom_operators' plugin name."""

    name = 'custom_operators'
    operators = [RedshiftTableConstraintOperator, SQLTemplatedPythonOperator, RedshiftJoinCheckOperator]
    # No custom hooks/executors/macros/views are provided; the attributes are
    # declared empty because the AirflowPlugin interface expects them.
    hooks = []
    executors = []
    macros = []
    admin_views = []
    flask_blueprints = []
    menu_links = []
if __name__ == '__main__':
    # Nothing to run directly; this module only registers the Airflow plugin.
    pass
|
# Lego staircase calculator: asks for the number of rows and prints the total
# number of bricks needed (1 + 2 + ... + rows).
print (" Willkommen zum ultimativen Lego-Treppen-Kalkulator!!!")
print (" # ")
print (" # # ")
print (" # # # ")
print (" # # # # ")
print (" in memoriam GRAF ZAHL")
reihen = int ( input ("Wieviele Reihen baust Du?") )
# BUG FIX: the original loop reused `reihen` as its loop variable, clobbering
# the user's input (it only worked because range() was evaluated first).
# The staircase total is the triangular number n*(n+1)/2.
kloetze = reihen * (reihen + 1) // 2
print ( "Du brauchst ", kloetze, " Klötzchen, um die Treppe zu bauen.")
|
import tensorflow as tf
import numpy as np
import cv2
import os
import matplotlib.pyplot as plt
import _config as cfg
import _util as util
import _model_2_100x30 as model
# Evaluation script: restores a trained TF1 model and reports per-class accuracy,
# sorting test images into PASS/ and FAIL/ directories along the way.
'''
1) config.py: dataDir
2) util.py: 적용 model
2) train_result.py: 적용 model
'''
''' ============================= '''
''' test set 읽기(목록!!!) '''
''' ============================= '''
# Read the full test set (rows of image path + steering label).
testSet = util.ReadCSV(cfg.testFile)
testSet = np.array(testSet)
test_num = len(testSet)
''' ============================= '''
''' 초기 설정 '''
''' ============================= '''
# Select logging verbosity (disabled).
# tf.logging.set_verbosity(tf.logging.ERROR)
# Saver for the trained model checkpoint.
saver = tf.train.Saver()
# sess = tf.InteractiveSession()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Restore the trained weights from the checkpoint directory.
saver.restore(sess, cfg.saveData + "/model.ckpt")
print('---------------------------------')
print("### Load save Data ###")
print('---------------------------------')
''' ============================= '''
''' Test '''
''' ============================= '''
# Per-class sample and correct-prediction counters.
num = np.zeros(cfg.nb_classes, dtype=np.int32) # nb_classes = 6
correct = np.zeros(cfg.nb_classes, dtype=np.int32)
num_total = 0
correct_total = 0
# Create the output directories if they do not exist yet.
if not os.path.exists(cfg.trainData + '/PASS/'):
    os.makedirs(cfg.trainData + '/PASS/')
if not os.path.exists(cfg.trainData + '/FAIL/'):
    os.makedirs(cfg.trainData + '/FAIL/')
''' 각 방향별 정확도 확인 '''
for i in range(test_num):
    image = util.LoadImgData(testSet[i:i+1])
    steering = int(testSet[i, 1])
    # Forward pass for a single image; keep_prob=1.0 disables dropout.
    predict = model.Y_.eval(session=sess,
                            feed_dict={model.X: image, model.keep_prob: 1.0})
    predict = np.argmax(predict)
    num[steering] += 1
    # Count correct predictions and save the image under PASS/ or FAIL/.
    if predict == steering:
        correct[predict] += 1
        cv2.imwrite(cfg.trainData + '/PASS/' + cfg.str_steering[0][predict] +
                    '(-' + testSet[i:i+1, 0][0],
                    image[0] * 255) # cv2 expects 0-255 pixel values (unlike scipy)
    else:
        cv2.imwrite(cfg.trainData + '/FAIL/' + cfg.str_steering[0][predict] +
                    '(-' + testSet[i:i+1, 0][0],
                    image[0] * 255) # cv2 expects 0-255 pixel values (unlike scipy)
''' 각 방향별 정확도 출력 '''
# NOTE(review): the report starts at class 1 — class 0 appears to be unused; confirm.
for i in range(1, cfg.nb_classes):
    print("%s: %d/%d, accuracy: %.2f%%"
          % (cfg.str_steering[0][i], correct[i], num[i],
             correct[i]/num[i].astype(float)*100))
    num_total += num[i]
    correct_total += correct[i]
''' 평균 정확도 출력 '''
print("Total: %d/%d, accuracy: %.2f%%"
      % (correct_total, num_total,
         correct_total/num_total.astype(float)*100))
'''
# image 그리기
for i in range(cfg.batch_size):
    plt.subplot(4, 2, i+1)
    plt.imshow(x_img[i], cmap='Greys')
    plt.show()
'''
|
# coding=utf-8
'''
Created on 2020-4-2
@author: jiangao
Project: one piece 漫画爬取
'''
import requests
import threading
import sys
import io
import time
import os
import re
import socket
header = {
'Content-Type': 'text/plain; charset=UTF-8',
'User-Agent':'Chrome/80.0.3396.99 Safari/537.36'
}
# Download a single image (poster) to a local file.
def save_image_file(url, path):
    """Download `url` and write the bytes to `path`.

    Returns 0 on HTTP 200, 1 otherwise so callers can stop iterating
    when a page no longer exists.
    """
    # Bound the request itself; the original set the *global* socket
    # timeout only after the request had already completed.
    jd = requests.get(url, headers=header, timeout=20)
    time.sleep(2)  # be polite to the server between downloads
    if jd.status_code == 200:
        print('work')
        # `with` closes the file even on a failed write (the explicit
        # e.close() inside the original with-block was redundant).
        with open(path, 'wb') as e:
            e.write(jd.content)
        return 0
    else:
        return 1
def save(op, ed):
    """Download pages of volume `op`, resuming after page `ed`.

    Stops as soon as save_image_file reports a missing page (returns 1).
    """
    base = 'http://mhua.zerobyw4.com/manhua/ON9EP9IE6CE/'
    ext = '.jpg'
    for j in range(op, op + 1):
        path = 'D:/one/' + str(j) + '/'
        for i in range(ed + 1, 250):
            # Page numbers are zero-padded to three digits; the original
            # duplicated the whole body three times for <10 / <100 / >=100.
            page = str(i).zfill(3)
            print("第"+str(i)+"张"+" "+"("+"第"+str(j)+"卷"+")")
            h = base + str(j) + '/' + page + ext
            b = save_image_file(h, path + page + ext)
            if b == 1:
                break
def save1():
    """Resume downloading: for each volume folder under D:\\one, read the
    last saved page number from its newest file name and continue there."""
    for i in range(94, 95):
        listoss = os.listdir('D:\\one\\' + str(i))
        j = listoss[-1:]
        # Bug fix: escape the dot — the regex '.jpg' also matched e.g. 'ajpg'.
        strinfo = re.compile(r'\.jpg')
        one = strinfo.sub('', j[0])
        save(int(i), int(one))
if __name__ == '__main__':
    # Manual smoke test: download one known page to a fixed local path.
    url = 'http://mhua.zerobyw4.com/manhua/O9NI9EC6E/94/001.jpg'
    path = 'D:\\one\\94\\001.jpg'
    save_image_file(url,path)
|
"""
Mohammad Rifat Arefin
ID: 1001877262
"""
import os
import tkinter as tk
import socket
import threading
import pickle
from orderedset import OrderedSet #FIFO data structure for lexicon
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfile
import time
server_port = 12100 #primary server
polling_port = 12105 #polling on primary server
client_socket = None   #main TCP socket to the server (created in send_username)
lexicon = OrderedSet() #FIFO queue of user-entered lexicon words awaiting a poll
polled = "" #keep track of polled contents
send_start = 0 #keep track when file transfer starts
disconnected = False #check if primary server disconnected
backup = 0 #check if connected to backup
lock = threading.Lock() #for isolation
username = None        #name registered with the server
poll_socket = None     #secondary socket on which the server sends polling requests
""" Prompt user to choose file to send """
def open_file(root):
    """Send a user-chosen file to the server for spell checking and show
    a 'Save as' button once the corrected content comes back.

    Holds the module lock for the whole transfer so the polling thread
    cannot interleave traffic on the same socket.
    """
    global send_start
    lock.acquire()
    send_start = 1
    client_socket.send("data".encode()) #send header
    ack = client_socket.recv(1024).decode() #recv ack
    print("ack "+ ack + str(len(ack)))
    # client_socket.send("Got it!".encode())
    path = open(askopenfilename(),'rb') #file chooser
    client_socket.sendfile(path) #send file for spell check
    root.grid_remove()
    root = tk.Frame(master, width=500, height=200)
    root.grid()
    get = client_socket.recv(1024).decode() #recv checked file
    # while(get==""):
    # get = client_socket.recv(1024).decode() #reattempt if data not received
    lock.release()
    print("get "+get)
    lexicon.clear()
    # NOTE(review): the socket is closed here, so further lexicon polls will fail;
    # confirm this is the intended end of the session.
    client_socket.close()
    tk.Label(root, text="Upload complete\nSpell checking complete").grid()
    tk.Button(root, text = "Save as",command=lambda: save_file(root,get)).grid()
    root.tkraise()
""" Prompt user for destination location of spell checked file """
def save_file(root, data):
    """Write the spell-checked text `data` to a user-chosen file and show
    the Quit / Start-over screen."""
    print("data"+data)
    file = asksaveasfile() #destination location
    file.write(data)
    file.close()
    root.grid_remove()
    root = tk.Frame(master, width=500, height=200)
    root.grid()
    tk.Label(root,text="Spell checking complete").grid()
    tk.Label(root,text="Successfully saved").grid()
    tk.Button(root, text = "Quit",command=lambda: quit_app()).grid()
    tk.Button(root, text = "Start over", command=lambda: restart(root)).grid()
    root.tkraise()
""" Show lexicon to user """
def show_lexicon(frame, label):
    """Refresh `label` every 2 seconds with the lexicon queue, any content
    the server just polled, and a disconnect notice when applicable."""
    global disconnected
    if disconnected == True: #notify user if primary server disconnected
        label['text'] = "SERVER DISCONNECTED\n"
        disconnected = False
    else:
        label['text'] = ""
    label['text'] += "Lexicon Queue " + str(lexicon)
    global polled
    if (polled != ""): #check if server polled
        label['text'] += "\nPolled content by server:" + polled
        polled = "" #reset GUI content
    master.after(2000, show_lexicon, frame, label) #refresh GUI
""" add lexicon to queue """
def add_lexicon(entry):
    """Append the Entry widget's current text to the lexicon queue and clear it."""
    lexicon.add(entry.get())
    entry.delete(0,'end')
""" receive polling req """
def polling_req(frame):
    """Background-thread loop: answer server polling requests with the
    current lexicon, and fail over to the backup server ports when the
    primary disconnects (recv returns an empty string)."""
    while(1):
        req = poll_socket.recv(1024).decode() #recv header
        print(req+" req "+str(len(req)))
        if(len(req)==0): #if primary server disconnects
            global disconnected, backup
            disconnected = True
            global server_port, polling_port
            # Switch to the backup server's port numbers and reconnect.
            server_port = 12101
            polling_port = 12106
            send_username(frame) #check with backup port no.
            break
        elif(req=="polling"):
            try:
                # Lock so a concurrent file upload cannot interleave on the socket.
                lock.acquire()
                client_socket.send("lexicon".encode()) #send header
                print(client_socket.recv(1024).decode()) #recv ack
                lex = pickle.dumps(lexicon)
                client_socket.send(lex) #send lexicon
                lock.release()
                global polled
                if len(lexicon)>0:
                    polled = str(lexicon)
                    lexicon.clear() #clear after being polled
            except:
                # Socket failed mid-poll: release the lock and stop the thread.
                lock.release()
                break
    print("break")
""" connect with the server and register a username """
def send_username(root_frame, entry = None):
    """Connect to the spell-check server and register `username`.

    `entry` is the username Entry widget on first connect; it is None on a
    backup-server reconnect (the previously stored username is reused).
    On connection failure, retries once against the backup ports before
    showing a 'Server not connected' screen.
    """
    if entry != None:
        global username
        username = entry.get()
    print("username: %s" % (username))
    global client_socket, server_port, poll_socket, polling_port
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) #create socket
    poll_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        client_socket.connect(('localhost', server_port)) #connect with server
        client_socket.send(username.encode())
        root_frame.grid_remove()
        msg = client_socket.recv(1024).decode() #receive server status
        frame2 = tk.Frame(master)
        frame2.grid()
        if(msg == "ok"): #servers sends ok if name available
            tk.Label(frame2, text="Welcome "+username).grid()
            if entry == None:
                # Reconnect path: tell the user we are on the backup now.
                tk.Label(frame2, text="Connected to Backup Server").grid()
            tk.Button(frame2, text="Choose file", command=lambda:open_file(frame2)).grid()
            dummy = tk.Label(frame2, text="")
            dummy.grid()
            lex_input = tk.Entry(frame2, bd=5)
            lex_input.grid()
            tk.Button(frame2, text="Insert lexicon", command=lambda:add_lexicon(lex_input)).grid() #add lexicon through GUI
            tk.Button(frame2, text = "Quit",command=quit_app).grid()
            show_lexicon(frame2, dummy) #GUI for lexicon
            frame2.tkraise()
            poll_socket.connect(('localhost', polling_port))
            poll_socket.send("hello".encode())
            threading.Thread(target=polling_req, args= (frame2,)).start() #new thread for checking polling req
        else: #server rejects if name not available
            if(msg == "duplicate"):
                tk.Label(frame2, text="Name not available").grid()
            elif(msg == "full_capacity"): #server rejects if already 3 clients running
                tk.Label(frame2, text="Already 3 clients running").grid()
            tk.Button(frame2, text = "Quit",command=quit_app).grid()
            tk.Button(frame2, text = "Retry", command=lambda: restart(frame2)).grid()
            frame2.tkraise()
    except:
        global backup
        if backup == 0:
            # First failure: fail over to the backup server's ports and retry once.
            server_port = 12101
            polling_port = 12106
            backup = 1
            send_username(root_frame, entry)
            return
        root_frame.grid_remove() #Notify username if server not connected
        frame2 = tk.Frame(master)
        frame2.grid()
        tk.Label(frame2, text="Server not connected").grid()
        tk.Button(frame2, text="Quit", command=quit_app).grid()
        tk.Button(frame2, text = "Retry", command=lambda: restart(frame2)).grid()
""" restart client """
def restart(root):
    """Close both sockets, reset all connection state to the primary-server
    defaults, and show the landing (username) screen again."""
    root.grid_remove()
    client_socket.close()
    poll_socket.close()
    global send_start, disconnected, backup, server_port, polling_port
    server_port = 12100
    polling_port = 12105
    backup = 0
    send_start = 0
    disconnected = False
    landing()
""" quit application """
def quit_app():
    """Destroy the GUI, close the socket and hard-exit the process
    (os._exit also terminates the background polling thread)."""
    master.destroy()
    client_socket.close()
    os._exit(1)
"""Prompt user for username """
def landing():
    """Build the initial username-entry screen; 'confirm' hands the entry
    to send_username to register with the server."""
    root_frame = tk.Frame(master, width=300, height=100)
    root_frame.grid()
    tk.Label(root_frame, text="Username").grid(row=0, column=0)
    entry = tk.Entry(root_frame, bd=5)
    entry.grid(row=0, column=1)
    tk.Button(root_frame, text="confirm",
              command=lambda: send_username(root_frame, entry = entry)).grid(row=1)
master = tk.Tk() # draw client window
master.title("client")
master.geometry("500x200")
landing() #prompt for username
tk.mainloop()
# Reached only after the window is closed without quit_app().
client_socket.close()
|
from collections import Counter
# Read N integers and print how many distinct values appear among them.
N = int(input())
values = [int(input()) for _ in range(N)]
print(len(set(values)))
|
import matplotlib.pyplot as plt
import numpy as np
import pickle
import argparse
from os import path
import re, string, unicodedata
import nltk
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
# Get the necessary class structure and connect to the DB
from db_config import Subreddit, Submission
from mongoengine import *
# Open the MongoDB connection at import time; `db` is the mongoengine handle.
db = connect('reddalysis', host='localhost', port=27017)
############################################
########## DATA HELPER FUNCTIONS ###########
############################################
def get_submissions(subreddit_name):
    """Load every pickled submission dump for a subreddit into one dict.

    Merges the top/hot/random/controversial pickle files.  Returns the
    merged dict keyed by submission id, or None (after printing an error)
    if any expected file is missing.
    """
    full_post_dict = {}
    # USING PICKLE FILES
    # Bug fix: the original only existence-checked the 'top' file; a missing
    # hot/random/controversial file crashed with FileNotFoundError.
    for sort in ('top', 'hot', 'random', 'controversial'):
        data_path = 'data/{}_{}.pickle'.format(subreddit_name, sort)
        if (not path.exists(data_path)):
            print("ERROR: Files for r/{} not found. Please run \'python store.py -s {} -p\' first.".format(subreddit_name, subreddit_name))
            return None
        with open(data_path, 'rb') as handle:
            full_post_dict.update(pickle.load(handle))
    print('Loaded {} submissions'.format(len(list(full_post_dict.keys()))))
    return full_post_dict
############################################
####### TEXT PREPROCESSING FUNCTIONS #######
############################################
def remove_non_ascii(words):
    """Remove non-ASCII characters from list of tokenized words"""
    _norm = unicodedata.normalize
    return [
        _norm('NFKD', w).encode('ascii', 'ignore').decode('utf-8', 'ignore')
        for w in words
    ]
def remove_hyperlinks(words):
    """Remove hyperlinks from list of tokenized words"""
    # Compile once instead of re-looking up the pattern per word.
    link = re.compile(r'^https?:\/\/.*[\r\n]*')
    return [link.sub('', w) for w in words]
def to_lowercase(words):
    """Convert all characters to lowercase from list of tokenized words"""
    return [w.lower() for w in words]
def remove_punctuation(words):
    """Remove punctuation from list of tokenized words"""
    # Strip punctuation, then drop tokens that became empty.
    stripped = (re.sub(r'[^\w\s]', '', w) for w in words)
    return [w for w in stripped if w != '']
def remove_stopwords(words):
    """Remove stop words from list of tokenized words"""
    # Performance fix: the original called stopwords.words('english') for
    # every word (rebuilding the list) and scanned it linearly; fetch the
    # list once and use a set for O(1) membership.
    stops = set(stopwords.words('english'))
    return [word for word in words if word not in stops]
def lemmatize_verbs(words):
    """Lemmatize verbs in list of tokenized words"""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(w, pos='v') for w in words]
def normalize(words):
    """Run the token-cleaning pipeline; order matters (links must be
    removed before punctuation stripping, stopwords last)."""
    for step in (remove_hyperlinks, remove_non_ascii, to_lowercase,
                 remove_punctuation, remove_stopwords):
        words = step(words)
    return words
def bag_of_words(words):
    """Return a word -> occurrence-count dict for the token list `words`."""
    from collections import Counter
    # Counter replaces the manual "first time vs seen before" counting loop.
    return dict(Counter(words))
############################################
############## MAIN FUNCTIONS ##############
############################################
def process_bow(words):
    """Tokenize a raw text string, normalize the tokens, and return the
    resulting bag-of-words dict."""
    words = nltk.word_tokenize(words)
    words = normalize(words)
    bow = bag_of_words(words)
    return bow
def subreddit_to_bow(subreddit_name):
    """Build, pickle and return the bag-of-words for a whole subreddit.

    Concatenates title, selftext and top comments of every stored
    submission.  Returns None if the submission dumps are missing.
    """
    full_post_dict = get_submissions(subreddit_name)
    if (full_post_dict is None): return
    full_text = ""
    for post in full_post_dict:
        curr_post = full_post_dict[post]
        full_text = full_text + curr_post['title'] + " " + curr_post['selftext'] + " " + ' '.join(curr_post['top_comments']) + " "
    bow = process_bow(full_text)
    with open('./data/{}_{}.pickle'.format(subreddit_name, 'bow'), 'wb') as handle:
        pickle.dump(bow, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print('created bow file for: {}'.format(subreddit_name))
    # Bug fix: the original returned process_bow(full_text), recomputing
    # the entire (expensive) BOW a second time just to return it.
    return bow
def subreddit_to_yearly_bow(subreddit_name):
    """Build, pickle and return one bag-of-words per posting year."""
    full_post_dict = get_submissions(subreddit_name)
    if (full_post_dict is None): return
    # Concatenate every post's text under its posting year.
    yearly_texts = {}
    for post_id in full_post_dict:
        post = full_post_dict[post_id]
        text = post['title'] + " " + post['selftext'] + " " + ' '.join(post['top_comments']) + " "
        year = post['year']
        yearly_texts[year] = yearly_texts.get(year, "") + text
    yearly_bow = {year: process_bow(text) for year, text in yearly_texts.items()}
    with open('./data/{}_{}.pickle'.format(subreddit_name, 'yearly_bow'), 'wb') as handle:
        pickle.dump(yearly_bow, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print('created yearly bow file for: {}'.format(subreddit_name))
    return yearly_bow
def get_bow(subreddit_name):
    """Load the subreddit's pickled BOW, building it first if absent.
    NOTE(review): if subreddit_to_bow fails (missing dumps) the open()
    below still raises — confirm callers expect that."""
    data_path = 'data/{}_bow.pickle'.format(subreddit_name)
    if (not path.exists(data_path)): subreddit_to_bow(subreddit_name)
    with open(data_path, 'rb') as handle:
        return pickle.load(handle)
def get_yearly_bow(subreddit_name):
    """Load the subreddit's pickled per-year BOW dict, building it first if absent."""
    data_path = 'data/{}_yearly_bow.pickle'.format(subreddit_name)
    if (not path.exists(data_path)): subreddit_to_yearly_bow(subreddit_name)
    with open(data_path, 'rb') as handle:
        return pickle.load(handle)
def main():
    """Build and persist the full and yearly bag-of-words for the
    subreddit named on the command line (-s/--subreddit)."""
    # # USING MONGODB
    # # Prints all submissions text that has score greater than 5000
    # # More querying documentation: https://docs.mongoengine.org/guide/querying.html
    # for subm in Submission.objects(score__gt=5000):
    # print('score: {}, title: {}'.format(subm.score, subm.title))
    # # Save the year of all collected posts and plot that as a histogram
    # year_array = []
    # for subm in Submission.objects:
    # year_array.append(subm.year)
    # y = np.array(year_array)
    # plt.hist(y)
    # plt.show()
    # # USING PICKLE FILES
    # full_post_dict = get_submissions('learnmachinelearning')
    # year_array = []
    # for item in full_post_dict:
    # year_array.append(full_post_dict[item]['year'])
    # y = np.array(year_array)
    # plt.hist(y)
    # plt.show()
    subreddit_name = args.subreddit
    # To create the BOW pickle files
    subreddit_to_bow(subreddit_name)
    subreddit_to_yearly_bow(subreddit_name)
    # # To get the saved BOWs
    # print(get_bow('learnmachinelearning'))
    # print(get_yearly_bow('learnmachinelearning')[2016])
if __name__ == "__main__":
    # `args` is intentionally module-level so main() can read it.
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--subreddit', help="subreddit to scrape and store data from")
    args = parser.parse_args()
    main()
import json,pickle
import tesy_json
# Round-trip check: tesy_json.date / date3 are expected to be pickle blobs.
# NOTE(review): project data module — contents cannot be verified here.
try:
    f_date = pickle.loads(tesy_json.date)
    t_date = pickle.loads(tesy_json.date3)
    print(t_date)
except Exception as e:
    # Broad catch is deliberate: just report the failure and continue.
    print(e)
import hashlib
md5 = hashlib.md5
x = 'sjlkfjdslfd'
# Bug fix: hashlib.md5() requires bytes; passing a str raised
# "TypeError: Unicode-objects must be encoded before hashing".
digest = md5(x.encode()).hexdigest()
print(x)
|
import tensorflow as tf
from spectral_norm import SpectralNormalization
from tensorflow_addons.layers import InstanceNormalization
class ConvLayer(tf.keras.layers.Layer):
    """Spectrally-normalized Conv2D preceded by reflection padding.

    With padding=None the reflection pad is kernel_size//2, i.e. "same"
    spatial size for stride 1.
    """
    def __init__(self, out_channels, kernel_size, stride, padding=None, data_format="channels_last"):
        super().__init__()
        self.data_format = data_format
        self.kernel_size = kernel_size
        self.padding = padding
        # Spectral normalization wraps the conv kernel (GAN stabilization).
        self.conv2d = SpectralNormalization(
            tf.keras.layers.Conv2D(filters=out_channels,
                                   kernel_size=kernel_size,
                                   strides=stride,
                                   kernel_initializer="truncated_normal",#weights_init,
                                   data_format=data_format
                                   ))
    def call(self, x):
        out = self.reflection_pad(x)
        out = self.conv2d(out)
        return out
    def reflection_pad(self, x):
        """
        paddings must be less than the dimension size
        """
        # Axis indices of channel/height/width for the configured layout.
        if self.data_format == "channels_first":
            d_C, d_H, d_W = 1,2,3
        elif self.data_format == "channels_last":
            d_C, d_H, d_W = 3,1,2
        B, C, H, W = x.shape[0], x.shape[d_C], x.shape[d_H], x.shape[d_W]
        # REFLECT padding is undefined when the dim is 1; skip padding then.
        if H == 1 or W == 1:
            return x
        if self.padding is None:
            padding = self.kernel_size // 2 # "same" padding
        else:
            padding = self.padding
        # tf.pad REFLECT requires padding < dimension size; otherwise skip.
        if padding==0 or (not padding<H) or (not padding<W):
            return x
        padding_list = [[0,0]]*4
        padding_list[d_H] = [padding, padding]
        padding_list[d_W] = [padding, padding]
        paddings = tf.constant(padding_list)
        return tf.pad(x, paddings, "REFLECT")
class SelfAttention(tf.keras.layers.Layer):
    """SAGAN-style self-attention: out = gamma * attention(V) + x,
    with gamma a learned scalar initialized to 0 (starts as identity)."""
    def __init__(self, channels, data_format="channels_last"):
        super().__init__()
        self.data_format = data_format
        # Q/K project to channels//8, so channels must exceed 8.
        assert(channels>8), "channels has to larger than 8!"
        self.q = ConvLayer(channels//8, 1, 1, 0)
        self.k = ConvLayer(channels//8, 1, 1, 0)
        self.v = ConvLayer(channels, 1, 1, 0)
        # Learned blending weight, starts at 0 => layer initially a no-op.
        self.gamma = tf.Variable(shape=(1,), initial_value=[0.])
    def call(self, x): #BxHxWxC
        """
        inputs:
            x with shape: (B, H, W, C)
        outputs:
            out with shape: (B, H, W, C)
        """
        if self.data_format == "channels_first":
            d_C, d_H, d_W = 1,2,3
        elif self.data_format == "channels_last":
            d_C, d_H, d_W = 3,1,2 # x.shape (B, H, W, C)
        B, C, H, W = x.shape[0], x.shape[d_C], x.shape[d_H], x.shape[d_W]
        #print("B, C, H, W = ", B, C, H, W)
        Q = self.q(x) #BxHxWxC', C'=C//8
        K = self.k(x) #BxHxWxC'
        V = self.v(x) #BxHxWxC
        #print(Q.shape, K.shape, V.shape)
        if self.data_format == "channels_last":# change to channel_first for matrix multiply
            trans = lambda t : tf.transpose(t, perm=[0, d_C, d_H, d_W])
            Q = trans(Q) #BxC'xHxW, C'=C//8
            K = trans(K) #BxC'xHxW
            V = trans(V) #BxC xHxW
            #print(Q.shape, K.shape, V.shape)
        # Flatten spatial dims so attention is over N = H*W positions.
        flat = lambda t : tf.reshape(t, shape=(B, -1, H*W) )
        Q = flat(Q) #BxC'xN, N=H*W
        K = flat(K) #BxC'xN
        V = flat(V) #BxC xN
        #print(Q.shape, K.shape, V.shape)
        Q_ = tf.transpose(Q, perm=[0, 2, 1])#BxNxC'
        attention = tf.nn.softmax(Q_@K, axis=-1) # (BxNxC') dot (BxC'xN) = BxNxN
        out = V@attention # (BxCxN) dot (BxNxN) = BxCxN
        out = tf.reshape(out, shape=(B, C, H, W) ) # BxCxHxW
        if self.data_format == "channels_last":
            out = tf.transpose(out, perm=[0, 2, 3, 1]) # change back
        # Residual blend: gamma scales the attention output.
        gamma = tf.broadcast_to(self.gamma, out.shape)
        out = gamma*out + x
        return out #BxHxWxC
class AdaIn(tf.keras.layers.Layer):
    """Adaptive Instance Normalization: re-normalizes each channel of x to
    the given style mean/stddev: std_style * (x - mean(x)) / std(x) + mean_style."""
    def __init__(self, data_format="channels_last"):
        super().__init__()
        self.data_format = data_format
        # Added to the feature stddev to avoid division by zero.
        self.eps = 1e-5
    def call(self, x, mean_style, std_style):
        """
        inputs:
            mean_style with shape: (B, C) or (B, C, 1)
            std_style with shape: (B, C) or (B, C, 1)
        NOTE: need to broadcast to the same dimension for +/-/*// operation
        """
        if self.data_format == 'channels_first':
            return self.call_channel_first(x, mean_style, std_style)
        elif self.data_format == "channels_last":
            return self.call_channel_last(x, mean_style, std_style)
    def call_channel_last(self, x, mean_style, std_style):
        B, H, W, C = x.shape
        feature = tf.reshape(x, (B, -1, C)) # shape: (B, H*W, C)
        # feature mean and stddev
        std_feat = tf.math.reduce_std(feature,axis=1) + self.eps # shape: (B, C)
        mean_feat = tf.math.reduce_mean(feature,axis=1) # shape: (B, C)
        # broadcast , before shape: (B, C) or (B, C, 1) , after shape: (B, H*W, C)
        broadcast = lambda t : tf.broadcast_to(tf.reshape(t, (B, 1, C)), feature.shape)
        std_style = broadcast(std_style)
        mean_style = broadcast(mean_style)
        std_feat = broadcast(std_feat)
        mean_feat = broadcast(mean_feat)
        adain = std_style * (feature - mean_feat) / std_feat + mean_style # shape: (B, H*W, C)
        adain = tf.reshape(adain, (B, H, W, C))
        return adain
    def call_channel_first(self, x, mean_style, std_style):
        B, C, H, W = x.shape
        feature = tf.reshape(x, (B, C, -1)) # shape: (B, C, H*W)
        # feature mean and stddev
        std_feat = tf.math.reduce_std(feature,axis=2) + self.eps # shape: (B, C)
        mean_feat = tf.math.reduce_mean(feature,axis=2) # shape: (B, C)
        # broadcast , before shape: (B, C) or (B, C, 1) , after shape: (B, C, H*W)
        broadcast = lambda t : tf.broadcast_to(tf.reshape(t, (B, C, 1)), feature.shape)
        std_style = broadcast(std_style)
        mean_style = broadcast(mean_style)
        std_feat = broadcast(std_feat)
        mean_feat = broadcast(mean_feat)
        adain = std_style * (feature - mean_feat) / std_feat + mean_style # shape: (B, C, H*W)
        adain = tf.reshape(adain, (B, C, H, W))
        return adain
class ResidualBlockDown(tf.keras.layers.Layer):
    """Pre-activation residual block that halves H and W via 2x2 max pooling
    on both the main path and the 1x1-projected skip path."""
    def __init__(self, out_channels, kernel_size=3, stride=1, padding=None):
        super().__init__()
        # Right Side
        self.conv_r1 = ConvLayer(out_channels, kernel_size, stride, padding)
        self.conv_r2 = ConvLayer(out_channels, kernel_size, stride, padding)
        # Left Side
        self.conv_l = ConvLayer(out_channels, 1, 1)
    def call(self, x):
        residual = x
        # Right Side
        out = tf.nn.relu(x)
        out = self.conv_r1(out)
        out = tf.nn.relu(out)
        out = self.conv_r2(out)
        out = tf.keras.backend.pool2d(out, (2,2), strides=(2,2))
        # Left Side: 1x1 conv matches channel count before pooling.
        residual = self.conv_l(residual)
        residual = tf.keras.backend.pool2d(residual, (2,2), strides=(2,2))
        # Merge
        out = residual + out
        return out
class ResidualBlockUp(tf.keras.layers.Layer):
    """Residual block that upsamples H and W (default 2x) on both the
    instance-normalized main path and the 1x1-projected skip path."""
    def __init__(self, out_channels, kernel_size=3, stride=1, upsample=2):
        super().__init__()
        # General
        self.upsample = tf.keras.layers.UpSampling2D(size=(upsample, upsample))
        # Right Side
        self.norm_r1 = InstanceNormalization()
        self.conv_r1 = ConvLayer(out_channels, kernel_size, stride)
        self.norm_r2 = InstanceNormalization()
        self.conv_r2 = ConvLayer(out_channels, kernel_size, stride)
        # Left Side
        self.conv_l = ConvLayer(out_channels, 1, 1)
    def call(self, x, training=True):
        residual = x
        # Right Side: norm -> relu -> upsample -> conv, twice (pre-activation).
        out = self.norm_r1(x, training=training)
        out = tf.nn.relu(out)
        out = self.upsample(out)
        out = self.conv_r1(out)
        out = self.norm_r2(out, training=training)
        out = tf.nn.relu(out)
        out = self.conv_r2(out)
        # Left Side: upsample then 1x1 conv to match channels.
        residual = self.upsample(residual)
        residual = self.conv_l(residual)
        # Merge
        out = residual + out
        return out
class ResidualBlock(tf.keras.layers.Layer):
    """Plain residual block: two (conv -> instance-norm) stages with a ReLU
    between them, plus an identity skip (spatial size and channels unchanged)."""
    def __init__(self, channels):
        super().__init__()
        self.conv1 = ConvLayer(channels, kernel_size=3, stride=1)
        self.in1 = InstanceNormalization()
        self.conv2 = ConvLayer(channels, kernel_size=3, stride=1)
        self.in2 = InstanceNormalization()
    def call(self, x, training=True):
        residual = x
        out = self.conv1(x)
        out = self.in1(out, training=training)
        out = tf.nn.relu(out)
        out = self.conv2(out)
        out = self.in2(out, training=training)
        out = out + residual
        return out
class AdaptiveResidualBlockUp(tf.keras.layers.Layer):
    """Upsampling residual block whose normalizations are AdaIn layers;
    the per-stage style mean/std are projected from a style embedding."""
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, upsample=2, emb_size=512, ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # General
        self.upsample = tf.keras.layers.UpSampling2D(size=(upsample, upsample))
        # Right Side
        self.norm_r1 = AdaIn()
        self.conv_r1 = ConvLayer(out_channels, kernel_size, stride)
        self.norm_r2 = AdaIn()
        self.conv_r2 = ConvLayer(out_channels, kernel_size, stride)
        # Left Side
        self.conv_l = ConvLayer(out_channels, 1, 1)
        # for Adaptive ADAIN - projection layer
        # Each Dense emits 2*channels values, split into (mean, std) below.
        self.linear1 = tf.keras.layers.Dense(in_channels*2, input_shape=(emb_size,), use_bias=False)
        self.linear2 = tf.keras.layers.Dense(out_channels*2, input_shape=(emb_size,), use_bias=False)
    def call(self, x, emb): # emb: fixed size= 512
        # Project the style embedding into per-channel (mean, std) pairs.
        mean1, std1 = tf.split(self.linear1(emb), num_or_size_splits=2, axis=1)
        mean2, std2 = tf.split(self.linear2(emb), num_or_size_splits=2, axis=1)
        residual = x
        # Right Side
        out = self.norm_r1(x, mean1, std1)
        out = tf.nn.relu(out)
        out = self.upsample(out)
        out = self.conv_r1(out)
        out = self.norm_r2(out, mean2, std2)
        out = tf.nn.relu(out)
        out = self.conv_r2(out)
        # Left Side
        residual = self.upsample(residual)
        residual = self.conv_l(residual)
        # Merge
        out = residual + out
        return out
class AdaptiveResidualBlock(tf.keras.layers.Layer):
    """Residual block with AdaIn normalizations; style mean/std for each
    stage are projected from a fixed-size style embedding."""
    def __init__(self, channels, emb_size=512, ):
        super().__init__()
        self.conv1 = ConvLayer(channels, kernel_size=3, stride=1)
        self.in1 = AdaIn()
        self.conv2 = ConvLayer(channels, kernel_size=3, stride=1)
        self.in2 = AdaIn()
        # for Adaptive ADAIN - projection layer
        # Each Dense emits 2*channels values, split into (mean, std) below.
        self.linear1 = tf.keras.layers.Dense(channels*2, input_shape=(emb_size,), use_bias=False)
        self.linear2 = tf.keras.layers.Dense(channels*2, input_shape=(emb_size,), use_bias=False)
    def call(self, x, emb):# emb: fixed size= 512
        mean1, std1 = tf.split(self.linear1(emb), num_or_size_splits=2, axis=1)
        mean2, std2 = tf.split(self.linear2(emb), num_or_size_splits=2, axis=1)
        residual = x
        out = self.conv1(x)
        out = self.in1(out, mean1, std1)
        out = tf.nn.relu(out)
        out = self.conv2(out)
        out = self.in2(out, mean2, std2)
        out = out + residual
        return out
class AdaptiveMaxPool2d(tf.keras.layers.Layer):
    """
    pytorch has nn.AdaptiveMaxPool2d
    but tensorflow 2.0 has not.
    Implement by myself.

    Reflect-pads the input (when H/W are not multiples of the requested
    output size) and then max-pools down to exactly `output_dim`.
    """
    def __init__(self, output_dim=(1,1), data_format='channels_last' ):
        super().__init__()
        self.data_format = data_format
        self.output_dim = output_dim
        self.pool = tf.keras.layers.MaxPool2D(padding='valid', data_format=data_format)
    def call(self, x):
        if self.data_format == 'channels_first' :
            return self.call_channel_first(x)
        elif self.data_format == "channels_last":
            return self.call_channel_last(x)
    def call_channel_first(self, x):
        """Pool an NCHW tensor down to self.output_dim."""
        in_B, in_C, in_H, in_W = x.shape
        op_H, op_W = self.output_dim
        p1_H, p2_H = self.get_paddings_for_outputsize_fully_devided(in_H, op_H)
        p1_W, p2_W = self.get_paddings_for_outputsize_fully_devided(in_W, op_W)
        if p1_H!=0 or p2_H!=0 or p1_W!=0 or p2_W!=0:
            paddings = tf.constant([[0, 0,], [0, 0,], [p1_H, p2_H], [p1_W, p2_W]])
            x = tf.pad(x, paddings, "REFLECT")
            in_B, in_C, in_H, in_W = x.shape
        assert in_H%op_H==0
        assert in_W%op_W==0
        kernel_size_H = self.get_pooling_kernel_size(in_H, op_H)
        kernel_size_W = self.get_pooling_kernel_size(in_W, op_W)
        self.pool.pool_size = (kernel_size_H, kernel_size_W)
        self.pool.strides = (kernel_size_H, kernel_size_W)
        x = self.pool(x)
        assert x.shape[2] == op_H
        assert x.shape[3] == op_W
        return x
    def call_channel_last(self, x):
        """Pool an NHWC tensor down to self.output_dim."""
        in_B, in_H, in_W, in_C = x.shape
        op_H, op_W = self.output_dim
        p1_H, p2_H = self.get_paddings_for_outputsize_fully_devided(in_H, op_H)
        p1_W, p2_W = self.get_paddings_for_outputsize_fully_devided(in_W, op_W)
        if p1_H!=0 or p2_H!=0 or p1_W!=0 or p2_W!=0:
            paddings = tf.constant([[0, 0,], [p1_H, p2_H], [p1_W, p2_W], [0, 0,] ])
            x = tf.pad(x, paddings, "REFLECT")
            in_B, in_H, in_W, in_C = x.shape
        assert in_H%op_H==0
        assert in_W%op_W==0
        kernel_size_H = self.get_pooling_kernel_size(in_H, op_H)
        kernel_size_W = self.get_pooling_kernel_size(in_W, op_W)
        self.pool.pool_size = (kernel_size_H, kernel_size_W)
        self.pool.strides = (kernel_size_H, kernel_size_W)
        x = self.pool(x)
        assert x.shape[1] == op_H
        assert x.shape[2] == op_W
        return x
    def get_pooling_kernel_size(self, in_size, out_size):
        """Kernel size so that pooling (stride == kernel) yields out_size windows."""
        assert in_size%out_size==0
        stride = in_size//out_size
        kernel_size = in_size - (out_size-1)*stride
        return kernel_size
    def get_paddings_for_outputsize_fully_devided(self, in_size, out_size):
        """Return (pad_before, pad_after) so in_size + pads is a multiple of out_size."""
        rem = in_size % out_size
        if rem > 0:
            # Bug fix: the total padding required is out_size - rem.  The
            # original split it based on the parity of `rem` instead of the
            # parity of that total, producing too little padding (e.g.
            # in_size=5, out_size=3 yielded (0, 0) and tripped the
            # divisibility asserts in call_*).
            total = out_size - rem
            p2 = total // 2
            p1 = total - p2
            return p1, p2
        else:
            return 0, 0
import os
import smtplib
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
# Email you want to send the update from (only works with gmail)
fromEmail = '******@gmail.com'
# You can generate an app password here to avoid storing your password in plain text
# https://support.google.com/accounts/answer/185833?hl=en
fromEmailPassword = '*****'
# Email you want to send the update to
toEmail = '*******@gmail.com'
def sendEmail(unknown):
    """Email the image at path `unknown` as a 'Security Update' message
    via Gmail SMTP (STARTTLS) using the module-level account settings."""
    # `with` closes the image file deterministically (the original leaked
    # the handle returned by open(...).read()).
    with open(unknown, 'rb') as img_file:
        img_data = img_file.read()
    msgRoot = MIMEMultipart('related')
    msgRoot['Subject'] = 'Security Update'
    msgRoot['From'] = fromEmail
    msgRoot['To'] = toEmail
    msgRoot.preamble = 'Raspberry pi security camera update'
    text = MIMEText("test")
    msgRoot.attach(text)
    # Name the attachment after the actual file; the original built the
    # name with an empty str() call, always yielding "Unknown..jpg".
    image = MIMEImage(img_data, name=os.path.basename(unknown))
    msgRoot.attach(image)
    smtp = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        smtp.starttls()
        smtp.login(fromEmail, fromEmailPassword)
        smtp.sendmail(fromEmail, toEmail, msgRoot.as_string())
    finally:
        # Always terminate the SMTP session (quit was commented out before).
        smtp.quit()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 12:12:42 2017
@author: Andrew
"""
# Write a program that counts up the number of vowels
# contained in the string s. Valid vowels are:
# 'a', 'e', 'i', 'o', and 'u'. For example, if
# s = 'azcbobobegghakl', your program should print:
# Number of vowels: 5
# Count the vowels ('a', 'e', 'i', 'o', 'u') in the user's input.
s = input("What do you want?")  # input() already returns str; str() was redundant
vowel = 0
for letter in s:
    # Membership test replaces the five-way chained `or` comparison.
    if letter in "aeiou":
        vowel += 1
print("Number of vowels: " + str(vowel))
from .base import BaseProvider
from .address import Address
from .business import Business
from .clothing import ClothingSizes
from .code import Code
from .date import Datetime
from .development import Development
from .file import File
from .food import Food
from .hardware import Hardware
from .internet import Internet
from .numbers import Numbers
from .path import Path
from .personal import Personal
from .science import Science
from .structured import Structured
from .text import Text
from .transport import Transport
from .units import UnitSystem
from .games import Games
from .cryptographic import Cryptographic
from .generic import Generic
# Public API of the providers package; kept in sync with the imports above.
__all__ = [
    # The main class:
    'BaseProvider',
    # And data providers:
    'Address',
    'Business',
    'ClothingSizes',
    'Code',
    'Datetime',
    'Development',
    'File',
    'Food',
    'Games',
    'Hardware',
    'Internet',
    'Numbers',
    'Path',
    'Personal',
    'Science',
    'Structured',
    'Text',
    'Transport',
    'UnitSystem',
    'Cryptographic',
    # Has all:
    'Generic',
]
|
# id=5192
# Use the Python console in order to evaluate the value of each one of the following expressions.
# Before you start, make sure you create the following two sets:
# a = [1,2,3]
# b = [5,6,7]
# c = [1,2,3,4,5,6,7,8,9]
# The expressions you should check their values are:
# a * 2
# b * 3
# 71/5
# 71//5
# 2 ** 3
# 4 ** 2
# a[1]
# b[2]
# a[0]
# c[2:7]
# Demonstration of list repetition, division, exponentiation, indexing and slicing.
a = [1, 2, 3]
b = [5, 6, 7]
c = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print("a: ", a)
print("b: ",b)
print("c: ", c)
print("a * 2:", a * 2)  # list repetition, not element-wise multiply
print("b * 3:", b * 3)
print("71 / 5:", 71 / 5)  # true division -> float
print("71 // 5:", 71 // 5) # Floor division
print("2 ** 3:", 2 ** 3)
print("4 ** 2:", 4 ** 2)
print("Index a[1]:", a[1]) # Index 2
print("Index b[2]:", b[2]) # Index 3
print("Index a[0]:", a[0]) # Index 0
print("Index c[2:7]:", c[2:7]) # Not includes the index 7 (c[7] == 8)
|
'''
Enumeration class that defines the type of data sets that contain results from data analysis in terms of its persistence
@author: S41nz
'''
class DataSetType:
    """Enumeration of analysis-result data sets by persistence behavior."""

    # Result set can be kept in a cache.
    CACHEABLE = 0
    # Result set arrives continuously as a stream.
    STREAMING = 1
    # Result set must be fetched directly from its source on demand.
    DIRECT_FETCH = 2
|
from django.contrib import admin
from .models import User,Class,Article,Comment
# Register my own models.
class ArticleAdmin(admin.ModelAdmin):
    """Admin customization for Article: edit form layout, list columns,
    sidebar filters and title search."""
    # Field order on the add/change form.
    fields = ['article_title','article_simpledesc','article_class','article_publisher','article_pubdate','article_content']
    # Columns shown on the change-list page.
    list_display = ('article_title','article_class','article_publisher','article_pubdate')
    # Right-hand sidebar filters.
    list_filter = ['article_pubdate','article_class']
    # Search box matches against the title only.
    search_fields = ['article_title']
class UserAdmin(admin.ModelAdmin):
    """Admin customization for User: change-list columns only."""
    list_display = ('user_name','user_email','user_isadmin')
# Register models; Class uses the default ModelAdmin.
admin.site.register(Article,ArticleAdmin)
admin.site.register(User,UserAdmin)
admin.site.register(Class)
admin.site.register(Comment) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 14:08:44 2020
@author: shino
"""
import numpy as np
import pandas as pd
def read_bin(filename, shape=None, **kwargs):
    """Read a _raw binary_ file and store all possible elements in a pandas DataFrame.

    If the shape of the array is known, it can be specified using `shape`;
    the first three columns are used for x, y and z. Otherwise the binary
    file is assumed to have row-major format and is folded into three
    columns used as x, y and z respectively.

    NOTE: binary files that are not `raw` will not behave as expected; files
    with headers/footers or Protobuf-style framing are undefined behavior.

    Parameters
    ----------
    filename: str
        Path to the file.
    shape: (n_rows, n_cols), optional
        Shape to be formed from the loaded binary array.
    **kwargs:
        Passed through to numpy.fromfile (e.g. dtype, count, offset).

    Returns
    -------
    data: dict
        {"points": DataFrame with columns x, y, z}.

    Raises
    ------
    ValueError
        If the file's element count is incompatible with `shape`.
    """
    data = {}
    # Default to float32 unless the caller overrides the dtype.
    kwargs['dtype'] = kwargs.get('dtype', np.float32)
    arr = np.fromfile(filename, **kwargs)
    # (debug prints of the filename and first element removed)
    if shape is not None:
        try:
            arr = arr.reshape(shape)
        except ValueError as err:
            # Fix: old message claimed "not divisible by three" regardless of
            # the requested shape; report the actual mismatch and chain the
            # original exception for context.
            raise ValueError(
                'The array cannot be reshaped to {0} as it has '
                '{1} elements'.format(shape, arr.size)) from err
    else:
        arr = arr.reshape((-1, 3))
    data["points"] = pd.DataFrame(arr[:, 0:3], columns=['x', 'y', 'z'])
    return data
def main():
    """Smoke-test read_bin on a hard-coded local file and print the result.

    NOTE(review): the path is machine-specific; parameterize before reuse.
    """
    # path = "/Volumes/ssd/diamond.bin"
    # print(path)
    path = "/Volumes/ssd/NYU/all/T_316000_233500_NW.bin"
    data = read_bin(path)
    print(data)
main() |
from zope.interface import alsoProvides, implements
from zope.component import adapts
from zope import schema
from plone.directives import form
from plone.dexterity.interfaces import IDexterityContent
from plone.autoform.interfaces import IFormFieldProvider
from plone.namedfile import field as namedfile
from z3c.relationfield.schema import RelationChoice, RelationList
from plone.formwidget.contenttree import ObjPathSourceBinder
from collective.miscbehaviors.behavior.utils import context_property
from collective.miscbehaviors import _
class IContactInfo(form.Schema):
"""
Marker/Form interface for Contact Info
"""
# -*- Your Zope schema definitions here ... -*-
contactName = schema.TextLine(
title=_(u"Contact Name"),
description=u"",
required=False,
)
contactEmail = schema.TextLine(
title=_(u"Contact Email"),
description=u"",
required=False,
)
contactPhone = schema.TextLine(
title=_(u"Contact Phone"),
description=u"",
required=False,
)
alsoProvides(IContactInfo,IFormFieldProvider)
class ContactInfo(object):
"""
Adapter for Contact Info
"""
implements(IContactInfo)
adapts(IDexterityContent)
def __init__(self,context):
self.context = context
# -*- Your behavior property setters & getters here ... -*-
contactPhone = context_property('contactPhone')
contactEmail = context_property('contactEmail')
contactName = context_property('contactName')
|
from fastapi import FastAPI
# Fix: BaseModel was referenced below but never imported, so the module
# failed with NameError at import time. pydantic is a hard dependency of
# fastapi, so no new package is introduced.
from pydantic import BaseModel

app = FastAPI(
    title = "Logg API",
    description = "An API for all your logging needs.",
    version = "2.0",
)

class Log(BaseModel):
    """Request body for a single log entry."""
    queueId: str   # originating queue
    message: str   # log text
    logType: str   # severity/category
    logDate: str   # timestamp as a string
@app.get("/loggapi/v2/logs/{appId}")
async def Logs(appId: str):
results = storage.GetLogs(appId) # Huh? Bang!
return results
@app.post("/loggapi/v2/log")
async def AddLog(log: Log):
results = storage.AddLog(log)
return results
|
def type_check(type_of_arg):
    """Decorator factory: reject calls whose single argument is not exactly
    *type_of_arg*.

    The wrapped function returns the string "Bad Type" instead of raising
    (unchanged contract); the check is an exact type match, so subclasses
    are rejected too.
    """
    import functools

    def decorator(fn):
        # functools.wraps preserves fn's __name__/__doc__ for introspection,
        # which the bare wrapper previously clobbered.
        @functools.wraps(fn)
        def wrapper(arg):
            if type(arg) != type_of_arg:
                return "Bad Type"
            return fn(arg)
        return wrapper
    return decorator
# @type_check(int)
# def times2(num):
# return num*2
#
#
# print(times2(2))
# print(times2('Not A Number'))
|
# -*- coding: utf-8 -*-
import datetime
import feedparser
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django_jinja import library
from jinja2 import pass_context
def render_feed_list(feeds, template):
    """Render *feeds* through *template* and return the HTML marked safe."""
    context = {'feeds': feeds}
    rendered = render_to_string(template, context)
    return mark_safe(rendered)
@library.global_function
@pass_context
def render_feeds(context, object_type=None, object_id=None, template='feeds/feeds.html'):
    """Render only the request's feeds matching (object_type, object_id).

    Feeds are read from `request._feeds` (a list of dicts with at least
    'object_type' and 'object_id' keys) — presumably stashed there by
    middleware; NOTE(review): confirm against the code that populates it.
    """
    feeds = getattr(context.get('request'), '_feeds', [])
    return render_feed_list([f for f in feeds if f['object_type'] == object_type and f['object_id'] == object_id], template)
@library.global_function
@pass_context
def render_all_feeds(context, template='feeds/feeds.html'):
    """Render every feed stashed on the request, without filtering."""
    feeds = getattr(context.get('request'), '_feeds', [])
    return render_feed_list(feeds, template)
@library.global_function
def pull_feeds(url, max_count=4):
    """Fetch an RSS/Atom feed and render up to *max_count* recent posts.

    Falls back from `published_parsed` to `updated_parsed` (Atom feeds often
    carry only the latter). Returns safe HTML from feeds/pull_feeds.html.
    """
    posts = []
    feed = feedparser.parse(url)
    entries_count = len(feed.entries)
    for i in range(min(max_count, entries_count)):
        try:
            pub_date = feed.entries[i].published_parsed
        except AttributeError:
            pub_date = feed.entries[i].updated_parsed
        # time.struct_time-like tuple: (year, month, day, ...).
        published = datetime.date(pub_date[0], pub_date[1], pub_date[2])
        # NOTE(review): `i` is already known valid here, so this IndexError
        # guard looks unreachable; a missing `title`/`link` would raise
        # AttributeError instead — confirm the intended failure mode.
        try:
            posts.append({
                'title': feed.entries[i].title,
                'link': feed.entries[i].link,
                'published': published,
            })
        except IndexError:
            pass
    ctx = {'posts': posts}
    return mark_safe(render_to_string("feeds/pull_feeds.html", ctx))
|
class BoardMap():
    '''
    Container for a named game-board map.
    '''
    def __init__(self, map_name):
        # Name identifying which map this instance represents.
        self.map_name = map_name

    def __repr__(self):
        # Added for debuggability; purely additive, callers are unaffected.
        return "BoardMap(map_name=%r)" % (self.map_name,)
|
"""Test Metadata"""
# pylint: disable=protected-access, missing-function-docstring
import pytest
from appmap._implementation.metadata import Metadata
from appmap.test.helpers import DictIncluding
def test_missing_git(git, monkeypatch):
    """Metadata must degrade gracefully when git is not on PATH."""
    # Empty PATH makes the git executable unfindable.
    monkeypatch.setenv('PATH', '')
    try:
        metadata = Metadata(root_dir=git.cwd)
        assert 'git' not in metadata
    except FileNotFoundError:
        # Fix: `assert False, msg` replaced with the idiomatic pytest.fail
        # (clearer intent; not stripped under -O).
        pytest.fail("_git_available didn't handle missing git")
def test_git_metadata(git):
    """A clean repo with one untracked file yields repo/branch/status but no
    tag-related keys."""
    metadata = Metadata(root_dir=git.cwd)
    assert 'git' in metadata
    git_md = metadata['git']
    # DictIncluding: subset match, extra keys in git_md are allowed.
    assert git_md == DictIncluding({
        'repository': 'https://www.example.test/repo.git',
        'branch': 'master',
        'status': [
            '?? new_file'
        ]
    })
    # No tags exist yet, so none of the tag metadata should be present.
    for key in (
        'tag', 'annotated_tag', 'commits_since_tag', 'commits_since_annotated_tag'
    ):
        assert key not in git_md
def test_tags(git):
    """Tag metadata reflects both tag kinds and commit distances.

    History built here: annotated tag -> commit (adds new_file) ->
    lightweight tag -> commit (removes README), so the plain tag is 1
    commit back and the annotated tag 2 commits back.
    """
    atag = 'new_annotated_tag'
    git(f'tag -a "{atag}" -m "add annotated tag"')
    git('add new_file')
    git('commit -m "added new file"')
    tag = 'new_tag'
    git(f'tag {tag}')
    git('rm README.metadata')
    git('commit -m "Removed readme"')
    metadata = Metadata(root_dir=git.cwd)
    git_md = metadata['git']
    assert git_md == DictIncluding({
        'repository': 'https://www.example.test/repo.git',
        'branch': 'master',
        'tag': tag,
        'annotated_tag': atag,
        'commits_since_tag': 1,
        'commits_since_annotated_tag': 2
    })
def test_add_framework():
    """Frameworks are de-duplicated, and reading Metadata() apparently
    resets the accumulated list.

    NOTE(review): the second assertion implies add_framework state is
    cleared by the first Metadata() construction — confirm that contract
    in appmap._implementation.metadata.
    """
    Metadata.add_framework('foo', '3.4')
    Metadata.add_framework('foo', '3.4')  # duplicate must collapse to one
    assert Metadata()['frameworks'] == [{'name': 'foo', 'version': '3.4'}]
    Metadata.add_framework('bar')  # version is optional
    Metadata.add_framework('baz', '4.2')
    assert Metadata()['frameworks'] == [{'name': 'bar'}, {'name': 'baz', 'version': '4.2'}]
|
from django.core.management.base import BaseCommand
from django.core.management.color import no_style
from django.db import connection
# Fix: ContentType is declared in django.contrib.contenttypes.models; the
# old import from django.contrib.auth.models only ever worked through an
# incidental re-export and fails on modern Django.
from django.contrib.contenttypes.models import ContentType


class Command(BaseCommand):
    """Management command that clears Django's ContentType rows and resets
    the associated primary-key sequence."""
    help = 'Clear Django tables'

    def add_arguments(self, parser):
        parser.add_argument(
            '--all',
            action='store_true',
            dest='all',
            help='Clear Django tables',
        )

    def handle(self, *args, **options):
        if options['all']:
            # Delete every ContentType row, then reset its PK sequence so
            # new rows start numbering from 1 again.
            ContentType.objects.all().delete()
            sequence_sql = connection.ops.sequence_reset_sql(no_style(), [ContentType, ])
            with connection.cursor() as cursor:
                for sql in sequence_sql:
                    cursor.execute(sql)
        self.stdout.write(self.style.SUCCESS("Suppression des données des tables de Django terminée!"))
|
# Time -> How many numbers that contain the num-input.
# Count every h:m:s timestamp, for hours 0..limit inclusive, whose digits
# contain a '3' anywhere in the concatenated hour/minute/second string.
limit = int(input())
count = 0
for hour in range(limit + 1):
    for minute in range(60):
        for second in range(60):
            if '3' in f"{hour}{minute}{second}":
                count += 1
print(count)
|
import json
import os
import spotipy
import spotipy.oauth2 as oauth2
file_name = "spotify.json"
def auth():
print("Autoryzowenie...")
try:
f = open(file_name)
data = json.load(f)
f.close()
if data == {}:
raise Exception
except Exception as e:
print("Cannot found " + file_name)
exit()
credentials = oauth2.SpotifyClientCredentials(
client_id=data["clientID"], client_secret=data["clientSecret"]
)
token = credentials.get_access_token()
return token["access_token"]
def onInvalidToken():
    """Report an invalid Spotify token and terminate the process."""
    print("ERROR: Invalid token!")
    raise SystemExit
|
# Generated by Django 3.1.2 on 2020-10-21 20:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Quiz table.

    NOTE: generated by Django; do not hand-edit field definitions.
    """
    initial = True
    dependencies = [
        ('accounts', '0003_remove_profile_current_save'),
    ]
    operations = [
        migrations.CreateModel(
            name='Quiz',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('created_on', models.DateField(auto_now_add=True)),
                ('last_updated', models.DateField(auto_now=True)),
                ('archived', models.BooleanField(default=False)),
                # Deleting a profile cascades to its quizzes.
                ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='quizzes', to='accounts.profile')),
            ],
        ),
    ]
|
# Path sum: two ways
#
# Dynamic programming over the cost matrix: moving only right or down,
# each cell accumulates the cheapest total cost of reaching it; the answer
# is the accumulated cost in the bottom-right cell.
with open('../sources/matrix.txt') as f:
    grid = [[int(tok) for tok in line.split(',')] for line in f.read().splitlines()]

for r in range(len(grid)):
    for c in range(len(grid[r])):
        if r == 0 and c == 0:
            continue  # start cell keeps its own cost
        if c == 0:
            grid[r][c] += grid[r - 1][c]       # first column: only from above
        elif r == 0:
            grid[r][c] += grid[r][c - 1]       # first row: only from the left
        else:
            # interior: cheaper of coming from above or from the left
            grid[r][c] += min(grid[r - 1][c], grid[r][c - 1])

print(grid[-1][-1])
# content of conftest.py
import pytest
TEST_USER="test@test.com"
url_genss = [
("api.user_worksheet", {"user": "test@test.com", "worksheet": "test"}),
("api.user_gene_table", {"user": "test@test.com", "worksheet": "test", "cluster_name": "4"}),
("api.user_cluster_scatterplot", {"user": "test@test.com", "worksheet": "test", "type": "umap"}),
("api.user_gene_scatterplot", {"user": "test@test.com", "worksheet": "test", "type": "umap", "gene":"CC14"}),
]
@pytest.fixture(params=url_genss)
def url_gens(request):
yield request.param
from cluster.app import create_app
from cluster.database import db as the_db
import os
# Initialize the Flask-App with test-specific settings.
# Both databases are in-memory SQLite so each run starts clean; CSRF and
# outbound mail are disabled to keep tests hermetic.
the_app = create_app(dict(
    SQLALCHEMY_DATABASE_URI="sqlite://",
    SQLALCHEMY_USER_DATABASE_URI="sqlite://",
    TESTING=True,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    USER_EMAIL_SENDER_EMAIL="test@tests.com",
    USER_ENABLE_USERNAME=False, # email auth only, no username is used
    USER_APP_NAME="UCSC Cell Atlas", # in and email templates and page footers
    USER_AUTO_LOGIN=False,
    USER_AUTO_LOGIN_AFTER_REGISTER=False,
    USER_AUTO_LOGIN_AT_LOGIN=False,
    SECRET_KEY="*** super duper secret test password ***",
    WTF_CSRF_ENABLED=False,
    LOGIN_DISABLED=False,
    MAIL_SUPPRESS_SEND=True,
    SERVER_NAME="localhost.localdomain",
    # Per-test scratch directory next to this conftest.
    USER_DIRECTORY = os.path.join(os.path.split(os.path.abspath(__file__))[0], "tmp")
))
# Setup an application context (since the tests run outside of the webserver context)
the_app.app_context().push()
@pytest.fixture(scope='session')
def app():
    """ Makes the 'app' parameter available to test functions. """
    # Session-scoped: the single pre-configured test app is shared.
    return the_app
@pytest.fixture(scope='session')
def db():
    """ Makes the 'db' parameter available to test functions. """
    # Session-scoped: the shared SQLAlchemy handle, not a per-test session.
    return the_db
@pytest.fixture(scope='function')
def session(db, request):
    """Creates a new database session for a test."""
    # Standard transaction-rollback pattern: the whole test runs inside one
    # connection-level transaction that is rolled back afterwards, so tests
    # cannot leak rows into each other.
    connection = db.engine.connect()
    transaction = connection.begin()
    options = dict(bind=connection, binds={})
    session = db.create_scoped_session(options=options)
    # Swap the app's session for the scoped one so code under test uses it.
    db.session = session
    def teardown():
        transaction.rollback()
        connection.close()
        session.remove()
    request.addfinalizer(teardown)
    return session
@pytest.fixture(scope='session')
def client(app):
    # Flask test client for issuing requests without a running server.
    return app.test_client()
import shutil
import os
from tests.gen_data import write_all
@pytest.fixture(scope='session')
def user_worksheet_data(request,app):
    """Generate the on-disk worksheet fixtures once per session.

    Creates USER_DIRECTORY, writes all generated data files into it, and
    removes the whole tree at session end.
    """
    tmpdir = app.config["USER_DIRECTORY"]
    os.mkdir(tmpdir)
    filepaths = write_all(tmpdir)
    def teardown():
        shutil.rmtree(tmpdir)
    request.addfinalizer(teardown)
    return filepaths
@pytest.fixture(scope='function')
def this_runs(tmp_path):
    # Debug fixture: just prints pytest's per-test temporary directory.
    print(tmp_path)
|
import pandas as pd
from stockdata.technical_indicators.indicators import Indicators
def addIndicators(df):
    """Return *df* with the full technical-indicator set appended."""
    engine = Indicators()
    return engine.createIndicators(df)
def addMA(df, n):
    """Return *df* with an n-period simple moving average column."""
    engine = Indicators()
    return engine.MA(df, n)
def addEMA(df, n):
    """Return *df* with an n-period exponential moving average column.

    Fix: the wrapper called a nonexistent ``EsssMA`` method; by the naming
    pattern of its sibling (addMA -> Indicators.MA) the intended method is
    ``EMA``. NOTE(review): confirm against the Indicators class.
    """
    return Indicators().EMA(df, n)
|
import shutil
import os
import datetime
now = datetime.datetime.now()
print(now)
# Files last modified before this moment (24 h ago) count as "old".
before = now - datetime.timedelta(hours = 24)
print (before)


def moveFiles(source="C:/Users/Student/Desktop/foldera/",
              destination="C:/Users/Student/Desktop/folderb/",
              cutoff=None):
    """Move .txt files older than *cutoff* from *source* to *destination*.

    Generalized: the two folders were hard-coded; they are now parameters
    whose defaults preserve the original behavior. *cutoff* defaults to
    the module-level `before` (24 hours ago).
    """
    if cutoff is None:
        cutoff = before
    for name in os.listdir(source):
        # Only plain-text files are migrated.
        if not name.endswith(".txt"):
            continue
        path = source + name
        modtime = datetime.datetime.fromtimestamp(os.path.getmtime(path))
        if modtime < cutoff:
            shutil.move(path, destination + name)


moveFiles()
|
#3
def same_length(ls1, ls2):
    """Return True when the two lists have equal length."""
    # The flag-variable if/else (with stray semicolons) reduced to a direct
    # boolean return; same values, same interface.
    return len(ls1) == len(ls2)
def add_list(ls1, ls2):
    """Element-wise sum of two equal-length lists, formatted one per line.

    Returns each sum followed by a newline, or the error string
    "Lists are different length" when the lengths differ (unchanged
    contract).
    """
    if not same_length(ls1, ls2):
        return "Lists are different length"
    # zip pairs elements directly instead of indexing both lists, and the
    # two manual accumulation loops collapse into one join.
    return "".join(str(x + y) + "\n" for x, y in zip(ls1, ls2))
def main():
    """Interactively read two integer lists (each terminated by the word
    "done") and print their element-wise sum."""
    first = []
    second = []
    print("Enter numbers for list 1 and list 2")
    entry = None
    while entry != "done":
        entry = input()
        if entry != "done":
            first.append(int(entry))
    entry = None
    while entry != "done":
        entry = input()
        if entry != "done":
            second.append(int(entry))
    print("Resulting List... ")
    print(add_list(first, second))
main()
|
import numpy as np
import pylab as pl
from scipy import stats
# load the data in
# Load the mixed-type CSV; named columns come from the header row.
data = np.genfromtxt('ship-nmpg-imp.csv', delimiter=",", names=True, dtype="f8,i8,f8,f8,f8,f8,i8,i8,S25")
varnames = data.dtype.names
# we loop through names[1:-1] to skip nmpg and uid
for name in varnames[1:-1]:
    slope, intercept, r_value, p_value, std_err = stats.linregress(data[name], data['nmpg'])
    # Fix: the two Python 2 `print` statements were syntax errors under
    # Python 3; converted to print() calls with identical output.
    print("Comparing nmpg to %s" % name)
    print('r_value = ', r_value, ' r_sqrd = ', r_value**2, '\n')
    pl.figure()
    pl.scatter(data[name], data['nmpg'], color='#FF971C')
    # Regression line over the same x values.
    Y = slope * data[name] + intercept
    pl.plot(data[name], Y, color='purple', alpha=0.5, linewidth=2)
    pl.title('%s model \n r^2= %s, log_pvalue= %s' % (name, r_value*r_value, np.log10(p_value)))
    pl.xlabel(name)
    pl.ylabel('NMPG')
    pl.xticks()
    pl.yticks()
    pl.show()
|
#=================================#
#============Kanagawa==============#
#=======For Tokyo Studio=====#
#==========Written by Dugy========#
#===========Apr 25th 2017==========#
#==Do not use without permission==#
#=================================#
from math import *
import random
import Rhino.Geometry as rg
import Rhino.Geometry.Vector3d as PVector
import Rhino.Geometry.Point3d as Point
import Rhino.Geometry.Curve as Curve
import Rhino.Geometry.Line as Line
import rhinoscriptsyntax as rs
import scriptcontext as sc
import ghpythonlib.components as ghc
#=======================#
#For List to Datatree
#=======================#
import System.Collections.Generic as SCG
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
def AddToPath(l, dTree, pathRoot=''):
    """Recursively flatten nested lists into a Grasshopper DataTree.

    Each nesting level appends its index to *pathRoot* (comma separated);
    leaves are added under the GH_Path built from those indices.
    """
    if isinstance(l, list):
        for i in xrange(len(l)):
            branch = pathRoot + ',' + str(i) if pathRoot != '' else str(i)
            AddToPath(l[i], dTree, branch)
    else:
        # Fix: build the GH_Path directly from the collected indices instead
        # of assembling and exec()-ing a source string.
        indices = [int(s) for s in pathRoot.split(',')] if pathRoot else []
        dTree.Add(l, GH_Path(*indices))
#=======================#
#Random Seed
#=======================#
random.seed(18)  # fixed seed so the generated field is reproducible between runs
#=======================#
#Basic Functions
#=======================#
def toplimit(vct, speed):
    """Clamp *vct* so its length does not exceed *speed*; returns the vector."""
    if vct.Length > speed:
        vct.Unitize()
        vct = PVector.Multiply(vct, speed)
    return vct
def lowerlimit(vct, speed):
    """Stretch *vct* so its length is at least *speed*; returns the vector."""
    if vct.Length < speed:
        vct.Unitize()
        vct = PVector.Multiply(vct, speed)
    return vct
#=================================#
#Single Classes
#=================================#
class Eddy_config():
    """config for Class Eddy """
    def __init__(self, loc_, gravity_, oricharge_, affrange_, id_):
        self.id = id_                # index of this eddy in the input lists
        self.gravity = gravity_      # attraction strength
        self.oricharge = oricharge_  # orient angle: 0 attract, 1 reject (0-pi)
        self.buffer = affrange_      # radius of influence
        self.loc = loc_              # eddy center as a vector
class Eddy():
    """provide gravity"""
    # Class-level tuning constants shared by all eddies.
    rangedemp = 0.5     # core radius as a fraction of the buffer radius
    fieldcharge = 0.25  # baseline orient charge outside any eddy
    boundaryZ = 30.0
    def __init__(self,config):
        self.config = config
        self.id = config.id
        self.gravity = config.gravity
        self.oricharge = config.oricharge # 0 attract 1 reject
        self.loc = config.loc
        self.buffer = config.buffer # buffer area
        self.core = self.rangedemp * self.buffer #core area
class Flow_config(object):
    """config for Class Flow """
    def __init__(self, loc_, vel_, acc_ ):
        self.loc = loc_  # starting position
        self.vel = vel_  # initial velocity
        self.acc = acc_  # initial acceleration
class Flow(object):
    """A particle advected through the eddy field; records its trail."""
    # Class-level tuning constants.
    toplimit = 2.0    # cap on planar target/velocity magnitude
    lowerlimit = 2.0  # floor on planar target magnitude
    boundaryZ = 15.0  # upper Z bound used by boundarycheck()
    def __init__(self, config):
        self.config = config
        self.loc = config.loc
        self.vel = config.vel
        self.acc = config.acc
        self.ptlist = [Point(self.loc)]  # sampled trail points
        self.trail = None                # interpolated trail curve
        self.target = PVector(0,0,0)     # planar attraction vector
        self.orient = PVector(0,0,0)     # target rotated by the orient charge
        self.lolist = [self.loc] #track location
        self.velist = [self.vel] #track velocity
        self.aclist = [self.acc] #track accleration
        self.id = None #assigned later (index in flowcollection)
        self.mood = None #the group id this flow belongs to
        self.retrail = None #simplified trailcrv
        self.reptlist = []
    def inradius(self, eddy):
        """Classify this particle against *eddy* in plan view: 0 = outside
        the buffer, -1 = inside the core, else the planar distance."""
        dist = PVector.Subtract(self.loc, eddy.loc)
        dist2d = PVector(dist.X, dist.Y, 0).Length
        if dist2d >= eddy.buffer:
            return 0
        elif dist2d <= eddy.core:
            return -1
        else:
            return dist2d
    def getoricharge(self, eddies):
        """Average orient charge felt from all eddies whose buffer contains
        this particle; falls back to the baseline field charge."""
        ori = 0
        orisum = []
        for eddy in eddies:
            sts = self.inradius(eddy)
            if sts == 0:
                pass
            else:
                if sts == -1:
                    ori = eddy.oricharge
                else:
                    # Linear blend from fieldcharge (buffer edge) to the
                    # eddy's own charge (core edge).
                    ori = (sts - eddy.core)/(eddy.buffer - eddy.core)*(eddy.oricharge - eddy.fieldcharge) + eddy.fieldcharge
                orisum.append(ori)
        if len(orisum) == 0:
            return Eddy.fieldcharge
        else:
            return sum(orisum)/len(orisum)
    def gettarget_2d(self, eddies):
        """Sum the planar attraction of every eddy (weighted by gravity over
        distance) and clamp the result between lowerlimit and toplimit."""
        target_2d = PVector(0,0,0)
        for i in xrange(len(eddies)):
            v = PVector.Subtract(eddies[i].loc, self.loc)
            v2d = PVector(v.X, v.Y, 0) # 2d planar target, X Y are the most important factors
            dist2d = v2d.Length
            # compute the sum of the attraction vector / distance
            v2d.Unitize()
            v2d = PVector.Multiply(v2d, float(eddies[i].gravity/pow(dist2d, 1.0)))
            # add vector to attraction vector collection
            target_2d = PVector.Add(target_2d, v2d)
        # Limit the target length
        target_2d = toplimit(target_2d, self.toplimit)
        target_2d = lowerlimit(target_2d, self.lowerlimit)
        self.target = target_2d
    def getorient(self, eddies):
        """Rotate the planar target about Z by the averaged orient charge."""
        orient_2d = self.target
        angle = self.getoricharge(eddies)*pi
        orient_2d.Rotate(angle, PVector(0,0,1))
        self.orient = orient_2d
    def getacc_z(self):
        """Derive a small vertical acceleration from the target magnitude."""
        acc_z = self.target.Length*0.05
        self.acc = PVector(0,0,acc_z)
        # Fix: the original passed the module-level function `toplimit`
        # itself as the speed cap (toplimit(self.acc, toplimit)), so the
        # acceleration was never clamped; the intended cap is the class
        # constant self.toplimit.
        self.acc = toplimit(self.acc, self.toplimit)
    def boundarycheck(self):
        """Bounce the vertical acceleration when Z leaves the allowed band."""
        if self.loc.Z > self.boundaryZ or self.loc.Z < -0.2*self.boundaryZ:
            self.acc = -self.acc
        else:
            pass
    def warp(self, crv):
        """Reflect the planar velocity when the particle leaves *crv*."""
        if rs.PointInPlanarClosedCurve(self.loc, crv) == 0:
            self.vel = PVector(-self.vel.X, -self.vel.Y, self.vel.Z)
    def getvel(self):
        """Integrate orientation and acceleration into the velocity."""
        self.vel = PVector.Add(self.vel, self.orient)
        self.vel = toplimit(self.vel, self.toplimit)
        self.vel = PVector.Add(self.vel, self.acc)
    def update(self, eddies, crv):
        """One simulation step. NOTE(review): *crv* is accepted but unused
        here (warp() is never called) — confirm whether warping was meant
        to be part of the step."""
        self.gettarget_2d(eddies)
        self.getorient(eddies)
        self.getacc_z()
        self.boundarycheck()
        self.getvel()
        self.loc = PVector.Add(self.loc, self.vel)
    def drawtrial(self):
        """Record the current position on the trail."""
        self.ptlist.append(Point(self.loc))
    def drawsimple(self):
        """Fit, rebuild and resample the trail into 30 evenly spaced points."""
        self.trail = Curve.CreateInterpolatedCurve(self.ptlist, 3)
        self.retrail = Curve.Rebuild(self.trail, 30, 3, True)
        self.retrail.Domain = rg.Interval(0, 1)
        crvts = Curve.DivideByCount(self.retrail, 29, True)
        self.reptlist = []
        for i in xrange(len(crvts)):
            self.reptlist.append(Curve.PointAt(self.retrail, crvts[i]))
    def run(self, eddies, crv):
        """Advance one step and record the trail point."""
        self.update(eddies, crv)
        self.drawtrial()
#=================================#
#Parameters
#=================================#
n_flow = 800  # number of candidate particles to seed
#=================================#
#Collections
#=================================#
eddyconfig = []
flowconfig = []
eddycollection = []
flowcollection = []
#input eddies
# NOTE(review): eddyinput/gravityinput/orichargeinput/affrangeinput/sitecrv/
# sitesrf are Grasshopper component inputs supplied by the host — confirm.
for i in xrange(len(eddyinput)):
    loc = PVector(eddyinput[i].X,eddyinput[i].Y,eddyinput[i].Z)
    gravity = gravityinput[i]
    oricharge = orichargeinput[i]
    affrange = affrangeinput[i]
    eddyconfig.append(Eddy_config(loc,gravity,oricharge,affrange,i))
    eddycollection.append(Eddy(eddyconfig[i]))
#generate flow particles
crv = sc.doc.Objects.AddCurve(sitecrv)
j = -1  # NOTE(review): never used afterwards
for i in xrange(n_flow):
    # Random UV sample on the site surface.
    u = random.uniform(0.0,1.0)
    v = random.uniform(0.0,1.0)
    a = sitesrf.PointAt(u,v)
    # NOTE(review): `vel` is computed but the particle is configured with a
    # zero initial velocity below — confirm which was intended.
    vel = PVector(random.uniform(0.0,1.0),random.uniform(0.0,1.0),0)
    particle = PVector(a.X,a.Y,a.Z)
    # Keep only samples that fall inside the site boundary curve.
    if rs.PointInPlanarClosedCurve(a,crv) == 0:
        pass
    else:
        flowconfig.append(Flow_config(particle,PVector(0,0,0),PVector(0,0,0)))
for config in flowconfig:
    flowcollection.append(Flow(config))
#=================================#
#Draw
#=================================#
# Advance every particle 100 simulation steps, then simplify each trail.
for i in xrange(100):
    for particle in flowcollection:
        particle.run(eddycollection,crv)
for particle in flowcollection:
    particle.drawsimple()
#=================================#
#Storage Output
#=================================#
fieldcollection = []
pointcollection_list = []
for particle in flowcollection:
    fieldcollection.append(particle.retrail)
    pointcollection_list.append(particle.reptlist)
pointcollection = DataTree[object]()
AddToPath(pointcollection_list,pointcollection)
#=================================#
#Group Classes
#=================================#
class Silk(object):
    """shortlines"""
    def __init__(self,flow1,flow2):
        # Pair of flows whose trails get connected by rungs.
        self.flow = [flow1,flow2]
        self.silks = []  # generated connector Line segments
    def drawsilk(self):
        """Connect the two trails with 99 evenly spaced line segments."""
        t1 = Curve.DivideByCount(self.flow[0].retrail,99,True)
        t2 = Curve.DivideByCount(self.flow[1].retrail,99,True)
        for i in xrange(99):
            pt1 = Curve.PointAt(self.flow[0].retrail,t1[i])
            pt2 = Curve.PointAt(self.flow[1].retrail,t2[i])
            self.silks.append(Line(pt1,pt2))
class Wave(object):
    """contains a group of similar flow objects"""
    def __init__(self, mood, flow):
        self.mood = mood               # group id of this wave
        self.basecrv = flow.retrail    # reference curve for similarity tests
        self.crvlist = [self.basecrv]  # member trail curves
        self.srflist = []
        self.flow = [flow]             # member Flow objects
        self.silkbranch = []           # member indices eligible for silk lines
def moodbelong(flow, wave):
    """Return True when *flow*'s trail is similar enough to *wave*'s base
    curve to join that wave.

    Uses Rhino's curve-deviation query; devi[0] reports whether the
    comparison succeeded, devi[1] and devi[4] are deviation measures —
    thresholds 15.0 / 7.0 kept from the original.
    """
    devi = rg.Curve.GetDistancesBetweenCurves(flow.retrail, wave.basecrv, 0.0001)
    # Guard clause replaces the `devi[0] == False` comparison anti-idiom.
    if not devi[0]:
        return False
    return devi[1] <= 15.0 and devi[4] <= 7.0
wavecollection = []
# Assign every flow to the first wave whose base curve it resembles, or
# start a new wave for it.
for i in xrange(len(flowcollection)):
    flowcollection[i].id = i
    if i == 0:
        wavecollection.append(Wave(0, flowcollection[i]))
        flowcollection[i].mood = 0
    else:
        for j in xrange(len(wavecollection)):
            if moodbelong(flowcollection[i], wavecollection[j]):
                wavecollection[j].crvlist.append(flowcollection[i].retrail)
                wavecollection[j].flow.append(flowcollection[i])
                flowcollection[i].mood = j
                break
        if flowcollection[i].mood is None:
            # Fix: the new wave's id and the flow's mood previously used
            # len(wavecollection)+1 and the stale loop variable `j`, so a
            # flow starting a new wave pointed at a wave it never joined.
            # Both now use the index of the wave just created.
            wavecollection.append(Wave(len(wavecollection), flowcollection[i]))
            flowcollection[i].mood = len(wavecollection) - 1
waveflow = []
wavesrf = []
trailsolid = []
silkcollection = []
silklines = []
sidecrv = [] # the side crvs beside tween crvs
sidesim = []
# For each wave: singletons keep their bare trail; pairs get tween curves;
# 3+ members get two tween families between three random distinct members,
# and 5+ members additionally get silk connector lines between two of the
# remaining members.
for wave in wavecollection:
    if len(wave.crvlist) == 1:
        trailsolid.append(wave.basecrv)
    elif len(wave.crvlist) == 2:
        wavesrf.append(list(Curve.CreateTweenCurves(wave.crvlist[0],wave.crvlist[1],7)))
        sidecrv.append([wave.crvlist[0],wavesrf[-1][0],wavesrf[-1][-1],wave.crvlist[1]])
    elif len(wave.crvlist) >= 3:
        # Pick three distinct member indices at random.
        num1 = random.choice(range(len(wave.crvlist)))
        num2 = random.choice(range(len(wave.crvlist)))
        while num1 == num2:
            num2 = random.choice(range(len(wave.crvlist)))
        num3 = random.choice(range(len(wave.crvlist)))
        while num2 == num3 or num1 == num3:
            num3 = random.choice(range(len(wave.crvlist)))
        crv1 = wave.crvlist[num1]
        crv2 = wave.crvlist[num2]
        crv3 = wave.crvlist[num3]
        wavesrf.append(list(Curve.CreateTweenCurves(crv1,crv2,7)))
        sidecrv.append([crv1,wavesrf[-1][0],wavesrf[-1][-1],crv2])
        wavesrf.append(list(Curve.CreateTweenCurves(crv2,crv3,7)))
        sidecrv.append([crv2,wavesrf[-1][0],wavesrf[-1][-1],crv3])
        if len(wave.crvlist) >= 5:
            # Silk lines connect two members not already used for tweens.
            wave.silkbranch = range(len(wave.crvlist))
            wave.silkbranch.remove(num1)
            wave.silkbranch.remove(num2)
            wave.silkbranch.remove(num3)
            n1 = random.choice(wave.silkbranch)
            n2 = random.choice(wave.silkbranch)
            while n1 == n2:
                n2 = random.choice(wave.silkbranch)
            silkcollection.append(Silk(wave.flow[n1],wave.flow[n2]))
            silkcollection[-1].drawsilk()
            silklines.append(silkcollection[-1].silks)
            trailsolid.append(wave.flow[n1].retrail)
            trailsolid.append(wave.flow[n2].retrail)
    waveflow.append(wave.crvlist)
def drawside(crv, sim):
    """Append rung lines across each quadruple of side curves to *sim*.

    For every group of four curves, sample 30 parameters on each and join
    curve0-curve1 and curve2-curve3 at matching parameters.

    Fix: the original divided curve j and immediately indexed t[1..3][k]
    inside the same j loop, so on the first pass t[1..3] were still empty
    (IndexError), and on later passes the rungs were emitted repeatedly.
    All four curves are now divided before any rung is built.
    """
    for i in xrange(len(crv)):
        t = [Curve.DivideByCount(c, 29, True) for c in crv[i]]
        for k in xrange(len(t[0])):
            pt0 = Curve.PointAt(crv[i][0], t[0][k])
            pt1 = Curve.PointAt(crv[i][1], t[1][k])
            pt2 = Curve.PointAt(crv[i][2], t[2][k])
            pt3 = Curve.PointAt(crv[i][3], t[3][k])
            sim.append(Line(pt0, pt1))
            sim.append(Line(pt2, pt3))
    return sim
sidesim = drawside(sidecrv,sidesim)
# Package every result list as a Grasshopper DataTree component output.
flowgroup = DataTree[object]()
wavesrfline = DataTree[object]()
trailsolid_1 = DataTree[object]()
silklinessolid = DataTree[object]()
sidesimsolid = DataTree[object]()
sidecrv_1 = DataTree[object]()
AddToPath(waveflow,flowgroup)
AddToPath(wavesrf,wavesrfline)
AddToPath(trailsolid,trailsolid_1)
AddToPath(silklines,silklinessolid)
AddToPath(sidecrv,sidecrv_1)
AddToPath(sidesim,sidesimsolid)
srfnum = len(wavesrf) |
# -*- coding: utf-8 -*-
###############################################################################
#
# CreateVolume
# Creates a new EBS volume that your EC2 instance can attach to.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class CreateVolume(Choreography):
    # NOTE: Temboo-generated boilerplate; keep edits in sync with the
    # generator template.
    def __init__(self, temboo_session):
        """
        Create a new instance of the CreateVolume Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(CreateVolume, self).__init__(temboo_session, '/Library/Amazon/EC2/CreateVolume')
    def new_input_set(self):
        # Factory for this Choreo's input container.
        return CreateVolumeInputSet()
    def _make_result_set(self, result, path):
        return CreateVolumeResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        return CreateVolumeChoreographyExecution(session, exec_id, path)
class CreateVolumeInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the CreateVolume
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # NOTE: Temboo-generated boilerplate; each setter stores one named input.
    def set_AWSAccessKeyId(self, value):
        """
        Set the value of the AWSAccessKeyId input for this Choreo. ((required, string) The Access Key ID provided by Amazon Web Services.)
        """
        super(CreateVolumeInputSet, self)._set_input('AWSAccessKeyId', value)
    def set_AWSSecretKeyId(self, value):
        """
        Set the value of the AWSSecretKeyId input for this Choreo. ((required, string) The Secret Key ID provided by Amazon Web Services.)
        """
        super(CreateVolumeInputSet, self)._set_input('AWSSecretKeyId', value)
    def set_AvailabilityZone(self, value):
        """
        Set the value of the AvailabilityZone input for this Choreo. ((required, string) The Availability Zone to use when creating thew new volume (i.e us-east-1a).)
        """
        super(CreateVolumeInputSet, self)._set_input('AvailabilityZone', value)
    def set_Iops(self, value):
        """
        Set the value of the Iops input for this Choreo. ((optional, integer) The number of I/O operations per second (IOPS) that the volume supports. Valid range is 100 to 2000. Required when the volume type is io1.)
        """
        super(CreateVolumeInputSet, self)._set_input('Iops', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are "xml" (the default) and "json".)
        """
        super(CreateVolumeInputSet, self)._set_input('ResponseFormat', value)
    def set_Size(self, value):
        """
        Set the value of the Size input for this Choreo. ((conditional, integer) The size for the volume (in gigabytes) that you are creating. Valid Values are 1-1024. Required if you're not creating a volume from a snapshot. If the volume type is io1, the min size is 10 GiB.)
        """
        super(CreateVolumeInputSet, self)._set_input('Size', value)
    def set_SnapshotId(self, value):
        """
        Set the value of the SnapshotId input for this Choreo. ((conditional, string) The snapshot from which to create the new volume. Required if you are creating a volume from a snapshot.)
        """
        super(CreateVolumeInputSet, self)._set_input('SnapshotId', value)
    def set_UserRegion(self, value):
        """
        Set the value of the UserRegion input for this Choreo. ((optional, string) The AWS region that corresponds to the EC2 endpoint you wish to access. The default region is "us-east-1". See description below for valid values.)
        """
        super(CreateVolumeInputSet, self)._set_input('UserRegion', value)
    def set_VolumeType(self, value):
        """
        Set the value of the VolumeType input for this Choreo. ((optional, string) The volume type.Valid values are: "standard" (the default) and "io1".)
        """
        super(CreateVolumeInputSet, self)._set_input('VolumeType', value)
class CreateVolumeResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the CreateVolume Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    # NOTE: generated code; the parameter below shadows the builtin `str`,
    # but renaming it would diverge from the generator template.
    def getJSONFromString(self, str):
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from Amazon.)
        """
        return self._output.get('Response', None)
class CreateVolumeChoreographyExecution(ChoreographyExecution):
    # Binds this Choreo's execution to its tailored ResultSet type.
    def _make_result_set(self, response, path):
        return CreateVolumeResultSet(response, path)
|
# just look at phi(n)/n and reason that minimizer is smooth
from sympy import isprime, nextprime
def red(n, p):
    """Divide every factor *p* out of *n*, using exact integer arithmetic."""
    # Fix: was `n /= p` — true division turns n into a float, which loses
    # exactness for large n; floor division is exact here since p | n.
    while n % p == 0:
        n //= p
    return n


def f(n, primes):
    """Return n/phi(n), the product of p/(p-1) over primes p dividing n."""
    v = 1
    n_r = n
    for p in primes:
        if p > n_r:
            break
        prev = n_r
        n_r = red(n_r, p)
        # Fix: was `if n_r != n` — once any prime divided n, every later
        # prime (even one NOT dividing n) multiplied v, e.g. f(10) gave
        # 3.75 instead of 2.5. Compare against the value before this
        # reduction instead.
        if n_r != prev:
            v *= p/(p-1)
    return v
max_n = 10**4
# Collect every prime up to max_n using sympy's nextprime.
primes = []
candidate = nextprime(1)
while candidate <= max_n:
    primes.append(candidate)
    candidate = nextprime(candidate)

# Print n/phi(n) for every n in [2, max_n).
print([f(n, primes) for n in range(2, max_n)])
'''
------DEVICE 1-------
'''
import socket as sk
import time
import datetime
import random

# Static addressing for the simulated device and its gateway. The MAC/IP
# "headers" are plain strings concatenated into the UDP payload — this is a
# classroom simulation of a protocol stack, not real framing.
device1_ip = "192.168.1.2"
device1_mac = "10:AF:CB:EF:19:CF"
gateway_ip = "192.168.1.1"
gateway_mac = "05:10:0A:CB:24:EF"
gateway_port = 8100
gateway = ("localhost", gateway_port)
ethernet_header = device1_mac + gateway_mac
IP_header = device1_ip + gateway_ip
random.seed()

# Every 30 s: open a UDP socket, send a fake sensor reading (time,
# temperature, humidity), wait for the gateway's reply and report the
# round-trip time. On IOError the socket is closed and the loop retries
# immediately (no backoff).
while True:
    try:
        device1Socket = sk.socket(sk.AF_INET, sk.SOCK_DGRAM)
        # connect() fixes the default peer and assigns a local port.
        device1Socket.connect(gateway)
        device1_port = str(device1Socket.getsockname()[1])
        # Ports zero-padded to 5 digits to mimic a fixed-width header.
        UDP_header = str(device1_port).zfill(5) + str(gateway_port).zfill(5)
        now = datetime.datetime.now()
        current_hour = str(now.hour) + ":" + str(now.minute) + ":" + str(now.second)
        message = current_hour + " – " + str(random.randrange(0, 30)) + "°C – " + str(random.randrange(40, 60)) + "%"
        packet = ethernet_header + IP_header + UDP_header + message
        print("Source MAC address: " + device1_mac + ", destination MAC address: " + gateway_mac +
              "\nSource IP address: " + device1_ip + ", destination IP address: " + gateway_ip +
              "\nSource port: " + device1_port + ", destination port: " + str(gateway_port) + "\n")
        print('Sending: "%s"\n' % message)
        start = time.time()
        device1Socket.sendto(packet.encode(), gateway)
        # Block until the gateway echoes something back (up to 128 bytes).
        response, address = device1Socket.recvfrom(128)
        response = response.decode("utf-8")
        end = time.time()
        if (response != ""):
            print("UDP Trasmission time: ", end - start, " s.\n")
        else:
            print("Error in packet trasmission!")
        device1Socket.close()
        time.sleep(30)
    except IOError:
        # Best-effort cleanup; the outer while retries.
        device1Socket.close()
|
# -*- coding: utf-8 -*-
# Imprimir os 100 primeiros pares — print the first 100 even numbers.
numeros_pares = 0
numero_atual = 0
while numeros_pares < 100:
    numero_atual += 2
    numeros_pares += 1
    # Print each even number as it is produced. The header comment asks for
    # the first 100 evens; with the print outside the loop only the final
    # value (200) was ever shown.
    print(numero_atual)
import torch
import os
import pandas as pd
import numpy as np
from my_image_classification.data import test_transform
from torch.utils.data import DataLoader
import cv2
import torch
import timm
import torch.nn as nn
def get_model(model_name, out_features, drop_rate=0.5):
    """Build an untrained timm model with a replaced classification head.

    :param model_name: timm architecture name (e.g. 'tf_efficientnet_b4')
    :param out_features: number of output classes for the new head
    :param drop_rate: dropout rate set as an attribute on the model
        # assumes the timm architecture reads ``drop_rate`` at forward
        # time — TODO confirm for the architectures used
    """
    model = timm.create_model(model_name, pretrained=False)
    model.drop_rate = drop_rate
    # Swap the final linear layer so the output matches out_features.
    model.classifier = nn.Linear(model.classifier.in_features, out_features)
    return model
# Inference script: score every validation image with an EfficientNet-B4
# checkpoint and dump (image_id, class_id) predictions to CSV.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'  # pin to GPU 2
test_dir = '/home/fcq/Competition/cvpr2021/val_images/'
checkpoint = '/home/fcq/Competition/nitre_baseline/my_image_classification/checkpoint-epoch28.pth'
model_name = 'tf_efficientnet_b4'
model = get_model(model_name, 10)
model.load_state_dict(torch.load(checkpoint)['state_dict'])
model.to('cuda')
model.eval()
test_data = []
for image in os.listdir(test_dir):
    # Filenames look like "<prefix>_<id>.<ext>": keep the numeric id —
    # TODO confirm against the actual naming scheme.
    id = image[:-4].split('_')[1]
    test_data.append([os.path.join(test_dir, image), int(id), 0])
test_df = pd.DataFrame(test_data, columns=['image_path', 'image_id', 'class_id'], index=None)
with torch.no_grad():
    for index in range(len(test_df)):
        image = cv2.imread(test_df.iloc[index].image_path)
        image = test_transform(image=image)['image'].astype(np.float32)
        image = image.transpose(2, 0, 1)  # HWC -> CHW for torch
        image_tensor = torch.tensor(image).unsqueeze(0).float().cuda()
        pred = model(image_tensor)
        pred = nn.Softmax(dim=1)(pred)
        pred = pred.detach().cpu().numpy()
        label = pred.argmax()
        print(label)
        test_df.loc[index, 'class_id'] = [label]
# Write predictions once, after the whole loop.
test_df.loc[:, ['image_id', 'class_id']].to_csv('/home/fcq/Competition/cvpr2021/result/result_efficientnet_no_sofmax.csv', index=False)
# test_dataset = SarDataset(test_data, 'test', test_transform)
# test_loader = DataLoader(test, batch_size=val_batch_size, shuffle=False, num_workers=4)
# def pred(model, data):
# model.eval()
# device = torch.device("cuda")
# data = data.transpose(2, 0, 1)
# if data.max() > 1: data = data / 255
# c, x, y = data.shape
# label = np.zeros((x, y))
# x_num = (x//target_l + 1) if x%target_l else x//target_l
# y_num = (y//target_l + 1) if y%target_l else y//target_l
# for i in tqdm(range(x_num)):
# for j in range(y_num):
# x_s, x_e = i*target_l, (i+1)*target_l
# y_s, y_e = j*target_l, (j+1)*target_l
# img = data[:, x_s:x_e, y_s:y_e]
# img = img[np.newaxis, :, :, :].astype(np.float32)
# img = torch.from_numpy(img)
# img = Variable(img.to(device))
# out_l = model(img)
# out_l = out_l.cpu().data.numpy()
# out_l = np.argmax(out_l, axis=1)[0]
# label[x_s:x_e, y_s:y_e] = out_l.astype(np.int8)
# print(label.shape)
# return label |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 07 15:46:30 2016
@author: auort
"""
from PIL import Image
from pylab import *
from numpy import *
from numpy import random
from scipy.ndimage import filters
import scipy.misc
"""
Write code to generate and display a series difference of Gaussian images from an input image
"""
# Working directory holding the input image and the per-image output folder.
directory = 'C:\\Users\\auort\\Desktop\\CV_E4\\'


def Gauss(identifier, loop, theta):
    """Write *loop* successive difference-of-Gaussian images derived from
    ``<directory>/<identifier>.jpg`` using blur radius *theta*.
    """
    # Open image, convert to grayscale, and store as an array.
    im = array(Image.open(directory + identifier + '.jpg').convert('L'))
    thetastring = str(theta)
    # NOTE(review): `xrange` makes this Python-2-only; also `im - G` on a
    # uint8 array wraps around for negative differences — confirm intended.
    for i in xrange(loop):
        # Apply Gaussian filter to im, using theta.
        G = filters.gaussian_filter(im, theta)
        savevalue = str(i + 1)
        result = im - G
        # NOTE: scipy.misc.imsave was removed in SciPy >= 1.2 (imageio is
        # the modern replacement).
        scipy.misc.imsave(directory + identifier + '\\DifferenceGaussian_' + thetastring + '_' + savevalue + '.jpg', result)
        # Next iteration blurs the already-blurred image (scale-space chain).
        im = G


Gauss('1', 10, 2)
Gauss('1', 10, 5)
Gauss('1', 10, 7)
import requests
import json
import frappe
from six.moves.urllib.parse import urlparse
from six.moves.urllib.parse import parse_qs
@frappe.whitelist()
def get_container():
    """Whitelisted debug endpoint: logs the request's query parameters.

    Returns a fixed success string; on failure returns the exception
    object itself.
    """
    try:
        query_string = urlparse(frappe.request.url).query
        # print(query_string)
        query = parse_qs(query_string)
        print("parameters : ", query)
        # req = json.loads(frappe.request.data)
        # module = str(req.get('foreign_buyer')).strip()
        return "api hit successful...."
    except Exception as ex:
        # NOTE(review): broad except, and the raw exception object is
        # returned to the client — frappe will have to serialize it;
        # returning str(ex) with an error HTTP status is likely intended.
        print("Exception : ", ex)
        return ex
|
import pandas as pd
import os

# Directory holding the question CSV files, resolved next to this module.
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')


def get_question_list(csv_filename):
    """Return the QUESTION column of *csv_filename* (under ``path``) as a list."""
    frame = pd.read_csv(os.path.join(path, csv_filename))
    return frame['QUESTION'].values.tolist()
# question_list = get_question_list("FQ_WEEK.csv")
# print(question_list[:5])
|
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseRedirect
from calculater.models import History
def home_page(request):
    """Render the static home page."""
    return render(request, 'home.html')
def me_page(request):
    """Render the static "about me" page."""
    return render(request, 'me.html')
def _calculate(x, op, y):
    """Apply the binary operator *op* ('+', '-', '*', '/') to int(x) and int(y).

    Raises ValueError for an unsupported operator (the original code crashed
    with UnboundLocalError there) and propagates ZeroDivisionError on x / 0.
    """
    a, b = int(x), int(y)
    if op == '+':
        return a + b
    if op == '-':
        return a - b
    if op == '*':
        return a * b
    if op == '/':
        return a / b
    raise ValueError('unsupported operator: %r' % (op,))


def calpost_page(request):
    """Calculator view: evaluate num_x <op> num_y and show the history.

    A POST carrying 'conti' is a continuation (result exposed as con_num,
    not persisted, matching the original behaviour); a plain POST persists
    the calculation to History; GET renders an empty calculator.
    """
    showhistory = History.objects.all()
    if request.method == 'POST':
        x = request.POST.get('num_x', '')
        op = request.POST.get('op', '')
        y = request.POST.get('num_y', '')
        # Single evaluation path replaces the duplicated if-chains.
        result = _calculate(x, op, y)
        context = {'result': result, 'showhistory': showhistory,
                   'x': x, 'y': y, 'op': op}
        if 'conti' in request.POST:
            context['con_num'] = result
            return render(request, 'calculaterpost.html', context)
        # objects.create() already saves; the original's extra save() was
        # redundant.
        History.objects.create(number1=x, operater=op, number2=y, result=result)
        return render(request, 'calculaterpost.html', context)
    return render(request, 'calculaterpost.html',
                  {'result': 0, 'showhistory': showhistory})
|
import sys
import argparse
import os
import json
import html, string, re
import spacy
from spacy.lemmatizer import Lemmatizer
from spacy.lang.en import LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES
# Data locations and wordlists used by the preprocessing steps (CDF paths).
indir = '/u/cs401/A1/data/'
abbFile = '/u/cs401/Wordlists/abbrev.english'
stopFile = '/u/cs401/Wordlists/StopWords'
# Load spaCy once at import time; parser/NER disabled for speed.
nlp = spacy.load('en', disable=['parser', 'ner'])
def preproc1(comment, steps=range(1, 11)):
    ''' This function pre-processes a single comment

    Parameters:
        comment : string, the body of a comment
        steps   : list of ints, each entry in this list corresponds to a preprocessing step

    Returns:
        modComm : string, the modified comment
    '''
    # Start from the raw comment so steps 2-10 can run even when step 1 is
    # skipped.
    modComm = comment
    # Step 1: replace newlines with spaces.
    if 1 in steps:
        newcomment = ''
        for c in comment:
            if c != '\n':
                newcomment += c
            else:
                newcomment += ' '
        modComm = newcomment
    # Step 2: unescape HTML character codes (&amp; etc.).
    if 2 in steps:
        modComm = html.unescape(modComm)
    # Step 3: remove URLs token by token (matchWeb drops http/www tokens).
    if 3 in steps:
        splitcomment = modComm.split()
        newcomment = ''
        for piece in splitcomment:
            newcomment += ' ' + matchWeb(piece).strip()
        modComm = newcomment.strip()
    # Step 4: split punctuation off words, treating double-quoted regions
    # specially so the quotes stay attached to their contents.
    if 4 in steps:
        newcomment = ''
        doubleQuote = '".*"'
        text = modComm
        search = re.search(doubleQuote, text)
        while search:
            (indexStart, indexEnd) = search.span()
            # Text before the quoted region.
            newcomment += ' ' + wordProcess(text[:indexStart - 1])
            # Contents between the quotes.
            partition = wordProcess(text[indexStart + 1:indexEnd - 1])
            # Re-attach the opening quote (no space before punctuation).
            if len(partition) > 0 and partition[0] in string.punctuation:
                newcomment += ' "' + partition
            elif len(partition) > 0:
                newcomment += ' " ' + partition
            # Re-attach the closing quote.
            if len(partition) > 0 and partition[-1] in string.punctuation:
                newcomment += '"'
            elif len(partition) > 0:
                newcomment += ' "'
            text = text[indexEnd:]
            search = re.search(doubleQuote, text)
        if len(text) > 0:
            newcomment += ' ' + wordProcess(text)
        modComm = newcomment.strip()
    # Step 5: split clitics — break a token at its first apostrophe unless
    # the token starts with punctuation.
    if 5 in steps:
        splitcomment = modComm.split()
        newcomment = ''
        for piece in splitcomment:
            if len(piece) > 1 and re.search("'", piece) and piece[0] not in string.punctuation:
                for i in range(len(piece)):
                    if piece[i] == "'":
                        newcomment += ' ' + piece[:i] + ' ' + piece[i:]
                        break
            else:
                newcomment += ' ' + piece
        modComm = newcomment.strip()
    # Step 6: POS-tag each token with spaCy, emitting "token/TAG".
    if 6 in steps:
        newcomment = ''
        splitcomment = modComm.split()
        doc = spacy.tokens.Doc(nlp.vocab, words=splitcomment)
        doc = nlp.tagger(doc)
        for token in doc:
            newcomment += ' ' + token.text + '/' + token.tag_
        modComm = newcomment.strip()
    # Step 7: remove stop words (compared on the text before the '/tag').
    if 7 in steps:
        stop = []
        with open(stopFile, 'r') as f:
            stop = f.readlines()
        # Strip the trailing newline from each stop word.
        for i in range(len(stop)):
            stop[i] = stop[i][:-1]
        newcomment = ''
        splitcomment = modComm.split()
        for piece in splitcomment:
            if re.match('.*/', piece).group()[:-1].lower() not in stop:
                newcomment += ' ' + piece
        modComm = newcomment.strip()
    # Step 8: lemmatize each token, keeping the original tag suffix.
    if 8 in steps:
        newcomment = ''
        splitcomment = modComm.split()
        tag = []
        for i in range(len(splitcomment)):
            piece = splitcomment[i]
            # Separate "token/TAG" into token and tag.
            splitcomment[i] = re.search('.*/', piece).group()[:-1]
            tag.append(re.search('/.*', piece).group()[1:])
        lemmatizer = Lemmatizer(LEMMA_INDEX, LEMMA_EXC, LEMMA_RULES)
        for i in range(len(splitcomment)):
            # unitag maps the Penn tag to the coarse tag the lemmatizer wants.
            lemmas = lemmatizer(splitcomment[i], unitag(tag[i]))
            newcomment += ' ' + lemmas[0] + '/' + tag[i]
        modComm = newcomment.strip()
    # Step 9: add a newline after each sentence-final "./." token.
    if 9 in steps:
        newcomment = ''
        sentence = './\.'
        text = modComm
        search = re.search(sentence, text)
        while search:
            indexEnd = search.span()[1]
            newcomment += text[:indexEnd] + '\n'
            text = text[indexEnd:]
            search = re.search(sentence, text)
        if len(text) > 0:
            newcomment += text
        modComm = newcomment.strip()
    # Step 10: lower-case the token text (tags keep their case); newline
    # after each '.' tag.
    if 10 in steps:
        newcomment = ''
        splitcomment = modComm.split()
        tag = []
        for i in range(len(splitcomment)):
            piece = splitcomment[i]
            splitcomment[i] = re.match('.*/', piece).group()[:-1]
            tag.append(re.search('/.*', piece).group()[1:])
        for i in range(len(splitcomment)):
            newcomment += ' ' + splitcomment[i].lower() + '/' + tag[i]
            if tag[i] == '.':
                newcomment += '\n'
        modComm = newcomment.strip()
    return modComm
def matchWeb(text):
    """Drop *text* when it is a URL (http/www prefix), keeping any trailing
    punctuation; a non-URL token is returned prefixed with a space.
    """
    comment = ''
    # Matches for URLs wrapped in double or single quotes.
    webAddrWithQuote = [re.match('("http)|("www)', text), re.match("('http)|('www)", text)]
    if re.match('(http)|(www)', text):
        endline = text[-1]
        # The final character might be '/', which is part of a valid URL.
        if endline in string.punctuation and endline != '/':
            comment += text[-1]
    # The URL might be contained in quotation marks.
    elif webAddrWithQuote[0] or webAddrWithQuote[1]:
        # Only one element of webAddrWithQuote can be a match.
        for match in webAddrWithQuote:
            # NOTE(review): Match.endpos is the end of the *search region*
            # (== len(text)), so this condition is never true and nothing is
            # kept for quoted URLs; `match.end()` was probably intended.
            if match and match.endpos > len(text):
                # Any character after the quotation mark has to be punctuation.
                comment += text[match.endpos:]
                break
    # Not a web address; assume a space always precedes the token.
    else:
        comment += ' ' + text
    return comment
def wordProcess(text):
    """Split punctuation off the words of *text*, leaving known
    abbreviations intact and preserving a single leading double quote.

    Re-reads the abbreviation list from ``abbFile`` on every call.
    """
    abb = []
    with open(abbFile, 'r') as f:
        abb = f.readlines()
    # Strip the trailing newline from each abbreviation entry.
    for i in range(len(abb)):
        abb[i] = abb[i][:-1]
    comment = ''
    if len(text) == 0:
        return comment
    if text[0] == '"':
        comment += '"'
        text = text[1:]
    for item in text.split():
        if item in abb:
            # Known abbreviation (e.g. "etc."): keep its period attached.
            comment += ' ' + item
        elif item[0] in string.punctuation or item[-1] in string.punctuation and item[-1] != "'":
            # NOTE: `and` binds tighter than `or`, so any token that merely
            # STARTS with punctuation takes this branch regardless of its
            # last character.
            comment += ' ' + seperatPunc(item).strip()
        # punctuation in the middle of word would indicate apostrophes
        # else a word without any punctuation
        else:
            comment += ' ' + item
    return comment.strip()
def seperatPunc(text):
    """Split leading and trailing punctuation runs off *text* with spaces.

    Examples: "hello," -> "  hello ,", ",,abc!!" -> " ,, abc !!".
    An all-punctuation token collapses to a single space.
    """
    out = ' '
    # Peel the leading punctuation run off the front of the token.
    for idx, ch in enumerate(text):
        if ch not in string.punctuation:
            out += text[:idx] + ' '
            text = text[idx:]
            break
    # Walk back from the end to split off the trailing punctuation run.
    for idx in range(len(text) - 1, -1, -1):
        if text[idx] not in string.punctuation:
            out += text[:idx + 1] + ' ' + text[idx + 1:]
            break
    return out
def unitag(tag):
    """Map a Penn Treebank POS tag to the coarse tag used by the lemmatizer.

    Noun/verb/adjective/punctuation tags collapse to NOUN/VERB/ADJ/PUNCT;
    any other tag is returned unchanged.
    """
    groups = {
        'NOUN': ('NN', 'NNS', 'NNP', 'NNPS'),
        'VERB': ('VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'),
        'ADJ': ('JJ', 'JJR', 'JJS'),
        'PUNCT': ('#', '$', '.', ',', ':', '(', ')', '"', "'", "''", '``'),
    }
    for coarse, members in groups.items():
        if tag in members:
            return coarse
    return tag
def main(args):
    """Walk every data file, preprocess a student-specific slice of
    comments, and dump the combined result to args.output as JSON.
    """
    allOutput = []
    for subdir, dirs, files in os.walk(indir):
        for file in files:
            fullFile = os.path.join(subdir, file)
            print("Processing " + fullFile)
            data = json.load(open(fullFile))
            # Select args.max comments starting at an offset derived from
            # the student ID, wrapping around the end of the file.
            start = args.ID[0] % (len(data))
            end = start + int(args.max)
            if end > len(data):
                end = end - len(data)
            if start < end:
                data = data[start:end]
            else:
                data = data[start:] + data[:end]
            for line in data:
                j = json.loads(line)
                # No fields are dropped currently (empty key list kept as a
                # placeholder for the assignment's field filtering).
                for key in []:
                    j.pop(key)
                j['cat'] = file
                newbody = preproc1(j['body'])
                j['body'] = newbody
                allOutput.append(j)
    # print('Done!')
    fout = open(args.output, 'w')
    fout.write(json.dumps(allOutput))
    fout.close()
if __name__ == "__main__":
    # CLI: positional student ID, required -o/--output path, optional --max
    # comments per file (capped at the dataset size below).
    parser = argparse.ArgumentParser(description='Process each .')
    parser.add_argument('ID', metavar='N', type=int, nargs=1,
                        help='your student ID')
    parser.add_argument("-o", "--output", help="Directs the output to a filename of your choice", required=True)
    parser.add_argument("--max", help="The maximum number of comments to read from each file", default=10000)
    args = parser.parse_args()
    if (int(args.max) > 200272):
        print("Error: If you want to read more than 200,272 comments per file, you have to read them all.")
        sys.exit(1)
    main(args)
|
# Generated by Django 3.0.6 on 2021-02-03 18:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: widens Ocorrencia.endereco to 1000 chars.

    dependencies = [
        ('issueTracker', '0004_auto_20210203_1530'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ocorrencia',
            name='endereco',
            field=models.CharField(max_length=1000),
        ),
    ]
|
import requests
def get_price_list_daily(symbol, key):
    """Fetch daily OHLC prices for *symbol* from the Alpha Vantage API.

    :param symbol: stock ticker symbol
    :param key: Alpha Vantage API key
    :returns: list of [date, low, open, close, high] rows — this exact
        order is what the charting addon expects.

    NOTE(review): no handling for HTTP errors or for the API's rate-limit
    response (which has no "Time Series (Daily)" key and raises KeyError).
    """
    r = requests.get('https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=%s&apikey=%s' % (symbol, key))
    price = r.json()  # Convert to JSON
    # Values under the "Time Series (Daily)" key of the response dict.
    price_list = price["Time Series (Daily)"]
    price_data = []  # rows of date & Low & Open & Close & High prices
    for k, v in price_list.items():
        price_data.append([str(k), float(v['3. low']), float(v['1. open']), float(v['4. close']), float(v['2. high'])])
    return price_data
import pandas  # CSV reader for the symbol listing
import os

# CSV downloaded from the LISTING_STATUS API endpoint:
# https://www.alphavantage.co/query?function=LISTING_STATUS&apikey=demo
file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'list.csv')


def get_symbol():
    """Return the 'symbol' column of the stock-listing CSV as a pandas Series."""
    listing = pandas.read_csv(file_path)
    return listing['symbol']
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
from sklearn import metrics
class Results(object):
    """a class to obtain classification results of a set of NNModel instances"""

    def __init__(self, models, activityNames):
        self.models = models
        self.activityNames = activityNames
        self.testSamplesList = []  # list of different models' samples np.array
        self.testTargetsList = []  # list of different models' targets np.array

    def setSamplesAndTargestLists(self, testSamplesList, testTargetsList):
        """sets the list of tests samples and targets

        Parameters
        ----------
        testSamplesList : list of np.array
        testTargetsList : list of np.array
        """
        self.testSamplesList = testSamplesList
        self.testTargetsList = testTargetsList

    # predicted errors
    def modelsError(self, testSamples, testTargets):
        """return models' errors in array form

        the prediction error of each model in self.models is evaluated on all
        the samples: rows correspond to samples, columns to models

        Parameters
        ----------
        testSamples : np.array
        testTargets : np.array

        Returns
        -------
        np.array
            array of errors, one per NN model in self.models
            shape (number of samples, number of models)
        """
        error = np.zeros((len(testSamples), len(self.models)))
        for i, model in enumerate(self.models):
            predictedValues = model.predict(testSamples)
            error[:, i] = self.mae(predictedValues, testTargets)
        return error

    def mae(self, predictedValues, targetValues):
        """return np.array of mean absolute errors of shape = (samples,)

        Parameters
        ----------
        predictedValues : numpy array (shape = (samples, features))
        targetValues : numpy array (shape = (samples, features))

        Returns
        -------
        numpy array (shape = (samples,))
        """
        return np.mean(np.abs(targetValues - predictedValues), axis=1)

    # predicted class (wrapper around modelsError)
    def predictedClass(self, testSamples, testTargets):
        """return the predicted class in one hot encoded style

        starting from the error array computed by modelsError, for each row
        find the column with the lowest error and set it to 1, others to 0

        Parameters
        ----------
        testSamples : np.array
        testTargets : np.array

        Returns
        -------
        np.array
            one-hot predicted classes, shape (num of samples, num of models)
        """
        error = self.modelsError(testSamples, testTargets)
        self.predClass = np.zeros(error.shape, dtype=int)
        # fancy indexing: one 1 per row at the argmin column
        self.predClass[range(len(error)), np.argmin(error, axis=1)] = 1
        return self.predClass

    def getConfusionMatrix(self):
        """Returns the confusion matrix dataframe

        self.testSamplesList and self.testTargetsList must be set before
        running this method; dataset i is taken as ground-truth class i.
        """
        classArray = np.zeros(len(self.models), dtype=int)
        classArray[0] = 1
        self.actualClass = np.repeat([classArray], len(self.testSamplesList[0]), axis=0)
        self.predClass = self.predictedClass(self.testSamplesList[0], self.testTargetsList[0])
        for i in range(1, len(self.testSamplesList)):
            classArray = np.zeros(len(self.models), dtype=int)
            classArray[i] = 1
            self.actualClass = np.concatenate(
                (self.actualClass, np.repeat([classArray], len(self.testSamplesList[i]), axis=0)))
            self.predClass = np.concatenate(
                (self.predClass, self.predictedClass(self.testSamplesList[i], self.testTargetsList[i])))
        matrix = metrics.confusion_matrix(self.actualClass.argmax(axis=1), self.predClass.argmax(axis=1))
        df_cm = pd.DataFrame(matrix.transpose(), index=self.activityNames, columns=self.activityNames)
        return df_cm

    def getEvaluationDf(self):
        """Return a DataFrame of each model's evaluation loss on each dataset
        (index = data, columns = models).
        """
        # assumes len(self.models) == len(self.activityNames) so the square
        # array below fits — TODO confirm
        evalLoss = np.zeros((len(self.models), len(self.activityNames)))
        for i, model in enumerate(self.models):
            # Column i: model i's loss on every dataset k.
            evalLoss[:, i] = np.array([model.evaluate(self.testSamplesList[k], self.testTargetsList[k]) for k in range(len(self.activityNames))]).T
        evalDf = pd.DataFrame(data=evalLoss, index=self.activityNames, columns=self.activityNames)
        evalDf.columns.name = 'models'
        evalDf.index.name = 'data'
        return evalDf
|
from drf_spectacular.utils import extend_schema
from rest_framework import serializers, status
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView,
)
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
class MyTokenObtainPairSerializer(TokenObtainPairSerializer):
    """Token serializer that embeds a custom ``username`` claim in the JWT."""

    @classmethod
    def get_token(cls, user):
        token = super().get_token(user)
        # Extra claim so clients can read the username straight off the token.
        token['username'] = user.username
        return token
class TokenObtainPairResponseSerializer(serializers.Serializer):
    """Response shape of the obtain-pair endpoint — schema documentation only."""
    access = serializers.CharField()
    refresh = serializers.CharField()

    def create(self, validated_data):
        # Documentation-only serializer; never used for writes.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        raise NotImplementedError()
class DecoratedTokenObtainPairView(TokenObtainPairView):
    """Obtain-pair view annotated with an explicit response schema."""
    serializer_class = MyTokenObtainPairSerializer

    @extend_schema(
        request=None,
        responses={status.HTTP_200_OK: TokenObtainPairResponseSerializer}
    )
    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)
class TokenRefreshResponseSerializer(serializers.Serializer):
    """Response shape of the token-refresh endpoint — schema documentation only."""
    access = serializers.CharField()

    def create(self, validated_data):
        # Documentation-only serializer; never used for writes.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        raise NotImplementedError()
class DecoratedTokenRefreshView(TokenRefreshView):
    """Token-refresh view annotated with an explicit response schema."""

    @extend_schema(
        request=None,
        responses={status.HTTP_200_OK: TokenRefreshResponseSerializer},
    )
    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)
class TokenVerifyResponseSerializer(serializers.Serializer):
    """Empty response shape of the token-verify endpoint — schema documentation only."""

    def create(self, validated_data):
        # Documentation-only serializer; never used for writes.
        raise NotImplementedError()

    def update(self, instance, validated_data):
        raise NotImplementedError()
class DecoratedTokenVerifyView(TokenVerifyView):
    """Token-verify view annotated with an explicit response schema."""

    @extend_schema(
        request=None,
        responses={status.HTTP_200_OK: TokenVerifyResponseSerializer},
    )
    def post(self, request, *args, **kwargs):
        return super().post(request, *args, **kwargs)
|
"""Users admin classes"""
#Django
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib import admin
#Models
from django.contrib.auth.models import User
from posts.models import Post
# Register your models here.
@admin.register(Post)
class PostAdmin(admin.ModelAdmin):
    """Admin configuration for Post: list columns, links, inline editing and search."""
    list_display = ('pk', 'user', 'title', 'photo')
    list_display_links = ('pk', 'user')
    list_editable = ('title',)
    search_fields = (
        'user__username',
        'title',
    )


# NOTE: dead code — leftover Profile-admin options kept as a bare string
# literal (effectively commented out); not used anywhere.
'''
list_filter = ('created',
'modified',
'user__is_active',
'user__is_staff'
)
fieldsets = (
('Profile', {
'fields': (
('user','picture'),
),
}),
('Extra Info',{
'fields':(
('phone_number','website'),
('biography'),
)
}),
('Metadata',{
'fields': (
('created', 'modified'),)
}),
)'''
|
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.sitemaps.views import sitemap
from django.views.generic import TemplateView
from django.views.i18n import javascript_catalog
from solid_i18n.urls import solid_i18n_patterns
from apps.front.views import custom_404, custom_500
from .sitemaps import sitemaps
# Packages whose translations are exported through the JS i18n catalog.
js_info_dict = {
    'domain': 'djangojs',
    'packages': ('apps.custom_admin', 'apps.front', 'apps.user_profile',),
}

# Non-localized URLs: robots.txt and the sitemap.
urlpatterns = [
    url(r'^robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain'),
        {'PROJECT_URI': settings.PROJECT_URI
         }),
    url(r'^sitemap\.xml$', sitemap, {'sitemaps': sitemaps}, name='django.contrib.sitemaps.views.sitemap'),
]

# Localized URLs (language-prefixed via solid_i18n).
urlpatterns += solid_i18n_patterns(
    url(r'^accounts/', include('apps.accounts.urls', namespace='accounts')),
    url(r'^products/', include('apps.products.urls', namespace='custom_products')),
    url(r'^cart/', include('apps.cart.urls', namespace='cart')),
    url(r'^ckeditor/', include('ckeditor_uploader.urls')),
    url(r'^jsi18n/$', javascript_catalog, js_info_dict, name='javascript-catalog'),
    url(r'^filer/', include('filer.urls')),
    url(r'^404/', custom_404, name='404'),
    # BUG FIX: this route was also named '404' (copy-paste), which shadowed
    # the real 404 name in reverse() lookups; it now carries its own name.
    url(r'^500/', custom_500, name='500'),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^', include('cms.urls')),
)

handler404 = custom_404
handler500 = custom_500

if settings.DEBUG:
    # Serve static and media files directly when running the dev server.
    from django.conf.urls.static import static
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
#!/usr/bin/env python
## For determing the number of alignments of novel genes/lncRNAs to each species
## In conjunction with the RNA-seq projecy
import os,sys
# Append the input file's name and its stripped lines to the output file.
outputfile = str(sys.argv[2])
# Read and strip every line; context managers fix the original's leaked
# file handles.
with open(sys.argv[1]) as fh:
    f = [line.strip() for line in fh]
with open(outputfile, 'a') as w:
    # Same output as the Python-2-only ``print >>w, sys.argv[1], f``:
    # space-separated values followed by a newline — but portable to
    # Python 3 as well.
    w.write('{0} {1}\n'.format(sys.argv[1], f))
|
import sys

# Competitive-programming harness: read test data from input.txt via input().
sys.stdin = open("input.txt")
# Number of test cases.
T = int(input())
# Plain brute force over all subsets would exceed the time limit; the set
# below deduplicates sums instead.
def func(n, result):  # n: index of the next element to decide on
    """Recursively collect every subset sum of inp_arr into result_arr.

    Relies on the module globals N (element count), inp_arr (values) and
    result_arr (set of sums seen so far).
    """
    result_arr.add(result)
    if n == N:
        return
    # Branch: exclude element n, then include it.
    func(n + 1, result)
    func(n + 1, result + inp_arr[n])
for tc in range(1, T + 1):
    N = int(input())
    inp_arr = list(map(int, input().split()))
    result_arr = set()
    # Enumerate all 2^N subset sums; the answer is the distinct-sum count.
    func(0, 0)
    print("#{} {}".format(tc, len(result_arr)))
|
import json
import math
import logging
# Root logger for the Lambda runtime; INFO so request events are recorded.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def findIndex(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1), iteratively."""
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
def make_response(data, status):
    """Wrap *data* as an API Gateway proxy response with the given HTTP status.

    The status code is mirrored into the JSON body under the "status" key.
    """
    body = dict(data)
    body["status"] = status
    return {"statusCode": status,
            "body": json.dumps(body)
            }
def validate_event(event):
    """Validate the query parameters for the power endpoint.

    Accepts a dict (or a JSON string, which is parsed first) and returns a
    dict with:
      state         -- True when the input is usable
      base/exponent -- float values (present only when state is True)
      message       -- human-readable error (present only when state is False)

    BUG FIX: the original checked ``type(...) not in [int, float]`` AFTER
    calling float(), which can never fail (dead code), while a non-numeric
    input made float() raise an uncaught ValueError. The conversion is now
    guarded and reported through the normal error path.
    """
    valid = {}
    valid["state"] = False
    if isinstance(event, str):
        try:
            event = json.loads(event)
        except Exception:
            logger.setLevel(logging.ERROR)
            logger.error("Not a valid json structure %s", event)
            valid["message"] = "Invalid event"
            return valid
    if "base" not in event or "exponent" not in event:
        valid["message"] = "Base and exponent need to be in query."
        return valid
    try:
        valid["base"] = float(event["base"])
        valid["exponent"] = float(event["exponent"])
    except (TypeError, ValueError):
        valid["message"] = "Base and exponent need to be integers or floats."
        return valid
    valid["state"] = True
    return valid
def function(event, context):
    """Lambda handler: compute base ** exponent from the query parameters.

    Returns a 400 response when validate_event rejects the input, otherwise
    a 200 response carrying base, exponent and the computed power.
    (The log message still says "Fibonaci" — apparent leftover from a
    sibling handler.)
    """
    event = event["queryStringParameters"]
    logger.info('Event: %s', event)
    valid = validate_event(event)
    if valid["state"] == False:
        return make_response(valid, 400)
    base = valid['base']
    exponent = valid["exponent"]
    p = base ** exponent
    logger.info('Fibonaci Response : %s', event)
    response = {}
    response["base"] = base
    response["exponent"] = exponent
    response["response"] = p
    response["state"] = True
    return make_response(response, 200)
|
from django.db import models
from .connection import Connection
class ConnectionSsh(Connection):
    """SSH-specific Guacamole connection model (protocol is forced to 'ssh')."""

    class Meta:
        verbose_name = "SSH Connection"
        verbose_name_plural = "SSH Connections"

    # Settings are explained here:
    # https://guacamole.apache.org/doc/gug/configuring-guacamole.html#ssh

    # Network parameters
    # Host key used to verify the server's identity (blank = no check).
    host_key = models.TextField(verbose_name="Host Key", blank=True)
    # SSH keepalive interval; 0 disables keepalives (the default).
    server_alive_interval = models.IntegerField(
        verbose_name="Keepalive interval", null=False, default=0)

    # Authentication
    # Private key for the user's identity.
    private_key = models.TextField(verbose_name="Private Key", blank=True,
                                   null=True)
    # Passphrase to decode the private key.
    passphrase = models.CharField(max_length=100, blank=True, null=True)

    # Display settings
    # (color_scheme / font_name could also be implemented later.)
    font_size = models.IntegerField(default=8, blank=True)

    # Other: command to run instead of the default shell.
    command = models.CharField(max_length=255, null=True, blank=True)

    def save(self, *args, **kwargs):
        # Pin the protocol before delegating to the base model's save.
        self.protocol = 'ssh'
        super().save(*args, **kwargs)

    def __str__(self):
        return self.name + " (SSH)"

    # Update Guacamole connection parameters with SSH specific parameters
    def get_guacamole_parameters(self, user):
        """Extend the base connection parameters with SSH-specific ones."""
        parameters = super().get_guacamole_parameters(user=user)
        parameters["font_size"] = self.font_size
        if self.command:
            parameters["command"] = self.command
        # If the superclass did not provide per-user credentials, fall back
        # to the key/passphrase stored on the connection itself.
        if 'private_key' not in parameters:
            parameters['private_key'] = self.private_key if self.private_key else ""
        if 'passphrase' not in parameters:
            parameters['passphrase'] = self.passphrase if self.passphrase else ""
        # Advertise a capable terminal: "linux" is sent by default and some
        # servers then treat the session as a dumb terminal.
        parameters['terminal_type'] = 'xterm'
        # parameters['color_scheme'] = 'green-black'
        parameters['color_scheme'] = \
            'foreground: rgb:FF/D7/00; background: rgb:30/30/30;'
        return parameters
|
# -*- coding=UTF-8 -*-
# pyright: strict, reportTypeCommentUsage=none
from __future__ import absolute_import, division, print_function, unicode_literals
import nuke
import wulifang
import wulifang.nuke
from wulifang._util import (
cast_text,
)
def _on_user_create():
    """Advise the user when a newly created node has a recommended replacement.

    Runs from Nuke's onUserCreate callback (registered in init_gui):
    inspects the node class — and, for Groups, the node name — and shows an
    informational message for known slow or third-party nodes.
    """
    n = nuke.thisNode()
    class_ = cast_text(n.Class())
    if class_ in ("OFXcom.genarts.sapphire.stylize.s_halftone_v1",):
        wulifang.message.info("S_Halftone 节点性能不佳,建议用 Hatch 节点代替")
        return
    if class_ in ("OFXcom.genarts.sapphire.stylize.s_halftonecolor_v1",):
        wulifang.message.info("S_HalftoneColor 节点性能不佳,建议用 Hatch 节点代替")
        return
    if class_ in ("OFXcom.genarts.sapphire.stylize.s_vignette_v1",):
        wulifang.message.info("S_Vignette 节点会导致渲染卡顿,建议用 Vignette 节点代替")
        return
    # BUG FIX: was `class_ in ("thersher")` — parentheses without a comma are
    # just a string, so this was a substring test, not tuple membership.
    if class_ in ("thersher",):
        wulifang.message.info("%s 节点来自第三方且效果不佳,建议用 Hatch 节点代替" % (class_,))
        return
    if class_ in ("RealHeatDist",):
        wulifang.message.info("%s 节点来自第三方,建议用 HeatDistort 节点代替" % (class_,))
        return
    if class_ in ("P_Matte", "P_Ramp"):
        wulifang.message.info("%s 节点来自第三方,建议用 PositionKeyer 节点代替" % (class_,))
        return
    if class_ in ("AutocomperArnold",):
        wulifang.message.info("%s 节点来自第三方,建议用 AOV 自动组装 (F1) 功能代替" % (class_,))
        return
    if class_ in ("Chromatic_Aberration",):
        wulifang.message.info("%s 节点来自第三方,建议用 Aberration 节点代替" % (class_,))
        return
    if class_ in ("Group",):
        # Third-party gizmos arrive as Groups; identify them by name prefix.
        name = cast_text(n.name())
        if name.startswith("RealGlow"):
            wulifang.message.info("RealGlow 节点来自第三方,建议用 SoftGlow 节点代替")
            return
def init_gui():
    """Register the node-creation advisor with the GUI callback system."""
    wulifang.nuke.callback.on_user_create(_on_user_create)
|
# -*- coding: utf-8 -*-
#
# satcfe/alertas.py
#
# Copyright 2015 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from datetime import date
from datetime import datetime
from satcomum.ersat import ChaveCFeSAT
from .util import hms_humanizado
#
# (!) As funções `_get_now` e `_get_today` existem para possibilitar o
# monkeypatch dos valores de data/hora, uma vez que pytest não pode definir
# atributos de built-ins e tipos de extensão como `datetime.datetime`.
#
def _get_now():
    # Thin wrapper so tests can monkeypatch "now" (pytest cannot patch
    # attributes of built-in/extension types like datetime.datetime).
    return datetime.now()


def _get_today():
    # Thin wrapper so tests can monkeypatch "today" (same reason as above).
    return date.today()
class AlertaOperacao(object):
    """Base class for operation alerts."""

    # Registered alert classes (populated elsewhere).
    alertas_registrados = []

    def __init__(self, resposta):
        self.resposta = resposta
        self._ativo = False

    @property
    def ativo(self):
        """Whether this alert is currently active."""
        return self._ativo

    def checar(self):
        """Actually check whether the alert should be activated based on the
        response data and other conditions. Alert subclasses must override
        this method.

        :returns: ``True`` when the check marks this alert as active
            (same as :attr:`ativo`).
        :rtype: bool
        """
        raise NotImplementedError()

    def mensagem(self):
        """Return a user-friendly message describing the alert condition as
        precisely as possible. Alert subclasses must override this method.

        :rtype: unicode
        """
        raise NotImplementedError()
class AlertaCFePendentes(AlertaOperacao):
    """Checks for CF-e-SAT documents held on the SAT equipment that are still
    waiting to be transmitted to SEFAZ. This alert is active when at least
    one CF-e-SAT document is pending transmission.
    """

    def __init__(self, resposta):
        super(AlertaCFePendentes, self).__init__(resposta)
        self._pendentes = 0  # pending-document count computed by checar()

    @property
    def pendentes(self):
        """Number of coupons pending transmission to SEFAZ.

        :rtype: int
        """
        return self._pendentes

    def checar(self):
        # Both access keys blank (or all zeros) means no pending coupons.
        if self._vazio(self.resposta.LISTA_INICIAL) \
                and self._vazio(self.resposta.LISTA_FINAL):
            self._pendentes = 0
            self._ativo = False
        else:
            # Pending count derived from the coupon numbers (nCF) embedded in
            # the first/last pending access keys.
            # NOTE(review): this computes LISTA_INICIAL minus LISTA_FINAL;
            # confirm which list carries the higher coupon number, otherwise
            # the count could come out negative.
            a = self._nCF(self.resposta.LISTA_INICIAL)
            b = self._nCF(self.resposta.LISTA_FINAL)
            self._pendentes = (a - b) + 1
            self._ativo = self._pendentes > 0
        return self._ativo

    def mensagem(self):
        if self.ativo:
            if self.pendentes == 1:
                frase = (
                        'Existe 1 cupom CF-e-SAT pendente, que ainda não foi '
                        'transmitido para a SEFAZ.'
                )
            else:
                frase = (
                        'Existem {:d} cupons CF-e-SAT pendentes, que ainda '
                        'não foram transmitidos para a SEFAZ.'
                ).format(self.pendentes)
        else:
            frase = 'Nenhum CF-e-SAT pendente de transmissão.'
        return '{} {}'.format(frase, self._ultima_comunicacao())

    def _vazio(self, chave_cfe):
        # A key is "empty" when blank or made up entirely of zeros.
        chave = chave_cfe.strip().strip('0')
        return len(chave) == 0

    def _nCF(self, chave_cfe):
        # Extract the fiscal coupon number (nCF) from a CF-e access key.
        chave = ChaveCFeSAT('CFe{}'.format(chave_cfe))
        return int(chave.numero_cupom_fiscal)

    def _momento(self):
        if self.resposta.DH_ULTIMA.date() == _get_today():
            # Last SEFAZ contact happened today: show only hours/minutes.
            texto = 'às {}'.format(self.resposta.DH_ULTIMA.strftime('%H:%M'))
        else:
            # Otherwise include the full date and time.
            texto = (
                    'em {}'
            ).format(self.resposta.DH_ULTIMA.strftime('%d/%m/%Y %H:%M'))
        return texto

    def _ultima_comunicacao(self):
        return (
                'A data da última comunicação com a SEFAZ foi {}.'
        ).format(self._momento())
class AlertaVencimentoCertificado(AlertaOperacao):
    """Checks the expiration date of the installed certificate and activates
    the alert when expiration is near. Adjust the proximity threshold through
    the :attr:`vencimento_em_dias` attribute (default: 60 days).
    """

    vencimento_em_dias = 60
    """Number of days before certificate expiration that activates the
    alert.
    """

    def __init__(self, resposta):
        super(AlertaVencimentoCertificado, self).__init__(resposta)
        self._delta = None  # datetime.timedelta set by checar()

    @property
    def vencido(self):
        """Whether the certificate installed on the equipment has expired.

        :rtype: bool
        """
        return self._delta.days < 0

    @property
    def dias_para_vencimento(self):
        """Days remaining until the installed certificate expires; zero when
        it has already expired.

        :rtype: int
        """
        return 0 if self.vencido else self._delta.days

    def checar(self):
        self._delta = self.resposta.CERT_VENCIMENTO - _get_today()
        self._ativo = self.dias_para_vencimento <= self.vencimento_em_dias
        return self._ativo

    def mensagem(self):
        if self.vencido:
            return 'O certificado instalado venceu!'
        num_dias = self.dias_para_vencimento
        if num_dias == 0:
            return 'O certificado instalado vence hoje!'
        palavra_dia = 'dia' if num_dias == 1 else 'dias'
        return (
                'O certificado instalado está a {} {} do vencimento.'
        ).format(num_dias, palavra_dia)
class AlertaDivergenciaHorarios(AlertaOperacao):
    """Checks the SAT equipment clock against the current local time and
    raises an alert when they diverge by more than 3600 seconds (1 hour).
    Change :attr:`tolerancia_em_segundos` to adjust the tolerance limit.

    .. note::
        The one-hour tolerance is inherited from PAF-ECF **Requirement
        XVII**, *synchronism between the record's date/time and the fiscal
        coupon's date/time*, even though SAT-CF-e has no relation to PAF-ECF.
    """

    tolerancia_em_segundos = 3600  # 1h
    """Tolerance limit, in seconds, above which the alert activates."""

    def __init__(self, resposta):
        super(AlertaDivergenciaHorarios, self).__init__(resposta)
        self._dataref = _get_now()  # local reference time, captured now
        self._delta = None  # datetime.timedelta set by checar()

    @property
    def divergencia(self):
        """Divergence, in whole seconds, between the local (computer) clock
        and the SAT equipment clock reported by the operational-status query.
        Microsecond precision is discarded.

        A **negative divergence** means the local (computer) clock is behind
        the SAT equipment clock. To know whether the divergence exceeded the
        tolerance limit, check :attr:`~AlertaOperacao.ativo`.

        :rtype: int
        """
        return int(self._delta.total_seconds())

    def checar(self):
        self._delta = self._dataref - self.resposta.DH_ATUAL
        self._ativo = abs(self.divergencia) > self.tolerancia_em_segundos
        return self._ativo

    def mensagem(self):
        if self.divergencia == 0:
            frase = 'Os horários são idênticos (sem divergência).'
        else:
            if self.ativo:
                # Over tolerance: report both clocks and both quantities.
                fmt = '%d/%m/%Y %H:%M'
                frase = (
                        'Há uma divergência entre o horário do sistema e do '
                        'equipamento SAT superior ao limite tolerável. O '
                        'horário do sistema é {0} e do equipamento SAT é {1} '
                        '(tolerância de {2}, divergência de {3}).'
                ).format(
                        self._dataref.strftime(fmt),
                        self.resposta.DH_ATUAL.strftime(fmt),
                        hms_humanizado(self.tolerancia_em_segundos),
                        hms_humanizado(abs(self.divergencia)))
            else:
                # Within tolerance: just say ahead/behind and by how much.
                situacao = 'atrasado' if self.divergencia < 0 else 'adiantado'
                frase = (
                        'O horário do computador está {0} em relação ao '
                        'horário do equipamento SAT em {1}, dentro do limite '
                        'de tolerância de {2}.'
                ).format(
                        situacao,
                        hms_humanizado(abs(self.divergencia)),
                        hms_humanizado(self.tolerancia_em_segundos))
        return frase
def registrar(classe_alerta):
    """Register an alert class (subclass of :class:`AlertaOperacao`).

    Registering the same class twice is a no-op. See :func:`checar` for
    details on how registered alerts are evaluated.
    """
    registrados = AlertaOperacao.alertas_registrados
    if classe_alerta not in registrados:
        registrados.append(classe_alerta)
def checar(cliente_sat):
    """Run every registered alert (see :func:`registrar`) against the data
    from the SAT equipment's operational-status query and return the list of
    alerts that came back active.

    :param cliente_sat: An instance of
        :class:`satcfe.clientelocal.ClienteSATLocal` or
        :class:`satcfe.clientesathub.ClienteSATHub` on which the
        operational-status query will be invoked.

    :rtype: list
    """
    resposta = cliente_sat.consultar_status_operacional()
    instancias = (
            classe(resposta)
            for classe in AlertaOperacao.alertas_registrados)
    return [alerta for alerta in instancias if alerta.checar()]
# Register the built-in alerts so :func:`checar` evaluates them by default.
registrar(AlertaCFePendentes)
registrar(AlertaVencimentoCertificado)
registrar(AlertaDivergenciaHorarios)
|
#!/usr/bin/env python
# encoding: utf-8
"""
Institut Villebon-Charpak
"""
from math import * # package pour les fonctions mathématiques (pi, cos, exp,...)
from random import * # package pour les nombres (pseudo)-aléatoires
# --- Game 1: the computer picks a number, the player guesses -----------------
a = 1
b = 1024
continuer = True
while continuer:
    secret = randrange(a, b)
    print("*"*80)
    print('Je pense a un nombre entre entier ' + str(a) + ' et ' + str(b) + ', devine lequel !')
    found = False
    tries = 0
    while not found:
        # BUG FIX: Python 2's input() evaluates arbitrary expressions typed
        # by the user; read the raw string and convert it explicitly.
        guess = int(raw_input())
        if guess < secret:
            print('Non, mon nombre est plus grand. Essaie encore.')
        elif guess > secret:
            print("Non, mon nombre est plus petit. Essaie encore.")
        else:
            found = True
        tries += 1
    print("Bravo ! Tu as devine que mon nombre etait " + str(secret) + ". Il t'a fallu " + str(tries) + " essais.")
    print("*"*80)
    print("Veux-tu rejouer ?")
    # BUG FIX: append " " so pressing Enter on an empty line does not raise
    # IndexError at answer[0] (the second game below already did this).
    answer = raw_input() + " "
    if answer[0] == "n" or answer[0] == "N":
        continuer = False
# --- Game 2: the player picks a number, the computer binary-searches for it --
continuer = True
print('On va changer les rôles !')
while continuer:
    print("*"*80)
    print('Penses a un nombre entre entier ' + str(a) + ' et ' + str(b) + ", et je vais tenter de le deviner. Pense d'abord a ton nombre et appuie sur n'importe quelle touche pour commencer.")
    x = raw_input()
    found = False
    tries = 0
    c, d = a, b
    while not found:
        # Integer midpoint of the current search interval. '//' behaves the
        # same as '/' on Python 2 ints but stays an int under Python 3 too.
        guess = (c + d) // 2
        print("Est que ton nombres est " + str(guess) + " ?")
        answer = raw_input() + " "  # the " " keeps answer[0] safe on empty input
        if answer[0] == ">":
            c, d = guess, d
        elif answer[0] == "<":
            c, d = c, guess
        else:
            found = True
        tries += 1
    print("Youhou, j'ai trouve ! Il m'a fallu " + str(tries) + " essais.")
    print("Veux-tu rejouer ?")
    # BUG FIX: guard against empty input before indexing answer[0], exactly
    # as done inside the guessing loop above.
    answer = raw_input() + " "
    if answer[0] == "n" or answer[0] == "N":
        continuer = False
|
#
# struct_test.py
# Nazareno Bruschi <nazareno.bruschi@unibo.it>
#
# Copyright (C) 2019-2020 University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
# Base install locations, relative to the script's working directory.
PULPNNInstallPath = cwd = os.getcwd() + "/../"
PULPNNSrcDirs = {'script': PULPNNInstallPath + "scripts/"}

PULPNNInstallPath32bit = cwd = os.getcwd() + "/../32bit/"
PULPNNInstallPath64bit = cwd = os.getcwd() + "/../64bit/"
PULPNNTestFolder32bit = PULPNNInstallPath32bit + "test/"
PULPNNTestFolder64bit = PULPNNInstallPath64bit + "test/"


def _build_src_dirs(install_path, test_folder):
    """Return the source/test directory map for one library flavor.

    The 32-bit and 64-bit trees share the exact same layout, so the two
    dictionaries below are generated from this single template instead of
    being maintained as duplicated literals.

    :param install_path: root of the library install tree (with trailing /).
    :param test_folder: root of the test tree (with trailing /).
    :returns: dict mapping logical directory names to absolute paths.
    """
    return {
        # Library (kernel) sources.
        'pulp_nn_inc': install_path + "include/",
        'pulp_nn_pointwise_convolution': install_path + "src/StandardConvolutions/",
        'pulp_nn_matmul': install_path + "src/MatrixMultiplications/",
        'pulp_nn_depthwise_convolution': install_path + "src/DepthwiseConvolutions/",
        'pulp_nn_linear_convolution_nq': install_path + "src/LinearConvolutionsNoQuant/",
        'pulp_nn_linear_convolution_q': install_path + "src/LinearConvolutionsQuant/",
        'pulp_nn_support_function': install_path + "src/SupportFunctions/",
        # Generated test sources.
        'include': test_folder + "include/",
        'src': test_folder + "src/",
        'pointwise_convolution': test_folder + "src/StandardConvolutions/",
        'matmul': test_folder + "src/MatrixMultiplications/",
        'depthwise_convolution': test_folder + "src/DepthwiseConvolutions/",
        'linear_convolution_nq': test_folder + "src/LinearConvolutionsNoQuant/",
        'linear_convolution_q': test_folder + "src/LinearConvolutionsQuant/",
        'support_function': test_folder + "src/SupportFunctions/",
        # Test data and golden models.
        'data_allocation_pw': test_folder + "include/DataAllocationStandardConvolutions/",
        'data_allocation_dw': test_folder + "include/DataAllocationDepthwiseConvolutions/",
        'data_allocation_ln_nq': test_folder + "include/DataAllocationLinearConvolutionsNoQuant/",
        'data_allocation_ln_q': test_folder + "include/DataAllocationLinearConvolutionsQuant/",
        'golden_model_pw': test_folder + "include/GoldenModelStandardConvolutions/",
        'golden_model_dw': test_folder + "include/GoldenModelDepthwiseConvolutions/",
        'golden_model_ln_nq': test_folder + "include/GoldenModelLinearConvolutionsNoQuant/",
        'golden_model_ln_q': test_folder + "include/GoldenModelLinearConvolutionsQuant/",
        'test': test_folder,
    }


PULPNNSrcDirs32bit = _build_src_dirs(PULPNNInstallPath32bit, PULPNNTestFolder32bit)
PULPNNSrcDirs64bit = _build_src_dirs(PULPNNInstallPath64bit, PULPNNTestFolder64bit)
import os
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import pandas
import flask
# Flask server hosting the Dash app; the secret key is taken from the
# environment, with a development fallback.
server = flask.Flask('app')
server.secret_key = os.environ.get('secret_key', 'secret')

app = dash.Dash('app', server=server)
app.title = 'WCBU 2017 Statistics'
app.scripts.config.serve_locally = False
# Serve the lighter "basic" plotly bundle from the CDN instead of the full one.
dcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-basic-latest.min.js'

# Player statistics, loaded at import time from the working directory. The
# callback below reads the TeamName, Division, FirstName, LastName, Goals
# and Assists columns.
players = pandas.read_csv('players.csv')

# Dropdown options built from the distinct teams/divisions found in the data.
team_options = [
    {'label': team, 'value': team} for team in
    list(players['TeamName'].unique())
]
division_options = [
    {'label': division, 'value': division} for division in
    list(players['Division'].unique())
]

# Page layout: a division filter, a team filter, and the pie-chart graph.
app.layout = html.Div([
    html.H1('Player Statistics'),
    dcc.Dropdown(
        id='divisions-dropdown',
        options=division_options,
        value='Mixed'
    ),
    dcc.Dropdown(
        id='teams-dropdown',
        options=team_options,
        value='India'
    ),
    dcc.Graph(id='my-graph')
], className="container")

app.css.append_css({
    'external_url': (
        'https://cdn.rawgit.com/plotly/dash-app-stylesheets/8bc4d40ae11324931d832b02dc91183025b50f6a/dash-hello-world.css'
    )
})
@app.callback(Output('my-graph', 'figure'),
              [Input('divisions-dropdown', 'value'), Input('teams-dropdown', 'value')])
def update_graph(division, team):
    """Build the goals/assists double-donut figure for the selected team."""
    title = "Player Statistics - {} - {}".format(team, division)
    selected = players[
        (players.TeamName == team) & (players.Division == division)
    ]
    names = selected.FirstName + ' ' + selected.LastName

    def donut(values, name, domain, hoverinfo):
        # One pie trace; both traces share labels and visual settings.
        return {
            "values": values,
            "labels": names,
            "domain": {"x": domain},
            "name": name,
            "textposition": "inside",
            "hoverinfo": hoverinfo,
            "hole": .4,
            "type": "pie",
            "textinfo": "value",
        }

    def center_label(text, x):
        # Text placed in the hole of each donut.
        return {
            "font": {"size": 20},
            "showarrow": False,
            "text": text,
            "x": x,
            "y": 0.5,
        }

    return {
        "data": [
            donut(selected.Goals, "Goals", [0, 0.48], "label+value+name"),
            donut(selected.Assists, "Assists", [0.52, 1], "label+value+name+percent"),
        ],
        "layout": {
            "title": title,
            "annotations": [
                center_label("Goals", 0.22),
                center_label("Assists", 0.785),
            ],
        },
    }
# Start the Dash development server when executed directly.
if __name__ == '__main__':
    app.run_server()
|
from StarWars import views
from django.conf.urls import url
"""
This file maps the URLs with the view functions.
"""
# Route table: maps each URL regex to its StarWars view function.
urlpatterns = [
    url(r'^$', views.Index, name='Index'),
    url(r'^load_data/$', views.HomePage, name='HomePage'),
    url(r'^Search_Planet/$', views.SearchPlanet, name='SearchPlanet'),
    url(r'^Display_Planet/$', views.DisplayPlanet, name='DisplayPlanet'),
    url(r'^add_data/$', views.add_data, name='add_data'),
    url(r'^add_Planets_data/$', views.add_planet_data, name='add_planet_data'),
    url(r'^Display_Saved_Data/$', views.display_saved_data, name='display_saved_data'),
]
|
import sys
def Graph(nodes):
    """Return an adjacency-list graph with vertices 0..nodes-1 and no edges."""
    return {vertex: [] for vertex in range(nodes)}
def addPath(n1, n2, weight, graph):
    """Add an undirected edge of the given weight between n1 and n2.

    Mutates and returns `graph` (adjacency dict of [neighbor, weight] pairs).
    """
    for src, dst in ((n1, n2), (n2, n1)):
        graph[src].append([dst, weight])
    return graph
def generateMST(graph, start):
    """Compute a minimum spanning tree of `graph` with Prim's algorithm.

    BUG FIX: the previous version relaxed keys with
    ``dist[node] > weight + dist[vertex]`` — that is Dijkstra's shortest-path
    relaxation, which builds a shortest-path tree, not an MST. Prim's key for
    a vertex is simply the weight of the cheapest edge connecting it to the
    tree. A parent array is also needed, because the cheapest frontier edge
    is not necessarily incident to the most recently added vertex. The
    pointless shallow copy of `graph` was removed (it was never mutated).

    :param graph: adjacency dict {node: [[neighbor, weight], ...]}.
    :param start: vertex at which the tree is rooted.
    :returns: tuple ``(mst, dist)`` where `mst` is the spanning tree as an
        adjacency dict and ``dist[v]`` is the weight of the edge that
        connects `v` to the tree (0 for `start`).
    """
    n = len(graph)
    visited = [False] * n
    mst = {i: [] for i in range(n)}
    dist = [sys.maxsize] * n   # cheapest known edge into the tree, per vertex
    parent = [-1] * n          # tree endpoint of that cheapest edge
    dist[start] = 0
    vertex = start
    for _ in range(n):
        visited[vertex] = True
        # Update frontier keys with edges leaving the newly added vertex.
        for node, weight in graph[vertex]:
            if not visited[node] and weight < dist[node]:
                dist[node] = weight
                parent[node] = vertex
        # Pick the unvisited vertex with the cheapest connecting edge.
        minver = -1
        minval = sys.maxsize
        for i in range(n):
            if not visited[i] and dist[i] < minval:
                minval = dist[i]
                minver = i
        if minver != -1:
            mst[parent[minver]].append([minver, minval])
            mst[minver].append([parent[minver], minval])
            vertex = minver
    return (mst, dist)
# Build a sample 9-vertex weighted graph, generate the tree rooted at
# vertex 0 and print both the tree and the per-vertex distances.
graph = Graph(9)
graph = addPath(0, 1, 4, graph)
graph = addPath(0, 7, 8, graph)
graph = addPath(1, 2, 8, graph)
graph = addPath(1, 7, 11, graph)
graph = addPath(2, 3, 7, graph)
graph = addPath(2, 8, 2, graph)
graph = addPath(2, 5, 4, graph)
graph = addPath(3, 4, 9, graph)
graph = addPath(3, 5, 14, graph)
graph = addPath(4, 5, 10, graph)
graph = addPath(5, 6, 2, graph)
graph = addPath(6, 7, 1, graph)
graph = addPath(6, 8, 6, graph)
graph = addPath(7, 8, 7, graph)
mst, distances = generateMST(graph, 0)
print(mst)
print(" ".join(map(str, distances)))
|
"""
CDC vital statistics
Link
* http://www.cdc.gov/nchs/data_access/vitalstatsonline.htm
Prepared for Data Bootcamp course at NYU
* https://github.com/DaveBackus/Data_Bootcamp
* https://github.com/DaveBackus/Data_Bootcamp/Code/Lab
Written by Dave Backus, January 2016
Created with Python 3.5
"""
"""
import packages, check versions
"""
import sys
import pandas as pd
#import matplotlib.pyplot as plt
print('\nPython version: ', sys.version)
print('Pandas version: ', pd.__version__, '\n')

#%%
"""
read data
"""
url1 = 'ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/Datasets/'
url2 = 'DVS/mortality/mort2014us.zip'

# healthcare spending
# BUG FIX: the frame was bound to `cdc` while every statement below used the
# undefined name `hc` (NameError on first use); bind it as `hc`.
# NOTE(review): url1+url2 points at the CDC mortality zip, yet the code reads
# it as an Excel workbook of healthcare spending / physicians — the URL looks
# copied from a different script; confirm the intended data source.
hc = pd.read_excel(url1+url2,
                   skiprows=3, sheetname=1, index_col=0,
                   na_values=['..'])

# select years
hc = hc[list(range(1980, 2014))]

# select countries and transpose df
countries = ['Canada', 'France', 'Germany', 'Japan', 'United Kingdom',
             'United States']
some = hc[hc.index.isin(countries)].T

# plot
ax = some.plot(lw=2, subplots=False)
ax.set_title('Healthcare spending', fontsize=14, loc='left')
ax.set_ylabel('Percent of GDP')
ax.legend(loc='upper left', fontsize=10, handlelength=2, labelspacing=0.15)

#%%
"""
Plot number of docs
"""
# number of docs
docs = pd.read_excel(url1+url2,
                     skiprows=3, sheetname='Physicians', index_col=0,
                     skip_footer=21,
                     na_values=['..'])

# select years
docs = docs[[2012]]

# strip trailing footnote markers from country names
# mpd.columns = map(str.rstrip, mpd.columns)
docs.index = [name.rsplit(maxsplit=1)[0] for name in docs.index.tolist()]

# select countries
countries = ['Canada', 'France', 'Germany', 'Japan', 'United Kingdom',
             'United States']
some = docs[docs.index.isin(countries)]

# plot
ax = some.plot(kind='barh', alpha=0.5, legend=False)
ax.set_title('Number of doctors', fontsize=14, loc='left')
ax.set_xlabel('Number of Doctors per 1000 Population')
|
from flask import Blueprint

# Blueprint aggregating this package's routes.
routes = Blueprint('routes', __name__)

# Wildcard imports are intentional: importing these modules registers their
# @routes.route view handlers on the blueprint above.
from .index import *
from .other import *
|
import platform
from .image import ImageFloatRGBA
from ..samplers import Sample
class Film:
    """Accumulates rendered samples into a float RGBA image, averaging the
    contributions of successive render passes (progressive refinement).
    """

    def __init__(self, width, height, renderer):
        self._image = ImageFloatRGBA(width, height)
        self._height = height
        self._renderer = renderer
        self._ds = None  # data sections of the assembled kernel, one per runtime
        self.set_pass(0)

    def set_pass(self, n):
        """Select render pass *n*.

        Pixels are blended as a running average across passes:
        new = (old * n + sample) / (n + 1).
        """
        self._current_pass = float(n)
        self._inv_pass = 1.0 / (float(n) + 1.0)
        self._populate_ds()

    def set_resolution(self, width, height):
        """Replace the image with a fresh buffer of the given size."""
        # NOTE(review): width is not cached separately; the image carries it.
        self._image = ImageFloatRGBA(width, height)
        self._height = height

    @property
    def image(self):
        """The backing :class:`ImageFloatRGBA` buffer."""
        return self._image

    #TODO -- currently only box filter are suported for now
    def add_sample(self, sample, spectrum):
        """Convert *spectrum* to RGB, clamp negative components to zero and
        blend the result into the running average at the sample's pixel.
        """
        r, g, b = self._renderer.color_mgr.to_RGB(spectrum)
        if r < 0.0: r = 0.0
        if g < 0.0: g = 0.0
        if b < 0.0: b = 0.0
        iy = self._height - sample.iy - 1 #flip the image
        r1, g1, b1, a1 = self._image.get_pixel(sample.ix, iy)
        # Running average across passes: new = (old * n + sample) / (n + 1).
        scaler = self._current_pass
        inv_scaler = self._inv_pass
        r = (r1 * scaler + r) * inv_scaler
        g = (g1 * scaler + g) * inv_scaler
        b = (b1 * scaler + b) * inv_scaler
        self._image.set_pixel(sample.ix, iy, r, g, b)

    def add_sample_asm(self, runtimes, label):
        """Assemble the sample-accumulation kernel (the asm counterpart of
        :meth:`add_sample`) and load it into every runtime in *runtimes*
        that does not already define *label*.
        """
        #eax - pointer to spectrum
        #ebx - pointer to sample
        bits = platform.architecture()[0]
        asm_structs = Sample.struct()
        ASM = """
#DATA
"""
        ASM += asm_structs + """
float alpha_channel[4] = 0.0, 0.0, 0.0, 0.99
uint32 height
"""
        # Pointer width in the data section depends on the host architecture.
        if bits == '64bit':
            ASM += "uint64 ptr_buffer\n"
        else:
            ASM += "uint32 ptr_buffer\n"
        ASM += """
uint32 pitch_buffer
uint32 mask[4] = 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00
float scaler[4]
float inv_scaler[4]
#CODE
"""
        ASM += "global " + label + ":\n "
        if bits == '64bit':
            ASM += 'push rbx\n'
        else:
            ASM += 'push ebx\n'
        ASM += """macro call spectrum_to_rgb
macro eq128 xmm4 = mask
macro call andps xmm0, xmm4
macro eq128 xmm0 = xmm0 + alpha_channel
macro call zero xmm5
macro call maxps xmm0, xmm5
;flip the image and call set pixel
"""
        # Compute the target pixel address: flip y, then
        # addr = ptr_buffer + row * pitch + x * 16 (16 bytes per RGBA pixel).
        if bits == '64bit':
            ASM += """
pop rbx
mov eax, dword [rbx + sample.ix]
mov ecx, dword [rbx + sample.iy]
mov ebx, dword [height] ;because of flipping image
sub ebx, ecx
mov edx, dword [pitch_buffer]
mov rsi, qword [ptr_buffer]
imul ebx, edx
imul eax, eax, 16
add eax, ebx
add rax, rsi
"""
        else:
            ASM += """
pop ebx
mov eax, dword [ebx + sample.ix]
mov ecx, dword [ebx + sample.iy]
mov ebx, dword [height] ;because of flipping image
sub ebx, ecx
mov edx, dword [pitch_buffer]
mov esi, dword [ptr_buffer]
imul ebx, edx
imul eax, eax, 16
add eax, ebx
add eax, esi
"""
        # Blend into the existing pixel with the running-average weights.
        ASM += """
macro eq128 xmm1 = eax
macro eq128 xmm1 = xmm1 * scaler + xmm0
macro eq128 xmm1 = xmm1 * inv_scaler
macro eq128 eax = xmm1 {xmm7}
ret
"""
        #TODO -- put alpha channel to 0.99 after xmm1 * inv_scaler
        mc = self._renderer.assembler.assemble(ASM, True)
        #mc.print_machine_code()
        name = "film" + str(id(self))
        self._ds = []
        for r in runtimes:
            if not r.global_exists(label):
                self._ds.append(r.load(name, mc))
        self._populate_ds()

    def _populate_ds(self):
        """Push the current film state (buffer address/pitch, flipped height
        and pass-blend factors) into each loaded kernel's data section.
        """
        if self._ds is None:
            return
        for ds in self._ds:
            width, height = self._image.size()
            ds["height"] = height - 1
            scaler = self._current_pass
            inv_scaler = self._inv_pass
            ds["scaler"] = (scaler, scaler, scaler, 1.0)
            ds["inv_scaler"] = (inv_scaler, inv_scaler, inv_scaler, 1.0)
            addr, pitch = self._image.address_info()
            ds["ptr_buffer"] = addr
            ds["pitch_buffer"] = pitch
|
'''
03 - Faça um algoritmo que calcule a área de um triângulo, considerando a
fórmula (base*altura)/2 . Utilize as variáveis AREA, BASE e ALTURA.
#PORTUGOL#
Defina: area, base, altura: Real
INICIO
escreva ("Qual o tamanho da base do triângulo? ")
leia (base)
escreva ("Qual o tamanho da altura do triângulo?")
leia (altura)
area <-- (base * altura)/2
escreva ("A área do triângulo é: ", área)
FIM
'''
# Read height and base, re-prompting until each is strictly positive.
# BUG FIX: the guards used `< 0`, so 0 was accepted even though the prompt
# asks for a value greater than zero (and a zero base/height gives a
# degenerate triangle); use `<= 0` instead.
altura = float(input("Digite o tamanho da altura, maior que zero: "))
while altura <= 0:
    altura = float(input("Digite o tamanho da altura, maior que zero: "))
base = float(input("Digite o tamanho da base, maior que zero: "))
while base <= 0:
    base = float(input("Digite o tamanho da base, maior que zero: "))

# Triangle area: (base * height) / 2.
area = (base*altura)/2
print("O tamanho da area é igual a: ", area)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.