id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1841274 | <filename>metricreporter.py
import requests
import json
import socket
from time import time
from pprint import pprint
class MetricsReporter(object):
    """Post metrics to an Ambari Metrics Service (AMS) timeline endpoint.

    Subclasses implement calculate_metric() to supply the numeric value;
    post() wraps that value into the AMS JSON payload and sends it via HTTP.
    """

    PATH = "/ws/v1/timeline/metrics"
    HEADERS = {"Content-Type": "application/json"}

    def __init__(self, url_base, metricname, appid):
        self.url_base = url_base
        self.metricname = metricname
        self.appid = appid
        self.hostname = None           # optional; falls back to local FQDN in payload
        self.url = url_base + self.PATH
        self.value = None
        self.response = None
        self.metrics_jso = None        # JSON string of the last built payload
        self.metrics_dict = {
            "metrics": []
        }
        self.metrics = []              # accumulated metric entries across posts

    def post(self):
        '''
        Obtain metric, build dictionary, convert to json and send POST request.

        returns: the requests.Response object (also stored in self.response)
        '''
        value = self.calculate_metric()
        self.generate_metrics_dict(value)
        self.response = requests.post(self.url,
                                      data=self.metrics_jso,
                                      headers=self.HEADERS)
        return self.response

    def set_metricname(self, metricname):
        self.metricname = metricname

    def set_appid(self, appid):
        self.appid = appid

    def set_hostname(self, hostname):
        self.hostname = hostname

    def calculate_metric(self):
        '''
        Here the logic is implemented by subclasses.
        returns: numeric value (int, float,...)
        '''
        pass

    def generate_metrics_dict(self, value):
        '''
        Build the dictionary, that will eventually be posted as JSON
        and print it.

        value: numeric metric value recorded under the current timestamp.
        '''
        ts_now = int(time() * 1000)    # AMS expects epoch milliseconds
        metric = {}
        metric["timestamp"] = ts_now
        metric["metricname"] = self.metricname
        metric["appid"] = self.appid
        # Fall back to the local FQDN when no hostname was set explicitly.
        # (The original guarded this with `if self.hostname:`, which made the
        # socket.getfqdn() fallback unreachable dead code.)
        metric["hostname"] = self.hostname or socket.getfqdn()
        metric["starttime"] = ts_now
        metric["metrics"] = {
            str(ts_now): value
        }
        self.metrics.append(metric)
        self.metrics_dict["metrics"] = self.metrics
        pprint(self.metrics_dict)
        self.metrics_jso = json.dumps(self.metrics_dict)

    def get_response(self):
        return self.response
class GenericMetricsReporter(MetricsReporter):
    '''
    Give the MetricsReporter a function that obtains the value and report it
    to AMS.

    Example: ms = GenericMetricsReporter(url, "my.metric", "myapp", get_value)
    '''

    def __init__(self, url_base, metricname, appid, obtain_metric):
        '''
        obtain_metric: zero-argument callable returning the metric value.

        raises: TypeError if obtain_metric is not callable.
        '''
        MetricsReporter.__init__(self, url_base, metricname, appid)
        if not callable(obtain_metric):
            # TypeError is the precise category for a wrong argument type and
            # is still caught by callers that handle the old bare Exception.
            raise TypeError("Parameter obtain_metric must be callable.")
        self.obtain_metric = obtain_metric

    def calculate_metric(self):
        '''
        Call the supplied function and return its value.
        '''
        return self.obtain_metric()
class RESTMetricsReporter(MetricsReporter):
    '''
    Get a metric from a REST call to a service.
    '''

    def __init__(self, url_base, metricname, appid, source_url, json_key):
        '''
        source_url: endpoint queried for the metric value.
        json_key: top-level key of the JSON response holding the value.
        '''
        MetricsReporter.__init__(self, url_base, metricname, appid)
        self.source_url = source_url
        self.json_key = json_key

    def set_source_url(self, source_url=None):
        # The original assigned self.source_url to itself (a no-op bug);
        # the default keeps zero-argument calls backward compatible.
        if source_url is not None:
            self.source_url = source_url

    def set_json_key(self, json_key=None):
        # Same no-op bug as set_source_url; fixed with an optional parameter.
        if json_key is not None:
            self.json_key = json_key

    def calculate_metric(self):
        '''
        Do a REST call, evaluate the response and obtain a value to store
        as metric.

        raises: Exception when the service does not answer with HTTP 200.
        '''
        response = requests.get(self.source_url)
        if response.status_code == 200:
            # Renamed from `json` to avoid shadowing the imported json module.
            payload = response.json()
            return payload[self.json_key]
        else:
            raise Exception("No value could be obtained from response!")
| StarcoderdataPython |
3496798 | # -*- coding: utf-8 -*-
## Used Imports
import os
import io
import zipfile
import random
import numpy as np
import streamlit as st
import clip
import gc
# import psutil ## show info (cpu, memeory)
from io import BytesIO
from PIL import Image
from zipfile import ZipFile
from pathlib import Path, PurePath, PureWindowsPath
# from streamlit import caching
## --------------- USED FUNCTIONS ---------------
def Predict_1_vs_0():
    """Set per-image predictions: 1 when query column 1 outscores column 0."""
    probs = st.session_state['init_data']['image_current_probs']
    predictions = [1 if row[1] > row[0] else 0 for row in probs]
    st.session_state['init_data']['image_current_predictions'] = np.array(predictions)
def Predict_0_vs_1():
    """Set per-image predictions: 1 when query column 0 outscores column 1."""
    probs = st.session_state['init_data']['image_current_probs']
    predictions = [1 if row[0] > row[1] else 0 for row in probs]
    st.session_state['init_data']['image_current_predictions'] = np.array(predictions)
def Predict_1_vs_2():
    """Set per-image predictions: 1 when query column 1 outscores column 2."""
    probs = st.session_state['init_data']['image_current_probs']
    predictions = [1 if row[1] > row[2] else 0 for row in probs]
    st.session_state['init_data']['image_current_predictions'] = np.array(predictions)
def Predict_0_vs_all():
    """Set per-image predictions: 1 when query column 0 has the highest score."""
    probs = st.session_state['init_data']['image_current_probs']
    predictions = [1 if np.argmax(row) == 0 else 0 for row in probs]
    st.session_state['init_data']['image_current_predictions'] = np.array(predictions)
def Predict_bald():
    """Two-stage baldness prediction over six query columns.

    Column 0 vs 1 picks a branch; the winning branch then compares
    columns 2 vs 3 (first branch) or columns 4 vs 5 (second branch).
    """
    probs = st.session_state['init_data']['image_current_probs']
    predictions = []
    for row in probs:
        if row[0] > row[1]:
            matched = row[2] > row[3]
        else:
            matched = row[4] > row[5]
        predictions.append(1 if matched else 0)
    st.session_state['init_data']['image_current_predictions'] = np.array(predictions)
def CLIP_Process():
    """Score every active image against the current text queries with CLIP.

    Fills st.session_state['init_data']['image_current_probs'] with one row
    of rounded image-to-text logits per image (shape: n_images x n_queries).
    """
    ## Tokenization process
    clip_model, clip_transform=Load_CLIP()
    clip_text = clip.tokenize(st.session_state['init_data']['current_querys']).to("cpu")
    n_tokens=len(st.session_state['init_data']['current_querys'])
    ## Image Process
    # One row of scores per image, one column per tokenized query.
    st.session_state['init_data']['image_current_probs']=np.zeros((st.session_state['init_data']['n_images'],n_tokens))
    for i in range(st.session_state['init_data']['n_images']):
        current_image_file = Load_Image(i)
        # Inference is pinned to "cpu" — presumably so the Streamlit host
        # needs no GPU; NOTE(review): confirm.
        img_preprocessed = clip_transform(Image.fromarray(current_image_file)).unsqueeze(0).to("cpu")
        img_logits, img_logits_txt = clip_model(img_preprocessed, clip_text)
        # Keep only the image->text logits, rounded to 2 decimals.
        st.session_state['init_data']['image_current_probs'][i,:]=np.round(img_logits.detach().numpy()[0],2)
        # Collect per image to keep memory bounded during inference.
        gc.collect()
    # Drop all model/tensor references and collect again before returning.
    del i,n_tokens,clip_model,clip_transform,clip_text,current_image_file,img_preprocessed,img_logits,img_logits_txt
    gc.collect()
def Image_discarding():
    """Discard every image whose prediction differs from the winner's, then
    compact the per-image bookkeeping arrays and remap the winner index.
    """
    data = st.session_state['init_data']
    winner_prediction = data['image_current_predictions'][data['current_winner_index']]
    # Mark still-active images whose prediction disagrees with the winner's.
    for idx in range(len(data['current_images_discarted'])):
        if data['current_images_discarted'][idx] == 0 and data['image_current_predictions'][idx] != winner_prediction:
            data['current_images_discarted'][idx] = 1
    # Rebuild name/path/prediction lists keeping only surviving images.
    old_names = data['current_image_names']
    old_paths = data['image_current_paths']
    old_predictions = data['image_current_predictions']
    kept_names = []
    kept_paths = []
    kept_predictions = []
    for idx in range(data['n_images']):
        if data['current_images_discarted'][idx] == 0:
            if idx == data['current_winner_index']:
                # The winner moves to its new, compacted position.
                data['current_winner_index'] = len(kept_names)
            kept_names.append(old_names[idx])
            kept_paths.append(old_paths[idx])
            kept_predictions.append(old_predictions[idx])
    data['n_images'] = np.sum(data['current_images_discarted'] == 0)
    data['current_image_names'] = np.array(kept_names)
    data['image_current_paths'] = np.array(kept_paths)
    data['image_current_predictions'] = kept_predictions
    # All surviving images start the next round undiscarded.
    data['current_images_discarted'] = np.zeros(data['n_images'])
def Show_images():
    """Return every active image framed with a colored border, scaled to [0,1].

    While results are shown, a thick green frame marks images whose prediction
    matches the winner's and a thick red frame marks mismatches; otherwise all
    images get a thin black frame.
    """
    showed_images=[]
    for current_index in range(st.session_state['init_data']['n_images']):
        if st.session_state['init_data']['show_results']:
            current_line_width=4
            # Green = same prediction as the hidden winner, red = ruled out.
            if st.session_state['init_data']['image_current_predictions'][current_index]==st.session_state['init_data']['image_current_predictions'][st.session_state['init_data']['current_winner_index']]:
                current_color=np.array([0,255,0])
            else:
                current_color=np.array([255,0,0])
        else:
            current_line_width=2
            current_color=np.zeros(3)
        image_size=240
        current_image_file=Load_Image(current_index)
        w,h,c = np.shape(current_image_file)
        # NOTE(review): the framing below mixes w and h, so it only renders
        # correctly for square images (Load_Image resizes to 224x224) — confirm.
        images_separation=image_size-w-current_line_width*2  # NOTE(review): computed but never used
        # White canvas, image inset by the border width.
        image_highlighted=np.zeros([h+current_line_width*2,image_size,c])+255
        image_highlighted[current_line_width:w+current_line_width,current_line_width:w+current_line_width,:]=current_image_file
        # Paint the four border strips: top, bottom, right, left.
        image_highlighted[:current_line_width,:w+2*current_line_width,:]=current_color
        image_highlighted[w+current_line_width:,:w+2*current_line_width,:]=current_color
        image_highlighted[:,w+current_line_width:w+2*current_line_width,:]=current_color
        image_highlighted[:,:current_line_width,:]=current_color
        showed_images.append(image_highlighted)
    ## result to array, normalized from 0-255 to 0-1 for st.image display
    showed_images=np.array(showed_images)/255
    del image_highlighted,current_index,current_line_width,current_color,image_size,current_image_file,w,h,c
    return showed_images
def Select_Images_Randomly():
    """Pick n_images distinct members from the game's zip archive.

    Stores the chosen archive paths and their short display names (the six
    characters before the file extension) in session state as numpy arrays.
    """
    data = st.session_state['init_data']
    # Context manager releases the archive handle (the original leaked it).
    with zipfile.ZipFile(data['zip_file'], 'r') as archive:
        listOfFileNames = archive.namelist()
    # random.sample draws n distinct indices in one call, replacing the
    # original manual choose-and-remove loop with the same distribution.
    chosen_indices = random.sample(range(len(listOfFileNames)), data['n_images'])
    image_paths = [listOfFileNames[idx] for idx in chosen_indices]
    # Display name: the characters of the file name just before ".jpg"/".png".
    image_names = [path[-10:-4] for path in image_paths]
    data['image_current_paths'] = np.array(image_paths)
    data['current_image_names'] = np.array(image_names)
def Load_Image(current_index):
    """Read image number current_index from the game zip archive.

    current_index: position into session state's image_current_paths.
    returns: numpy array of the image, resized to 224x224 (CLIP input size).
    """
    data = st.session_state['init_data']
    image_current_path = data['image_current_paths'][current_index]
    # Context manager closes the archive per call (the original opened a new
    # ZipFile on every load and never closed it — a file-handle leak).
    with zipfile.ZipFile(data['zip_file'], 'r') as archive:
        image_file = Image.open(BytesIO(archive.read(image_current_path)))
    if not (image_file.size[0] == 224 and image_file.size[1] == 224):
        image_file = image_file.resize((224, 224))
    return np.array(image_file)
def Show_Info():
    """Render the suggested-questions list in the Streamlit sidebar."""
    sidebar = st.sidebar
    sidebar.markdown('#### Questions List:')
    sidebar.write(st.session_state['init_data']['feature_questions'])
def Load_Data(total_images_number):
    """Initialize the whole game state in st.session_state['init_data'] and
    randomly select the initial images.

    total_images_number: how many images the game is played with.
    """
    st.session_state['init_data']={
        ## Game-flow flags and scoring
        'images_selected':False,
        'show_results':False,
        'start_game':False,
        'finished_game':False,
        'reload_game':False,
        'award':100,
        ## Current query/question state
        'token_type':0,
        'questions_index':0,
        'selected_question':'Are you a MAN?',
        'first_question':'Are you a MAN?',
        'user_input':'A picture of a person',
        'user_input_querys1':'A picture of a person',
        'user_input_querys2':'A picture of a person',
        'current_querys':['A picture of a person','A picture of a person'],
        ## Winner and image bookkeeping
        'selected_winner':'Winner not selected',
        'current_winner_index':-1,
        'N_images':total_images_number,
        'n_images':total_images_number,
        'zip_file':'guess_who_images.zip',
        'previous_zip_file':'guess_who_images.zip',
        'Showed_image_names':[],
        'current_images_discarted':np.zeros((total_images_number)),
        'winner_options':[],
        'current_image_names':[],
        'image_current_paths':[],
        'clip_tokens':['A picture of a person','A picture of a person'],
        'path_info':'D:/Datasets/Celeba/',
        'path_imgs':'D:/Datasets/Celeba/img_celeba/',
        ## CLIP query for a "yes" answer, parallel to feature_questions.
        ## NOTE(review): several entries contain typos ('wiht', 'smily',
        ## 'liped') that reach CLIP verbatim — confirm whether intentional.
        'querys_list_yes':['A picture of a male person', 'A picture of a female person', 'A picture of an attractive person', 'A picture of a fat person', 'A picture of a young person',
                           'A picture of a receding-hairline person ', 'A picture of a smily person', 'A picture of a bald person',
                           'A picture of a person with black hair', 'A picture of a person with brown hair', 'A picture of a person with blond hair', 'A picture of a person with red hair',
                           'A picture of a person with gray hair', 'A picture of a person with straight hair', 'A picture of a person with wavy hair',
                           'A picture of a glabrous person', 'A picture of a mustachioed person', 'A picture of a person with bushy sideburns',
                           'A picture of a person with goatee', 'A picture of a person with heavy makeup', 'A picture of a person with eyeglasses ',
                           'A picture of a person with bushy eyebrows', 'A picture of a double chin person',
                           'A picture of a person with high cheekbones', 'A picture of a person with opened mouth',
                           'A picture of a person with narrow eyes', 'A picture of a person with an oval-shaped face',
                           'A picture of a person wiht pale skin', 'A picture of a pointy-nosed person ', 'A picture of a person with colored cheeks',
                           "A picture of a five o'clock shadow person", 'A picture of a rounded eyebrows person', 'A picture of a person with bags under the eyes',
                           'A picture of a person with bangs', 'A picture of a wide-liped person', 'A picture of a big-nosed person',
                           'A picture of a person with earrings', 'A picture of a person with hat',
                           'A picture of a person with lipstick', 'A picture of a necklaced person',
                           'A picture of a necktied person'
                           ],
        ## CLIP counter-query for a "no" answer, parallel to querys_list_yes.
        'querys_list_no':['A picture of a female person', 'A picture of a male person', 'A picture of an ugly person', 'A picture of a slender person', 'A picture of a aged person',
                          'A picture of a hairy person', 'A picture of a person', 'A picture of a hairy person',
                          'A picture of a person', 'A picture of a person', 'A picture of a person', 'A picture of a person',
                          'A picture of a person', 'A picture of a person with wavy hair', 'A picture of a person with straight hair',
                          'A picture of a unshaved person', 'A picture of a person', 'A picture of a person with shaved sideburns',
                          'A picture of a person', 'A picture of a person with light makeup', 'A picture of a person ',
                          'A picture of a person with sparse eyebrows', 'A picture of a person with a double chin',
                          'A picture of a person with low cheekbones', 'A picture of a person with closed mouth',
                          'A picture of a person with wide eyes', 'A picture of a person with a normal-shaped face',
                          'A picture of a person wiht tanned skin', 'A picture of a flat-nosed person', 'A picture of a person with pale cheeks',
                          "A picture of a shaved or unshaved person", 'A picture of a person a straight eyebrows person', 'A picture of a person with with smooth skin under the eyes',
                          'A picture of a person', 'A picture of a narrow-liped person', 'A picture of a small-nosed person',
                          'A picture of a person', 'A picture of a person with hair',
                          'A picture of a person with natural lips', 'A picture of a person',
                          'A picture of a person'
                          ],
        ## Human-readable question shown for each query pair above.
        'feature_questions':['Are you a MAN?', 'Are you a WOMAN?', 'Are you an ATTRACTIVE person?', 'Are you an CHUBBY person?', 'Are you YOUNG?',
                             'Are you a person with RECEDING HAIRLINES?', 'Are you SMILING?','Are you BALD?',
                             'Do you have BLACK HAIR?', 'Do you have BROWN HAIR?', 'Do you have BLOND HAIR?', 'Do you have RED HAIR?',
                             'Do you have GRAY HAIR?', 'Do you have STRAIGHT HAIR?', 'Do you have WAVY HAIR?',
                             'Do you have a BEARD?', 'Do you have a MUSTACHE?', 'Do you have SIDEBURNS?',
                             'Do you have a GOATEE?', 'Do you wear HEAVY MAKEUP?', 'Do you wear EYEGLASSES?',
                             'Do you have BUSHY EYEBROWS?', 'Do you have a DOUBLE CHIN?',
                             'Do you have a high CHEECKBONES?', 'Do you have SLIGHTLY OPEN MOUTH?',
                             'Do you have NARROWED EYES?', 'Do you have an OVAL FACE?',
                             'Do you have PALE SKIN?', 'Do you have a POINTY NOSE?', 'Do you have ROSY CHEEKS?',
                             "Do you have FIVE O'CLOCK SHADOW?", 'Do you have ARCHED EYEBROWS?', 'Do you have BUGS UNDER your EYES?',
                             'Do you have BANGS?', 'Do you have a BIG LIPS?', 'Do you have a BIG NOSE?',
                             'Are you wearing EARRINGS?', 'Are you wearing a HAT?',
                             'Are you wearing LIPSTICK?', 'Are you wearing NECKLACE?',
                             'Are you wearing NECKTIE?'],
        'previous_discarding_images_number':0,
        # Default prediction strategy; replaced per question in the UI flow.
        'function_predict':Predict_0_vs_1,
        'image_current_probs':np.zeros((total_images_number,2)),
        # Sentinel value 2 = "not predicted yet" (predictions are 0/1).
        'image_current_predictions':np.zeros((total_images_number))+2}
    Select_Images_Randomly()
    del total_images_number
## --------------- MAIN FUCTION ---------------
def Main_Program():
## SIDEBAR
st.sidebar.markdown('# OPTIONS PANEL')
## Reset App APP
Reset_App = st.sidebar.button('RESET GAME', key='Reset_App')
## Images number
st.sidebar.markdown('# Number of images')
Total_Images_Number=st.sidebar.number_input('Select the number of images of the game and press "RESET GAME"', min_value=5, max_value=40, value=20,
step=1, format='%d', key='Total_Images_Number', help=None)
## INITIALIZATIONS
Feature_Options=['Ask a Question', 'Create your own query', 'Create your own 2 querys','Select a Winner']
## Load data to play
if 'init_data' not in st.session_state:
Load_Data(20)
## Title
if st.session_state['init_data']['finished_game']:
st.markdown("<h1 style='text-align:left; float:left; color:blue; margin:0px;'>Guess Who?</h1>", unsafe_allow_html=True)
else:
st.markdown("<h1 style='text-align:left; float:left; color:blue; margin:0px;'>Guess Who?</h1><h2 style='text-align:right; float:right; color:gray; margin:0px;'>score: "+ str(st.session_state['init_data']['award'])+"</h2>", unsafe_allow_html=True)
## GAME
if Reset_App:
Load_Data(Total_Images_Number)
Restart_App = st.button('GO TO IMAGES SELECTION TO START A NEW GAME', key='Restart_App')
else:
## FINISHED GAME BUTTON TO RELOAD GAME
if st.session_state['init_data']['finished_game']:
Restart_App = st.button('GO TO IMAGES SELECTION TO START NEW GAME', key='Restart_App')
if st.session_state['init_data']['award']==1 or st.session_state['init_data']['award']==-1:
st.markdown("<h1 style='text-align:left; float:left; color:black; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>¡¡¡ FINISHED WITH</h1><h1 style='text-align:left; float:left; color:green; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>"+str(st.session_state['init_data']['award'])+"</h1><h1 style='text-align:left; float:left; color:black; margin:0px;'>POINT !!!</h1>", unsafe_allow_html=True)
else:
st.markdown("<h1 style='text-align:left; float:left; color:black; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>¡¡¡ FINISHED WITH</h1><h1 style='text-align:left; float:left; color:green; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>"+str(st.session_state['init_data']['award'])+"</h1><h1 style='text-align:left; float:left; color:black; margin:0px;'>POINTS !!!</h1>", unsafe_allow_html=True)
else:
st.session_state['init_data']['images_selected']=False
## INITIALIZATION (SELECT FIGURES)
if not st.session_state['init_data']['start_game']:
## Select images source
st.sidebar.markdown('## Image selection source:')
Selected_Images_Source=st.sidebar.selectbox('(Choose between default random images or specific source path)',
['Use Celeba dataset random images',
'Use friends random images',
'Use family random images',
'Use images from specific path'],
index=0, key='Selected_Images_Source', help=None)
## Select images source - Celeba default
if Selected_Images_Source=='Use Celeba dataset random images':
st.session_state['init_data']['zip_file']='guess_who_images.zip'
if st.session_state['init_data']['zip_file']!=st.session_state['init_data']['previous_zip_file']:
st.session_state['init_data']['previous_zip_file']=st.session_state['init_data']['zip_file']
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Default source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>Press the button to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## Select images source - Friends default
if Selected_Images_Source=='Use friends random images':
st.session_state['init_data']['zip_file']='guess_who_images_friends.zip'
if st.session_state['init_data']['zip_file']!=st.session_state['init_data']['previous_zip_file']:
st.session_state['init_data']['previous_zip_file']=st.session_state['init_data']['zip_file']
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Default source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>Press the button to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## Select images source - Celeba default
if Selected_Images_Source=='Use family random images':
st.session_state['init_data']['zip_file']='guess_who_images_family.zip'
if st.session_state['init_data']['zip_file']!=st.session_state['init_data']['previous_zip_file']:
st.session_state['init_data']['previous_zip_file']=st.session_state['init_data']['zip_file']
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Default source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>Press the button to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## Select images source - Celeba specific path
if Selected_Images_Source=='Use images from specific path':
## Specific source text
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Choose the images you like.</h2>",
unsafe_allow_html=True)
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin:0px;'>To use images from specific path, press 'Use Path'. Press it again to randomly modify the selected images.</h3>",
unsafe_allow_html=True)
Uploaded_File = st.file_uploader("Select images to play", type=[".zip"],accept_multiple_files=False, key="Uploaded_file")
if Uploaded_File is not None:
st.session_state['init_data']['zip_file']= Uploaded_File
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
## Button - randomly change Celeba images
Random_Images = st.button('CHANGE IMAGES', key='Random_Images')
if Random_Images:
Select_Images_Randomly()
st.session_state['init_data']['winner_options']=st.session_state['init_data']['current_image_names']
if not (st.session_state['init_data']['zip_file']=='guess_who_images.zip' or st.session_state['init_data']['zip_file']=='guess_who_images_friends.zip' or st.session_state['init_data']['zip_file']=='guess_who_images_family.zip'):
## Button - start game
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to start the game.</h2>", unsafe_allow_html=True)
Use_Images = st.button('START GAME', key='Use_Images')
if Use_Images:
## Choose winner and start game
st.session_state['init_data']['current_winner_index']=random.choice(list(range(0,st.session_state['init_data']['N_images'])))
st.session_state['init_data']['start_game']=True
st.session_state['init_data']['images_selected']=True
## RUN GAME
if st.session_state['init_data']['start_game']:
## Text - Select query type (game mode)
if st.session_state['init_data']['images_selected']:
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>3. Select a type of Query to play.</h2>", unsafe_allow_html=True)
else:
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>1. Select a type of Query to play.</h2>", unsafe_allow_html=True)
## SelectBox - Select query type (game mode)
Selected_Feature=st.selectbox('Ask a question from a list, create your query or select a winner:', Feature_Options,
index=0,
key='selected_feature', help=None)
## SHOW ELEMENTS - QUESTIONS MODE
if Selected_Feature=='Ask a Question':
## Game mode id
st.session_state['init_data']['token_type']=0
## Text - Questions mode
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Select a Question from the list.</h3>", unsafe_allow_html=True)
## SelectBox - Select question
Selected_Question=st.selectbox('Suggested questions:', st.session_state['init_data']['feature_questions'],
index=0,
key='Selected_Question', help=None)
st.session_state['init_data']['selected_question']=Selected_Question # Save Info
## Current question index
if Selected_Question not in st.session_state['init_data']['feature_questions']:
Selected_Question=st.session_state['init_data']['feature_questions'][0]
st.session_state['init_data']['questions_index']=st.session_state['init_data']['feature_questions'].index(Selected_Question)
## Text - Show current question
st.markdown("<h3 style='text-align:center; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>Current Question: </h3><h3 style='text-align:left; float:center; color:green; margin:0px;'>"+Selected_Question+"</h3>", unsafe_allow_html=True)
## Button - Use current question
Check_Question = st.button('USE THIS QUESTION', key='Check_Question')
st.session_state['init_data']['button_question']=Check_Question # Save Info
## Check current question
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['show_results']=False
else:
if Check_Question:
if Selected_Question=='Are you bald?':
st.session_state['init_data']['current_querys']=['A picture of a male person','A picture of a female person',
'A picture of a bald man','A picture of a haired man',
'A picture of a bald person','A picture of a person']
st.session_state['init_data']['function_predict']=Predict_bald
elif Selected_Question=='Do you have BLACK HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a black-haired person',
'A picture of a tawny-haired person',
'A picture of a blond-haired person',
'A picture of a gray-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have BROWN HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a tawny-haired person',
'A picture of a black-haired person',
'A picture of a blond-haired person',
'A picture of a gray-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have BLOND HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a blond-haired person',
'A picture of a tawny-haired person',
'A picture of a black-haired person',
'A picture of a gray-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have RED HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a red-haired person',
'A picture of a tawny-haired person',
'A picture of a blond-haired person',
'A picture of a gray-haired person',
'A picture of a black-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif Selected_Question=='Do you have GRAY HAIR?':
st.session_state['init_data']['current_querys']=['A picture of a gray-haired person',
'A picture of a tawny-haired person',
'A picture of a blond-haired person',
'A picture of a black-haired person',
'A picture of a red-haired person',
'A picture of a green-haired person',
'A picture of a blue-haired person',
'A picture of a bald-head person']
st.session_state['init_data']['function_predict']=Predict_0_vs_all
elif not st.session_state['init_data']['show_results']:
st.session_state['init_data']['current_querys']=[st.session_state['init_data']['querys_list_yes'][st.session_state['init_data']['questions_index']],
st.session_state['init_data']['querys_list_no'][st.session_state['init_data']['questions_index']]]
st.session_state['init_data']['function_predict']=Predict_0_vs_1
CLIP_Process()
st.session_state['init_data']['function_predict']()
st.session_state['init_data']['show_results']=True
## SHOW ELEMENTS - 1 QUERY MODE
if Selected_Feature=='Create your own query':
## Game mode id
st.session_state['init_data']['token_type']=-1
## Text - Query mode
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Write your own query and press the button.</h3>", unsafe_allow_html=True)
## TextInput - Select query
User_Input = st.text_input('It is recommended to use a text like: "A picture of a ... person" or "A picture of a person ..." (CLIP will check -> "Your query" vs "A picture of a person" )', 'A picture of a person', key='User_Input', help=None)
st.session_state['init_data']['user_input']=User_Input # Save Info
## Text - Show current query
st.markdown("<h3 style='text-align:center; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>Current Query: </h3><h3 style='text-align:left; float:center; color:green; margin:0px;'>"+User_Input+"</h3>", unsafe_allow_html=True)
## Button - Use current query
Check_Query = st.button('USE MY OWN QUERY', key='Check_Query')
st.session_state['init_data']['button_query1']=Check_Query # Save Info
## Check current question
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['show_results']=False
else:
if Check_Query:
if User_Input!='A picture of a person':
st.session_state['init_data']['current_querys']=['A Picture of a person',User_Input]
st.session_state['init_data']['function_predict']=Predict_1_vs_0
CLIP_Process()
st.session_state['init_data']['function_predict']()
st.session_state['init_data']['show_results']=True
else:
st.markdown("<h3 style='text-align:left; float:left; color:red; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Your query must be different of 'A picture of a person'.</h3>", unsafe_allow_html=True)
## SHOW ELEMENTS - 2 QUERYS MODE
if Selected_Feature=='Create your own 2 querys':
## Game mode id
st.session_state['init_data']['token_type']=-2
## Text - Querys mode
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Write your own querys by introducing 2 opposite descriptions.</h3>", unsafe_allow_html=True)
## SelectBox - Select querys
User_Input_Querys1 = st.text_input('Write your "True" query:', 'A picture of a person',
key='User_Input_Querys1', help=None)
User_Input_Querys2 = st.text_input('Write your "False" query:', 'A picture of a person',
key='User_Input_Querys2', help=None)
st.session_state['init_data']['user_input_querys1']=User_Input_Querys1 # Save Info
st.session_state['init_data']['user_input_querys2']=User_Input_Querys2 # Save Info
## Text - Show current querys
st.markdown("<h3 style='text-align:center; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>Current Querys: </h3><h3 style='text-align:left; float:center; color:green; margin:0px;'>"+User_Input_Querys1+' vs '+User_Input_Querys2+"</h3>", unsafe_allow_html=True)
## Button - Use current querys
Check_Querys = st.button('USE MY OWN QUERYS', key='Check_Querys')
st.session_state['init_data']['button_query2']=Check_Querys # Save Info
## Check current querys
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['show_results']=False
else:
if Check_Querys:
if User_Input_Querys1!=User_Input_Querys2:
st.session_state['init_data']['current_querys']=[User_Input_Querys1,User_Input_Querys2]
st.session_state['init_data']['function_predict']=Predict_0_vs_1
CLIP_Process()
st.session_state['init_data']['function_predict']()
st.session_state['init_data']['show_results']=True
else:
st.markdown("<h3 style='text-align:left; float:left; color:red; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Your two own querys must be different.</h3>", unsafe_allow_html=True)
## SHOW ELEMENTS - WINNER MODE
if Selected_Feature=='Select a Winner':
## Game mode id
st.session_state['init_data']['token_type']=-3
## Text - Winner mode
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Select a Winner picture name.</h3>", unsafe_allow_html=True)
## SelectBox - Select winner
# st.session_state['init_data']['winner_options']=['Winner not selected']
# st.session_state['init_data']['winner_options'].extend(st.session_state['init_data']['current_image_names'])
# if st.session_state['init_data']['selected_winner'] not in st.session_state['init_data']['winner_options']:
# st.write(st.session_state['init_data']['selected_winner'])
# st.write(st.session_state['init_data']['winner_options'])
Selected_Winner=st.selectbox('If you are inspired, Select a Winner image directly:', st.session_state['init_data']['winner_options'],
index=0, key='Selected_Winner', help=None)
st.session_state['init_data']['selected_winner']=Selected_Winner # Save Info
## Text - Show current winner
st.markdown("<h3 style='text-align:center; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>Current Winner: </h3><h3 style='text-align:left; float:center; color:green; margin:0px;'>"+Selected_Winner+"</h3>", unsafe_allow_html=True)
## Button - Use current winner
Check_Winner = st.button('CHECK THIS WINNER', key='Check_Winner')
st.session_state['init_data']['button_winner']=Check_Winner # Save Info
## Check current winner
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['show_results']=False
else:
if Check_Winner:
if Selected_Winner in st.session_state['init_data']['current_image_names']:
st.session_state['init_data']['selected_winner_index']=np.where(Selected_Winner==st.session_state['init_data']['current_image_names'])[0]
st.session_state['init_data']['image_current_predictions']=np.zeros(st.session_state['init_data']['n_images'])
st.session_state['init_data']['image_current_predictions'][st.session_state['init_data']['selected_winner_index']]=1
st.session_state['init_data']['show_results']=True
# Delete Winner elements
# del st.session_state['Selected_Winner']
else:
st.markdown("<h3 style='text-align:left; float:left; color:red; margin-left:0px; margin-right:0px; margin-top:15px; margin-bottom:-10px;'>Your must select a not discarded picture.</h3>", unsafe_allow_html=True)
## ACTIONS SHOWING RESULTS
if st.session_state['init_data']['show_results']:
## Continue game
if not np.sum(st.session_state['init_data']['current_images_discarted']==0)==1:
if st.session_state['init_data']['images_selected']:
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>4. Press the button to continue.</h2>", unsafe_allow_html=True)
else:
st.markdown("<h2 style='text-align:left; float:left; color:black; margin:0px;'>2. Press the button to continue.</h2>", unsafe_allow_html=True)
## Button - Next query
Next_Query=st.button('NEXT QUERY', key='Next_Query')
## Show current results
if st.session_state['init_data']['token_type']==0:
if st.session_state['init_data']['image_current_predictions'][st.session_state['init_data']['current_winner_index']]:
st.markdown("<h3 style='text-align:left; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>"+st.session_state['init_data']['selected_question']+"</h3><h3 style='text-align:left; float:left; color:green; margin:0px;'>YES</h3>", unsafe_allow_html=True)
else:
st.markdown("<h3 style='text-align:left; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>"+st.session_state['init_data']['selected_question']+"</h3><h3 style='text-align:left; float:left; color:green; margin:0px;'>NO</h3>", unsafe_allow_html=True)
if st.session_state['init_data']['token_type']==-1:
if st.session_state['init_data']['image_current_predictions'][st.session_state['init_data']['current_winner_index']]:
st.markdown("<h3 style='text-align:left; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>"+st.session_state['init_data']['user_input']+"</h3><h3 style='text-align:left; float:left; color:green; margin:0px;'>TRUE</h3>", unsafe_allow_html=True)
else:
st.markdown("<h3 style='text-align:left; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>"+st.session_state['init_data']['user_input']+"</h3><h3 style='text-align:left; float:left; color:green; margin:0px;'>FALSE</h3>", unsafe_allow_html=True)
if st.session_state['init_data']['token_type']==-2:
if st.session_state['init_data']['image_current_predictions'][st.session_state['init_data']['current_winner_index']]:
st.markdown("<h3 style='text-align:left; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>The most accurate query is:</h3><h3 style='text-align:left; float:left; color:green; margin:0px;'>"+st.session_state['init_data']['user_input_querys1']+"</h3>", unsafe_allow_html=True)
else:
st.markdown("<h3 style='text-align:left; float:left; color:blue; margin-left:0px; margin-right:25px; margin-top:0px; margin-bottom:0px;'>The most accurate query is:</h3><h3 style='text-align:left; float:left; color:green; margin:0px;'>"+st.session_state['init_data']['user_input_querys2']+"</h3>", unsafe_allow_html=True)
if st.session_state['init_data']['token_type']==-3:
if not st.session_state['init_data']['selected_winner']==st.session_state['init_data']['current_image_names'][st.session_state['init_data']['current_winner_index']]:
st.markdown("<h3 style='text-align:left; float:left; color:gray; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>The winner picture is not:</h3><h3 style='text-align:left; float:center; color:red; margin:0px;'>"+st.session_state['init_data']['selected_winner']+"</h3>", unsafe_allow_html=True)
## CREATE IMAGES TO SHOW
Showed_Images=Show_images()
st.session_state['init_data']['Showed_image_names']=st.session_state['init_data']['current_image_names']
## APPLY DISCARDING
if st.session_state['init_data']['show_results']:
st.session_state['init_data']['previous_discarding_images_number']=st.session_state['init_data']['n_images']
Image_discarding()
## penalty - game not finished
if st.session_state['init_data']['n_images']>1:
st.session_state['init_data']['award']=st.session_state['init_data']['award']-st.session_state['init_data']['n_images']
## penalty - "select winner" option used
if st.session_state['init_data']['token_type']==-3:
st.session_state['init_data']['award']=st.session_state['init_data']['award']-1-(st.session_state['init_data']['N_images']-st.session_state['init_data']['previous_discarding_images_number'])
## penalty - no image is discarted
if st.session_state['init_data']['previous_discarding_images_number']==st.session_state['init_data']['n_images']:
st.session_state['init_data']['award']=st.session_state['init_data']['award']-5
## SHOW FINAL RESULTS
if st.session_state['init_data']['finished_game']:
st.session_state['init_data']['reload_game']=True
else:
## CHECK FINISHED GAME
if np.sum(st.session_state['init_data']['current_images_discarted']==0)==1 and not st.session_state['init_data']['finished_game']:
st.session_state['init_data']['finished_game']=True
st.markdown("<h1 style='text-align:left; float:left; color:black; margin-left:0px; margin-right:15px; margin-top:0px; margin-bottom:0px;'>You found the Winner picture:</h1><h1 style='text-align:left; float:left; color:green; margin:0px;'>"+st.session_state['init_data']['current_image_names'][st.session_state['init_data']['current_winner_index']]+"</h1>", unsafe_allow_html=True)
Finsih_Game = st.button('FINISH GAME', key='Finsih_Game')
## SHOW CURRENT IMAGES
st.image(Showed_Images, use_column_width=False, caption=st.session_state['init_data']['Showed_image_names'])
del Showed_Images
## RELOAD GAME
if st.session_state['init_data']['reload_game']:
Load_Data(st.session_state['init_data']['N_images'])
## SHOW EXTRA INFO
Show_Info()
## --------------- CACHE FUNCTION ---------------
@st.cache(ttl=12*3600)
def Load_CLIP():
    """Load the CLIP ViT-B/32 model on CPU.

    Cached by Streamlit for 12 hours so the model is not reloaded on every
    script rerun. Returns whatever ``clip.load`` returns (presumably the
    (model, preprocess) pair -- TODO confirm against the clip package).
    """
    return clip.load("ViT-B/32", device="cpu", jit=False)
## --------------- STREAMLIT APP ---------------
# Top-level script body: Streamlit re-executes this file on every user
# interaction, so configure the page, run the app once, then reclaim memory.
st.set_page_config(
    layout="wide",
    page_icon='Logo DIMAI.png',
    page_title='QuienEsQuien',
    initial_sidebar_state="collapsed"
)
## CLEAR RESOURCES
Main_Program()
gc.collect()
# caching.clear_cache()
# torch.cuda.empty_cache()
## SHOW INFO (cpu, memory)
# st.sidebar.write(psutil.cpu_percent()) ## show info (cpu, memory)
# st.sidebar.write(psutil.virtual_memory()) ## show info (cpu, memory)
| StarcoderdataPython |
__version__ = '19.6'  # package version string
| StarcoderdataPython |
11323899 | <gh_stars>0
from HappyDogs.lib.decorators import render
from HappyDogs.lib.utils import get_request_data
from django.views.decorators.http import require_http_methods
from models import BoardingVisit, Dog
from datetime import date
from django.db.models import Avg, Max, Min
from utils import weeks_beetwen
from utils import parse_date
from django.urls import reverse
from django.views.decorators.csrf import csrf_exempt
def happy_dogs(template=None, content_type=None, **kwargs):
    """Decorator factory: wrap a view with the shared ``render`` decorator,
    resolving *template* relative to the ``happy_dogs/`` template folder.
    """
    app_folder = 'happy_dogs'
    qualified_template = '/'.join([app_folder, str(template)])
    return render(path=qualified_template, content_type=content_type, **kwargs)
@happy_dogs(template="home.html", content_type="html")
def happy_dogs_home(request):
    """Landing page; no extra template context is required."""
    context = {}
    return context
@happy_dogs(template="dog.html", content_type="html")
def happy_dogs_dogs(request):
    """Dog list page; no extra template context is required.

    NOTE(review): the view name is plural but the template is ``dog.html``
    (the singular view uses ``dogs.html``) -- looks swapped; confirm
    against the template files before changing.
    """
    return dict()
@happy_dogs(template="dogs.html", content_type="html")
def happy_dogs_dog(request, dog_uuid=None):
    """Single-dog page; *dog_uuid* comes from the URL but is unused so far."""
    context = dict()
    return context
@happy_dogs(template="dogs.html", content_type="html")
def happy_dogs_create_dogs(request):
    """Create one random Dog, then redirect to the visit-generation view."""
    Dog.generate_random_dog()
    next_url = reverse('happy_dogs_create_dog_visits')
    return {'redirect': True, 'url': next_url}
@happy_dogs(template="dogs.html", content_type="html")
def happy_dogs_create_dog_visits(request):
    """Generate random visits for every dog, processed in batches of 10.

    The view redirects back to itself with an increasing ``index`` query
    parameter until all dogs are processed, then redirects home.
    """
    buck_size = 10
    url = reverse('happy_dogs_create_dog_visits')
    index = request.GET.get('index') or 0
    data = Dog.objects.all()
    try:
        index = int(index)
    except (TypeError, ValueError):  # narrowed from a bare ``except:``
        # A malformed index jumps past the end and aborts the batch loop.
        index = data.count()
    if index < data.count():
        # BUG fix: was ``count() - 1`` when clamping, which silently
        # skipped the last dog of the final batch.
        end = min(index + buck_size, data.count())
        for dog in data[index:end]:
            # BUG fix: the attribute was referenced without calling it, so
            # no visit was generated.  (If ``generate_random_visit`` is
            # actually a property that works on access, drop the call.)
            dog.generate_random_visit()
        # BUG fix: the original did ``index = end`` followed by
        # ``index += 1``, skipping one dog between consecutive batches.
        index = end
    if index < data.count():
        url = "{}?index={}".format(url, index)
    else:
        url = reverse('happy_dogs_home')
    return {
        'redirect': True,
        'url': url,
    }
@happy_dogs(template="dogs.html", content_type="html")
def happy_dogs_create_data(request):
    """Wipe all Dog and BoardingVisit rows, then redirect to dog creation."""
    Dog.clear()
    BoardingVisit.clear()
    redirect_to = reverse('happy_dogs_create_dogs')
    return {'redirect': True, 'url': redirect_to}
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_add_dogs_visit(request):
    """JSON endpoint: add a boarding visit for the dog identified by ``uuid``.

    Query parameters: ``uuid`` (required), ``start_date``/``end_date``
    (optional strings, forwarded to ``Dog.add_visit``).
    Responds with ``{'dog': {'added': bool, 'error_message': str}}``.
    """
    error_message = ""
    added = False
    start_date = request.GET.get('start_date') or None
    if start_date is not None and start_date.strip():
        start_date = "{}".format(start_date)
    end_date = request.GET.get('end_date') or None
    if end_date is not None and end_date.strip():
        end_date = "{}".format(end_date)
    uuid = request.GET.get('uuid') or None
    if uuid is not None and uuid.strip():
        dog = Dog.objects.filter(uuid=uuid).first()
        if dog is not None:
            try:
                # Only ``added`` is reported back; the visit object is unused.
                visit, added = dog.add_visit(start_date=start_date, end_date=end_date)
            except Exception as e:  # BUG fix: was py2-only ``except Exception, e:``
                error_message = "{}".format(e)
        else:
            error_message = "Dog not selected"
    else:
        error_message = "Dog not selected"
    data = {
        'added': added,
        'error_message': error_message,
    }
    return {
        'response_data': {'dog': data},
    }
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_detail(request):
    """JSON endpoint: list the boarding visits active on the given ``date``."""
    visit_rows = []
    raw_date = request.GET.get('date') or None
    if raw_date is not None:
        raw_date = parse_date(input_date=raw_date)
    if raw_date is not None:
        visit_rows = [
            {
                'dog_name': visit.dog_name,
                'dog_url': visit.dog_url,
                'start_date': visit.start_date.strftime('%m/%d/%Y'),
                'end_date': visit.end_date.strftime('%m/%d/%Y'),
            }
            for visit in BoardingVisit.date_visits(date_obj=raw_date)
        ]
    return {
        'response_data': {'detail': visit_rows},
    }
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_dogs(request):
    """JSON endpoint: summary record for every dog on file."""
    summaries = [
        {
            'full_name': dog.full_name,
            'url': dog.url,
            'uuid': dog.uuid,
            'visits': dog.visits,
            'in_house': dog.is_the_house(),
            'in_house_label': dog.is_the_house_label(),
        }
        for dog in Dog.objects.all()
    ]
    return {
        'response_data': {'dogs': summaries},
    }
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_dog(request):
    """JSON endpoint: full detail for a single dog looked up by ``uuid``."""
    detail = {}
    requested_uuid = request.GET.get('uuid') or None
    dog = None
    if requested_uuid is not None:
        dog = Dog.objects.filter(uuid=requested_uuid).first()
    if dog is not None:
        detail = {
            'full_name': dog.full_name,
            'first_name': dog.first_name,
            'last_name': dog.last_name,
            'url': dog.url,
            'uuid': dog.uuid,
            'visits': dog.visits_detail,
            'in_house': dog.is_the_house(),
            'in_house_label': dog.is_the_house_label(),
        }
    return {
        'response_data': {'dog': detail},
    }
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_add_dog(request):
    """JSON endpoint: create a dog from ``first_name``/``last_name`` params.

    Responds with ``{'dog': {'added': bool, 'error_message': str}}``.
    """
    error_message = ""
    added = False
    first_name = request.GET.get('first_name') or None
    last_name = request.GET.get('last_name') or None
    if first_name is not None and first_name.strip():
        try:
            # Only ``added`` is reported back; the dog object is unused here.
            dog, added = Dog.add(first_name=first_name, last_name=last_name)
        except Exception as e:  # BUG fix: was py2-only ``except Exception, e:``
            error_message = "{}".format(e)
    data = {
        'added': added,
        'error_message': error_message,
    }
    return {
        'response_data': {'dog': data},
    }
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_update_dog(request):
    """JSON endpoint: rename the dog identified by ``uuid``.

    Responds with the updated dog detail plus ``saved``/``error_message``
    flags, or an empty dict when the dog cannot be found.
    """
    data = {}
    uuid = request.GET.get('uuid') or None
    first_name = request.GET.get('first_name') or None
    last_name = request.GET.get('last_name') or None
    error_message = ""
    saved = False
    # BUG fix: ``dog`` was only assigned inside the ``uuid`` branch, so the
    # ``dog is not None`` check below raised NameError when ``uuid`` was
    # missing from the query string.
    dog = None
    if uuid is not None:
        dog = Dog.objects.filter(uuid=uuid).first()
        if first_name is not None and first_name.strip():
            try:
                dog = dog.update(first_name=first_name, last_name=last_name)
                saved = True
            except Exception as e:  # BUG fix: was py2-only ``except Exception, e:``
                error_message = "{}".format(e)
    if dog is not None:
        data = {
            'full_name': dog.full_name,
            'first_name': dog.first_name,
            'last_name': dog.last_name,
            'url': dog.url,
            'uuid': dog.uuid,
            'visits': dog.visits_detail,
            'in_house': dog.is_the_house(),
            'in_house_label': dog.is_the_house_label(),
            'saved': saved,
            'error_message': error_message,
        }
    return {
        'response_data': {'dog': data},
    }
@csrf_exempt
@happy_dogs(template="", content_type="json")
def happy_dogs_rest_visits(request):
    """JSON endpoint: per-day in-house dog counts, grouped into weeks.

    Optional ``start_date``/``end_date`` parameters restrict the range;
    otherwise the range spans the earliest to the latest recorded visit.
    Responds with ``{'weeks': [[{day-dict}, ...], ...]}``.

    Fixes vs. the original: removed a dead bare-expression statement
    (``day_index`` on its own line) and an unused ``range = None`` that
    shadowed the builtin.
    """
    weeks = []
    start_date = request.GET.get('start_date') or None
    if start_date is not None:
        start_date = parse_date(input_date=start_date)
    end_date = request.GET.get('end_date') or None
    if end_date is not None:
        end_date = parse_date(input_date=end_date)
    visits = BoardingVisit.visits(start_date=start_date, end_date=end_date)
    daysList = []
    if visits is not None and visits.exists():
        today = date.today()
        max_min = visits.aggregate(Max('start_date'), Min('start_date'), Max('end_date'), Min('end_date'))
        min_date = None
        max_date = None
        if max_min is not None:
            end_date__max = max_min.get('end_date__max') or None
            end_date__min = max_min.get('end_date__min') or None
            start_date__max = max_min.get('start_date__max') or None
            start_date__min = max_min.get('start_date__min') or None
            # Earliest date seen in either column; fall back to today.
            if end_date__min is not None and start_date__min is not None:
                min_date = min(end_date__min, start_date__min)
            elif end_date__min is not None:
                min_date = end_date__min
            elif start_date__min is not None:
                min_date = start_date__min
            else:
                min_date = today
            # Latest date seen in either column; fall back to today.
            if end_date__max is not None and start_date__max is not None:
                max_date = max(end_date__max, start_date__max)
            elif end_date__max is not None:
                max_date = end_date__max
            elif start_date__max is not None:
                max_date = start_date__max
            else:
                max_date = today
        if end_date is not None and start_date is not None:
            weeks_count, days_count, starting_date, ending_date, daysList = weeks_beetwen(start_date=start_date, end_date=end_date, filtered=True)
        else:
            weeks_count, days_count, starting_date, ending_date, daysList = weeks_beetwen(start_date=min_date, end_date=max_date)
        day_index = 1
        week_data = []
        week_counter = 1
        for day in daysList:
            dogs_in_house = BoardingVisit.dogs_in_house(date_obj=day.date())
            week_data.append({
                'dogs_in_house': dogs_in_house,
                'date': day.strftime('%m/%d/%Y'),
                'weekday': day.isoweekday(),
                'small_date': day.strftime('%b %d'),
                'week': week_counter,
            })
            day_index += 1
            if day_index > 7:
                # A full week collected: flush it and start the next one.
                # NOTE(review): a trailing partial week is discarded --
                # confirm this is the intended behaviour.
                weeks.append(week_data)
                week_data = []
                day_index = 1
                week_counter += 1
    response_data = {
        'weeks': weeks,
    }
    return {
        'response_data': response_data,
    }
11320049 | """
Image Finder
"""
import os
from pathlib import Path
from typing import Union
from ImageCopy.image_file import ImageFile
class ImageFinder:
    """
    Utility to find all images in any given directory
    """

    @staticmethod
    def get_images_dict(directory: Union[str, Path], target_dir: Union[str, Path], filter_extensions: list = None) -> dict:
        """Map every image found under *directory* to *target_dir*.

        :param directory: image source
        :param target_dir: output directory
        :param filter_extensions: extensions to keep; when ``None`` the
            standard RAW and JPG extensions from ``ImageFile`` are used
        :return: dict of ImageFile objects -> output path
        """
        if filter_extensions is None:
            filter_extensions = ImageFile.raw_extensions + ImageFile.image_extensions
        found = ImageFinder._get_image_list(directory, filter_extensions)
        return dict.fromkeys(found, target_dir)

    @staticmethod
    def _get_image_list(directory: Union[str, Path], filter_extensions: list) -> list:
        """Recursively collect image files below *directory*.

        With a filter list, matching files are wrapped in ``ImageFile``;
        with ``None``, raw ``Path`` entries are returned unfiltered.

        :param directory: start searching for image files here
        :param filter_extensions: lower-case extensions to accept, or None
        :return: list of ImageFile objects (or Path objects when unfiltered)
        """
        collected = []
        for child in Path(directory).iterdir():
            if not child.is_file():
                # Recurse into subdirectories and splice in their results.
                collected += ImageFinder._get_image_list(child, filter_extensions)
                continue
            if filter_extensions is None:
                collected.append(child)
                continue
            _, ext = os.path.splitext(str(child))
            ext = ext.lower()
            if ext in filter_extensions:
                collected.append(ImageFile(child, ext))
        return collected
| StarcoderdataPython |
3398762 | <reponame>followwwind/flask-web<filename>routes/hello.py
# -*- encoding:utf-8 -*-
"""
@author:wind
@time:2021/3/23 20:37
@desc:
"""
from config import app
@app.route('/hello')
def hello_world():
    """Respond to GET /hello with a plain-text greeting."""
    greeting = 'Hello World!'
    return greeting
| StarcoderdataPython |
3442857 | <gh_stars>100-1000
from unittest import TestCase
from SingleNumber import SingleNumber
class TestSingleNumber(TestCase):
    """Unit tests for ``SingleNumber.singleNumber``."""

    def test_singleNumber(self):
        """Cover the None, single-element and duplicated-element cases."""
        sn = SingleNumber()
        # assertEqual (rather than assertTrue on a comparison) reports the
        # actual vs. expected values when a case fails.
        self.assertEqual(sn.singleNumber(None), 0)
        self.assertEqual(sn.singleNumber([1]), 1)
        self.assertEqual(sn.singleNumber([1, 1, 2]), 2)
        self.assertEqual(sn.singleNumber([1, 1, 2, 2, 3]), 3)
| StarcoderdataPython |
5127554 | <gh_stars>1-10
from django.apps import AppConfig
class DjangoSessionJwtConfig(AppConfig):
    """AppConfig registering the ``django_session_jwt`` application."""

    # Dotted module path Django uses to identify this app.
    name = 'django_session_jwt'
| StarcoderdataPython |
1981153 | from textwrap import dedent, indent
from typing import Union, List
class MultiLineFormatter:
    """
    Builds indented multi-line text through operator shorthand:

    \\* operator -> add de-dented text (+ \\\\n), if operand is a list -> add \\\\n-joined elements
    % operator -> add de-dented text (+ \\\\n), if operand is a list -> add ,-joined elements
    / operator -> increase indentation level and add text (+ \\\\n)
    // operator -> decrease indentation level and add text (+ \\\\n)
    """

    def __init__(self, indent_str=' ' * 4) -> None:
        # Accumulated output, active indentation prefix, and the unit of
        # indentation added/removed per level.
        self.text = ''
        self.current_indent = ''
        self.indent_str = indent_str

    def __mul__(self, other: Union[str, List[str]]) -> 'MultiLineFormatter':
        if not isinstance(other, str):
            self.text += '\n'
            return self.append_lines(other)
        return self.append(other)

    def __mod__(self, other: Union[str, List[str]]) -> 'MultiLineFormatter':
        if not isinstance(other, str):
            return self.append_lines(other, sep=', ')
        return self.append(other, sep=', ')

    def __truediv__(self, other: str) -> 'MultiLineFormatter':
        deeper = self.indent()
        return deeper * other if other else deeper

    def __floordiv__(self, other) -> 'MultiLineFormatter':
        return self.dedent() * other

    def __str__(self) -> str:
        # Normalise: strip surrounding blank space, exactly one final newline.
        return self.text.strip() + '\n'

    def append(self, txt: str, sep='\n') -> 'MultiLineFormatter':
        """Append *sep*, then *txt* re-indented to the current level."""
        self.text += sep
        if txt:
            self.text += indent(dedent(txt), self.current_indent)
        return self

    def append_lines(self, lines, sep='\n') -> 'MultiLineFormatter':
        """Append the truthy *lines*, re-indented and joined with *sep*."""
        pieces = []
        for t in lines:
            if not t:
                continue
            cleaned = '' if t == '\n' else dedent(t)
            pieces.append(indent(cleaned, self.current_indent))
        self.text += sep.join(pieces)
        return self

    def indent(self) -> 'MultiLineFormatter':
        """Go one indentation level deeper."""
        self.current_indent += self.indent_str
        return self

    def dedent(self) -> 'MultiLineFormatter':
        """Go one indentation level back up (must not go below zero)."""
        assert len(self.current_indent) >= len(self.indent_str)
        self.current_indent = self.current_indent[:-len(self.indent_str)]
        return self
| StarcoderdataPython |
8045176 | <reponame>dannyjeck-matroid/solaris
from . import bin, data, eval, nets, raster, tile, utils, vector
__version__ = "0.1.3"
| StarcoderdataPython |
3579372 | <gh_stars>1-10
# Copyright [2017] [<NAME>]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Control file to run simple carousel analysis and return
attenuation correction function. Based on IDL code by <NAME>.
This is the command line interface which is executed by:
python ../src/runCarouselFit.py
within the "test" directory. The location is important since the
code assumes that spectral data is available in the "spectra"
subdirectory which is currently in test.
Most of the fitting operations are implemented in the file carouselUtils.py.
This code should run on both Python 2 and Python 3.
"""
from __future__ import print_function
import sys
import os.path
import logging
import timeit
import time
import numpy as np
import itertools as it
from numpy.polynomial.polynomial import polyval
# import pyplot, checking if NOX11 env var set; if so use Agg plotting (no X11)
try:
import matplotlib
if 'NOX11' in os.environ:
matplotlib.use("Agg")
import matplotlib.pyplot as plt
except ImportError:
sys.exit("Error: cant find matplotlib")
from . import carouselUtils as cu
import pdb
markerErr = it.cycle((',', '+', '.', 'o', '*'))
def loadAll(string):
    """Read the carousel definition file and the calibration run file.

    ``string`` is the tokenised command line: ``['load', <cardef>, <carrun>]``.
    On success the module-level globals ``carouselData``, ``carouselCal`` and
    ``xSpec`` are (re)populated; on failure a message is printed and the
    function returns early, possibly leaving the globals partially updated.
    """
    global carouselData, carouselCal, xSpec
    if len(string)<3:
        print("syntax: load <cardef> <carrun>")
        return
    if debug:
        # The module-level debug flag drops us into the interactive debugger.
        pdb.set_trace()
    carouselData = cu.carousel(string[1])
    if not carouselData.isValid():
        print("** failed to load carousel data")
        return
    carouselCal = cu.carouselCalibrationData(string[2], carouselData)
    if not carouselCal.isValid():
        print("** failed to load calibration data")
        return
    xSpec = carouselCal.spec
    # set guess for spectra peak to half the maximum X-ray voltage
    startX[4] = carouselCal.voltage/2.
    # If no pre-computed spectrum is available, switch to fitting the
    # spectrum (vary is presumably a numpy array -- slice-assigning a
    # scalar broadcasts over elements 4 onward; TODO confirm).
    if carouselCal.spec.getS() is None and vary[4]==-1:
        print("Using 'vary spectra 0' as pre-computed spectra not found")
        vary[4:] = 0
    if not xSpec.isValid():
        sys.exit("** failed to load spectrum")
    print(" Checking sample attenuation is in expected range:")
    carouselCal.printImageStats(carouselData)
def showImg(string):
    """Plot the calibration images on one figure (non-blocking).

    ``string`` is the tokenised command line; no arguments are used.
    Requires ``load`` to have populated the ``carouselCal`` global.
    """
    if carouselCal is None:  # idiom fix: identity test, not ``== None``
        print("must load data first")
        return
    try:
        width = carouselCal.width
    except AttributeError:  # narrowed from a bare ``except:``
        print("** must read calibration data first")
        return
    plt.figure(FIG_IMG)
    carouselCal.plotCalData(False, width)
    plt.show(block=False)
def showSpec(string):
    """Plot the source spectrum together with the filtered and detector
    response spectra, and print their mean energy / spread statistics.

    ``string`` is the tokenised command line; an optional second token
    selects the fitted line index (0 <= line < fit.nlines).
    Note that these just use input data, not fitted data.
    """
    if carouselCal == None:
        print("must load data first")
        return
    # if no SpeKCalc data available, need to allow fitted spectra
    if carouselCal.spec.getS() is None and vary[4]==-1:
        print("** No spectrum is defined. Must use e.g. 'vary spectra 0'")
        return
    if xSpec.valid:
        plt.figure(FIG_SPEC)
        line = 0
        if len(string) > 1:
            try:
                line = int(string[1])
            except ValueError:
                print("Unrecognised line number")
                return
            if line<0 or line>=fit.nlines:
                print("line must be >=0 and < ",fit.nlines)
                return
        if carouselCal.filterCount>0:
            n = len(xSpec.getE())
            try:
                # this is PATCHED UP to get the Gaussian spectra if this option
                # has been selected and to push this into the calculation pipeline
                # HOWEVER the above plot has not been fixed up, neither has contant
                # values for others??? Or is that done by calcWidths?
                tw,dw,fw,ec,spectra = fit.calcWidths(res,fit.nlines,xSpec.getE())
                varf = fit.varFilter
                if isinstance(spectra,np.ndarray):
                    norm = np.sum(spectra)
                    attSpec = spectra/norm
                else:
                    norm = np.sum(xSpec.getS())
                    attSpec = xSpec.getS()/norm
                plt.plot(xSpec.getE(), attSpec,label='raw')
            except:
                # Fallback when no fit result exists yet.
                # NOTE(review): on this path ``attSpec`` (and ``norm``) are
                # never assigned, so the filter loop below would raise
                # NameError -- confirm whether this path is reachable.
                line = 0
                print("No fit, using preset values")
                tw= [0.]
                dw= [0.1]
                varf = 1 # maybe?
                fw= [carouselCal.filterWidth[varf]]
                ec = xSpec.getE()
            # Attenuate the spectrum through every filter; the "varied"
            # filter uses the fitted width for the selected line.
            for i in range(carouselCal.filterCount):
                # allow for unphysical -ve filter thicknesses
                if varf==i:
                    expo = -carouselCal.filterAtten[i].getMu()[0:n]*fw[line]
                else:
                    expo = -carouselCal.filterAtten[i].getMu()[0:n]*carouselCal.filterWidth[i]
                # Clamp the exponent to avoid overflow in exp().
                expo[expo>600] = 600
                attSpec = attSpec*np.exp(expo)
                #attSpec = attSpec*np.exp(-carouselCal.filterAtten[i].getMu()[0:n]*carouselCal.filterWidth[i])
            #
            norma = np.sum(attSpec)
            attSpec = attSpec/norma
            plt.plot(xSpec.getE(),attSpec,label='filtered')
            # First and second moments of the filtered spectrum.
            meanE = np.sum(attSpec*xSpec.getE())
            dev2 = np.sum(attSpec*xSpec.getE()*xSpec.getE()) - meanE*meanE
            print("For filtered spectrum:")
            print("mean E =",meanE," std dev = ",np.sqrt(dev2)," total atten ratio = ",1.0/norma)
            #
            dE = xSpec.getE()[1]-xSpec.getE()[0]
            nmean = int(meanE/dE)
            # This expression is not valid if using fitted spectra
            #print(" atten ratio at mean energy = ",xSpec.getS()[nmean]/(attSpec[nmean]*norma*norm))
            #
            # Apply the target attenuation and the detector response
            # (energy-weighted absorption) to obtain the response spectrum.
            expo = -carouselCal.targetAtten.getMu()[0:n]*tw[line]
            expo[expo>600] = 600
            attSpec = attSpec*np.exp(expo)
            detWid = dw[line]
            attDet = detWid*carouselCal.detectorAtten.getMu()[:len(attSpec)]
            resSpec = attSpec*ec*(1.-np.exp(-attDet))
            resSpec = resSpec/np.sum(resSpec)
            plt.plot(xSpec.getE(),resSpec,label='response')
            meanE = np.sum(resSpec*xSpec.getE())
            dev2 = np.sum(resSpec*xSpec.getE()*xSpec.getE()) - meanE*meanE
            print("For response spectrum:")
            print("mean E =",meanE," std dev = ",np.sqrt(dev2)," total atten ratio = ",1.0/norma)
            #
            if carouselCal.whiteLevel>0:
                print("Calculation used whiteLevel = ",carouselCal.whiteLevel)
            #else:
            #    print("string=",string)
        plt.legend()
        plt.xlabel('KeV')
        plt.ylabel('S(E) (normalised)')
        plt.draw()
        plt.show(block=False)
    else:
        print("must load data first")
def quitCarousel(string):
    """Terminate the interactive command loop with a success status.

    The argument is the tokenised command line; it is not used here.
    """
    raise SystemExit(0)
def showAtt(string):
    """ 1D plots of attenuation of sample n.

    string is the tokenised command line: "showatt [sample [line]]".
    With a sample number, plot one image row of that sample's attenuation;
    with no arguments, overlay the default row of every sample.
    """
    if carouselCal == None:
        print("must load data first")
        return
    # default image row to plot when no line argument is supplied
    defline=400
    plt.figure(FIG_ATT1D)
    if len(string)>1:
        try:
            samp = int(string[1])
        except ValueError:
            print("failed to read sample value")
            return
        if len(string)>2:
            try:
                defline = int(string[2])
            except ValueError:
                # keep the default line rather than abort
                print("failed to read line number")
        if samp > -1 and samp < carouselCal.samples and defline > -1 and defline < carouselCal.lines:
            z = carouselCal.getImage(samp)
            plt.plot(z[defline,:])
            plt.xlabel("Column number at line "+str(defline))
            plt.ylabel("attenuation")
            plt.draw()
            plt.show(block=False)
        else:
            # fix: the old message blamed only the sample number, but the
            # line number can equally be the value that is out of range
            print("sample or line number out of range")
    else:
        # no sample given: plot the default line from every sample
        for i in range(carouselCal.samples):
            z = carouselCal.getImage(i)
            plt.plot(z[defline,:])
            plt.xlabel("Column number at line "+str(defline))
            plt.ylabel("attenuation")
            plt.draw()
            plt.show(block=False)
    return
def showCor(string):
    """ plot the fitted correction curve from the polynomial data.

    string is the tokenised command line; any extra tokens are line numbers
    to plot. With no arguments the first, middle and last fitted lines are
    shown. Requires fitatt to have populated the global xtab/ytab/polyfit.
    """
    if not 'xtab' in globals():
        print("No correction data available; run fitatt first")
        return
    if len(string)==1:
        # default selection: first, middle and last fitted lines
        linevals = [ 0, int((len(xtab)-1)/2), len(xtab)-1 ]
    else:
        linevals = []
        for i in range(len(string)-1):
            try:
                lineno=int(string[i+1])
            except ValueError:
                print("Bad integer value: ",string[i+1])
                return
            # fix: valid row indices are 0..len(xtab)-1; the previous test
            # used <= which admitted lineno==len(xtab) and indexed past the end
            if lineno>=0 and lineno<len(xtab):
                linevals.append(lineno)
            else:
                print("skipping out of range value: ",lineno)
    # check if we actually have more than one line fitted
    if len( polyfit[:,0] ) > 1 :
        print("lines=",linevals)
    else:
        print("Only 1 line fitted to data; using line 0")
        linevals = [0]
    mymax=np.max(xtab[:,-1])
    xval = np.linspace(0.0,mymax,num=300)
    plt.figure(FIG_COR)
    for line in linevals:
        # polyfit stores coefficients highest order first; polyval wants lowest first
        yval = polyval( xval,polyfit[line,::-1])
        plt.plot(xtab[line,:],ytab,xval,yval,':')
    plt.xlabel('Observed log attenuation ratio')
    plt.ylabel('Effective single energy log attenuation ratio')
    # add the x=y line for comparison with circles for fit points
    nsamp = len(carouselData.mask)-1
    xarr = np.zeros(nsamp)
    count = 0
    for i in range(nsamp):
        if not carouselData.mask[i]:
            xarr[count] = carouselCal.getAvAtten(linevals[0],i)
            count = count+1
    plt.plot([0.0,mymax],[0.0,mymax],'r--')
    # fix: only the first `count` slots are filled; plotting the full array
    # added spurious points at the origin whenever samples were masked
    plt.plot(xarr[:count],xarr[:count],'ro')
    plt.draw()
    plt.show(block=False)
def setFilter(string):
    """ List filters defined or change settings of an existing filter. Can not at
        present add a new filter.

    string is the tokenised command line: "setfilter [material width]".
    With no arguments, list the current filters and widths.
    """
    try:
        if carouselCal.isValid():
            if len(string) == 3:
                try:
                    mat = string[1]
                    val = float(string[2])
                except ValueError:
                    print("** Bad values")
                    return
                for i in range(carouselCal.filterCount):
                    if carouselCal.filterMaterial[i] == mat:
                        carouselCal.filterWidth[i] = val
                        print("set ",mat," filter width to ",val)
                        return
                print("filter type not found")
            else:
                print("Filters set:")
                print("filter, width:")
                for i in range(carouselCal.filterCount):
                    print(carouselCal.filterMaterial[i], ",", carouselCal.filterWidth[i])
    except AttributeError:
        # carouselCal is the module global, None until "load" has been run;
        # the old bare except also hid unrelated errors
        print("no carousel data file loaded")
def fitAtt(string):
    """ Check necessary data has been set then fit model to carousel data.
        Finally generate curves for attenuation over each line using the
        correction material (corMat/corEn) and then fit a polynomial to this for
        correction purposes.

        string is the tokenised command line: "fitatt nlines [linestep]".
        Results are published through the module globals declared below and
        written to fit.log / param.log / polyfit.npz.
    """
    global res,xtab,ytab,fit,polyfit,xpolyfit
    # lstep is the line step; e.g. 1 for every line, 2 for every other line in fitting
    lstep = 1
    # --- preconditions: carousel data, calibration and correction material ---
    if carouselData == None or carouselCal == None:
        print("must load data first")
        return
    if not carouselData.valid or not carouselCal.valid:
        print("data not correctly loaded")
        return
    if corMat.name=="":
        print(" ** Must define corrrection material and energy using 'setcormat'")
        return
    # if no SpeKCalc data available, need to allow fitted spectra
    if carouselCal.spec.getS() is None and vary[4]==-1:
        print("** No spectrum is defined. Must use e.g. 'vary spectra 0'")
        return
    # default filter material used by the fit object
    defMat = "Cu"
    fit = cu.fitData(carouselData, carouselCal, defMat)
    fit.verbose = debug
    if debug:
        np.seterr(over='warn',invalid='warn')
    else:
        np.seterr(over='ignore',invalid='ignore')
    if np.max(vary)<0:
        print("** Error: no parameters to fit, check setvary")
        return
    # x is the packed initial-guess vector: 7 base slots plus the extra
    # higher-order polynomial terms selected via setvary
    x = np.zeros(7+np.sum(vary))
    if len(string) == 2 or len(string) == 3:
        print("Fitting variables: ",np.sum(vary)+len(vary))
        # The fit function consists of 3 polynomial expressions in the
        # the line number, plus a possible polynomial in the energy.
        # Initial values for the zero order terms are
        # given here, the higher terms (if any) are set to zero.
        # Updated to allow any of the variables to be excluded from the fit (-1)
        # In this case the initial value, in startX, should be used, which is passed
        # to fit.
        # offset walks through x; each parameter occupies vary[i]+1 slots
        offset = vary[0]
        if vary[0]>-1:
            x[offset] = startX[0]
        offset = offset+1+vary[1]
        if vary[1]>-1:
            x[offset] = startX[1]
        offset = offset+1+vary[2]
        if vary[2]>-1:
            x[offset] = startX[2]
        # note: slot 3 (energy) takes no explicit start value here
        offset = offset+2+vary[3]+vary[4]
        if vary[4]>-1:
            x[offset] = startX[4]
        offset = offset+1+vary[5]
        if vary[5]>-1:
            x[offset] = startX[5]
        offset = offset+1+vary[6]
        if vary[6]>-1:
            x[offset] = startX[6]
        fit.defaults = startX
        try:
            nlines = int(string[1])
            if len(string) == 3:
                lstep = int(string[2])
        except:
            print("Wrong arguments: fitatt nlines [linestep]")
            return
    else:
        print("wrong number of args: need fitatt nlines [linestep]")
        print("where nlines=number of lines to fit and lstep is step between")
        print("lines, default 1")
        return
    if nlines < 1 or nlines > carouselCal.lines:
        print("fit lines out of range, must be 1 to ",carouselCal.lines)
        return
    # pass the per-parameter polynomial orders through to the fit object
    fit.vary_target = vary[0]
    fit.vary_detector = vary[1]
    fit.vary_filter = vary[2]
    fit.vary_energy = vary[3]
    fit.vary_epk = vary[4]
    fit.vary_ewidlow = vary[5]
    fit.vary_ewidhigh = vary[6]
    fit.solver = solverChoice
    t0 = timeit.default_timer()
    try:
        # leastsq-style return: solution, covariance, info dict, message, status
        res,cov,infodict,mesg,ier = fit.dofit(nlines,lstep,x)
    except Exception as experr:
        print("** Fit failed due to exception: ",experr)
        return
    tim = timeit.default_timer()-t0
    print("time=",tim)
    print("dofit returned: ")
    print(" best fit values = ",res)
    if ier>0 and ier<5:
        print(" ret = ",ier)
    else:
        # While fit not fully converged, result may still be OK
        print(" Fit warning: ret=",ier)
        print(" message=",mesg)
    print(" iterations = ",infodict["nfev"])
    # measure error
    samples = carouselCal.samples
    # fit.log records the raw solver diagnostics
    ofile = open('fit.log','w')
    ofile.write('time={0:12.6f}\n'.format(tim))
    ofile.write('dofit returned: ')
    ofile.write(' best fit values = \n')
    ofile.write(str(res)+'\n')
    ofile.write(' ier = {0:5d}\n'.format(ier))
    ofile.write(' iterations = {0:5d}\n'.format(infodict["nfev"]))
    ofile.write(' cov = ')
    ofile.write(str(cov)+'\n')
    ofile.write(' mesg = '+mesg+'\n')
    # param.log records the inputs, solution and polynomial coefficients
    rfile = open('param.log','w')
    rfile.write('fit input: lines={0:5d}\n'.format(nlines))
    rfile.write('guess: ')
    rfile.write(str(x))
    rfile.write('\n')
    rfile.write('solution: ')
    rfile.write(str(res))
    rfile.write('\n')
    sumtot=0.
    summax=0.
    # calculate the attenuation(corEn) vs attenuation(observed) and return
    # polynomial fit to these curves for each line.
    attLnWid = 14.0
    attPts = 300
    xtab,ytab,polyfit,xpolyfit = fit.linesPolyFit(res,corMat,corEn,attPts,attLnWid)
    # find average and max error for each line
    rfile.write('polyfits '+str(polyfit.shape[0])+" "+str(polyfit.shape[1])+"\n")
    lsumsq = []
    avatt = np.zeros((2,samples))
    for line in range(nlines):
        sumsq = 0.
        for sample in range(samples):
            # masked samples are excluded from the error measure
            if carouselData.mask[sample]:
                continue
            sumsq += (fit.atten[line,sample] - carouselCal.getAvAtten(line,sample) ) ** 2
            if line==0:
                # keep line-0 observed/fitted values for the comparison plot
                avatt[0,sample] = carouselCal.getAvAtten(line,sample)
                avatt[1,sample] = fit.atten[line,sample]
        ofile.write(' {0:5d} {1:12.6f}\n'.format(line,sumsq))
        lsumsq.append(sumsq)
        sumtot += sumsq
        if sumsq>summax:
            summax = sumsq
        if debug:
            print("Line: ",line," ave error:",sumsq)
        # save poly data to param.log
        if len(polyfit[:,0])>line:
            rfile.write('{0:5d} '.format(line)+str(polyfit[line,:])+'\n')
    # write data for xtek
    rfile.write('xpoly:\n')
    for line in range(len(xpolyfit[:,0])):
        rfile.write('{0:5d} '.format(line)+str(xpolyfit[line,:])+'\n')
    # write data in binary file
    bfile = open("polyfit.npz","wb")
    np.save(bfile,polyfit)
    bfile.close()
    #
    print("average error: ",sumtot/nlines)
    print("max error: ",summax)
    rfile.write("average error: {0:12.6f}\nmax error: {1:12.6f}\n".format(sumtot/nlines,summax))
    # plotting is best-effort; a headless session must not abort the fit
    try:
        plt.figure(FIG_ERR)
        plt.plot(lsumsq)
        plt.xlabel('line number')
        plt.ylabel('mean sq error')
        plt.draw()
        plt.show(block=False)
        #
        plt.figure(FIG_ATTCOMP)
        nsamp = len(avatt[0,:])
        xnum = np.array(range(nsamp))+1
        plt.subplot(211)
        plt.plot(xnum,avatt[0,:],'bx')
        mark = next(markerErr)
        plt.plot(xnum,avatt[1,:],marker=mark)
        plt.xlabel('sample number')
        plt.ylabel('log(I0/I)')
        plt.subplot(212)
        plt.plot(xnum,avatt[0,:]-avatt[1,:],marker=mark)
        plt.ylabel('err log(I0/I)')
        plt.draw()
        plt.show(block=False)
    except:
        print("Plotting failed")
    ofile.close()
    rfile.close()
def initGuess(words):
    """Store user-supplied starting values for the fit parameters.

    Accepts either 3 values (target width, ln(detector width), filter width)
    or 7 values (adding energy coefficient, spectral peak and low/high
    spectral widths); the values go into the module-level startX array.
    """
    try:
        for slot in range(3):
            startX[slot] = float(words[slot + 1])
        if len(words) == 8:
            for slot in range(3, 7):
                startX[slot] = float(words[slot + 1])
        elif len(words) > 4:
            print("Some values ignored!")
    except:
        print("initguess requires 3 or 7 floating point values: dt, ln(dd) and df. dt is the target "+
              "absortion width, ln(dd) is the log of detector width, and df the filter width.")
        print("If 7 values used additional values are: energy coeff, spectral peak, low/high width")
        print("Current guess = ",startX[0:7])
def setWidth(words):
    """Set (or, with no argument, report) the half width of the row band
    averaged along each line of the carousel images."""
    if len(words) <= 1:
        # no argument: just report the current setting, if data is loaded
        try:
            print("width= ",carouselCal.width, " (No. of pixels about centre of line to average)")
        except:
            print("width not set until carousel data loaded")
        return
    try:
        newWidth = float(words[1])
        carouselCal.width = newWidth
        carouselCal.setWidthAve(newWidth)
    except:
        print("load carousel data before setting width")
        return
def showCalConf(string):
    """ prints out some calibration data: filters, detector, source filter
    and tube settings from the loaded carousel calibration."""
    try:
        filterCount = carouselCal.filterCount
    except (NameError, AttributeError):
        # fix: carouselCal is a module global initialised to None, so the
        # unloaded case raises AttributeError, which the old NameError-only
        # handler let propagate as a crash
        print("** must read calibration data first")
        return
    print("filter count = ", filterCount)
    print("filter material width density fittable")
    for i in range(filterCount):
        print('{0:4d} {1:7s} {2:7f} {3:7f}'.format(i,
              carouselCal.filterMaterial[i],
              carouselCal.filterWidth[i], carouselCal.filterDensity[i]))
    print("Detector:")
    print('     {0:7s} {1:7f} {2:7f}'.format(
          carouselCal.detectorMaterial,
          carouselCal.detectorWidth, carouselCal.detectorDensity))
    print("Source filter:")
    print('     {0:7s} {1:7f} {2:7f}'.format(carouselCal.targetMat,
          0.0, carouselCal.targetDensity))
    print("Voltage=", carouselCal.voltage, " angle=", carouselCal.angle)
def setVary(strlst):
    """Show or set the polynomial order (in line number) used when fitting
    each model parameter; -1 removes a parameter from the fit."""
    if len(strlst)==1:
        print("Control order of polynomial used for fitting across lines")
        print(" - 3 widths are fitted for target, detector and filter")
        print(" - Experimental is to set energy dependence away from linear")
        print(" - If using energy, suggest only 0 order and check results")
        print(" - using -1 implies do not vary parameter, ONLY FOR ENERGY")
        print("current settings:")
        print("target: ",vary[0])
        print("detector: ",vary[1])
        print("filter: ",vary[2])
        print("energy dependence: ",vary[3])
        print("spectra: ",vary[4])
        return
    if len(strlst) != 3:
        print("syntax: vary [target|detector|filter|energy n]")
        return
    try:
        order = int(strlst[2])
    except:
        print("** failed to read npo")
        order = 0
    if order < -1 or order > 3:
        print("** Order must be range -1 to 3")
        return
    # simple parameters map straight onto a slot of the vary array
    slots = {"target": 0, "detector": 1, "filter": 2, "energy": 3}
    choice = strlst[1]
    if choice in slots:
        vary[slots[choice]] = order
    elif choice == "spectra":
        if order > 0:
            print("Error: spectra can only take values -1 and 0 at present")
            return
        # spectra controls three coupled slots: peak and both widths
        vary[4] = order
        vary[5] = order
        vary[6] = order
    else:
        print("Not recognised: ", choice)
def debugToggle(cmd):
    """Flip the module-wide debug flag and report the new state."""
    global debug
    debug = False if debug else True
    print("debug set to ",debug)
def helpCar(cmd, string):
    """Print the list of available commands followed by a short overview of
    the workflow; `cmd` is the command table, `string` is unused."""
    print("Carousel calibration utility")
    print(" ")
    print("cmds:")
    for name in cmd:
        print("   ", name)
    print(" ")
    print("To execute script file use: read <filename>")
    print(" ")
    overview = [
        "The required input is a set of images of test pieces imaged at the exact same",
        "Xray settings as the sample. These may be in a carousel or a crown device. The",
        "details of each test item (material/density/thickness) must be provided in a",
        "file in './carouselData'. Using these images a fit is made to the effective",
        "energy response function of the Xray source/detector. Using this fit a correction",
        "curve can be determined to map from observed attenuation to true attenuation of the",
        "dominant Xray material of the sample. This will be inaccurate on samples with muliple",
        "material types in them. The output is a file of polynomial fits giving the corrections",
        "which can be used in the python program 'applyTransForm.py'",
    ]
    for line in overview:
        print(line)
    print(" ")
def setCorMat(words):
    """ Input the name of the material that will be the target of the attenuation correction
        e.g. Calcium hydroxyapatite which might be defined in a file cahypa.txt, precalculated
        using the program xcom. Without arguments, list current setting, if any."""
    global corMat,corEn
    if len(words)>2:
        name = words[1]
        try:
            corEn = float(words[2])
        except ValueError:
            print("failed to read energy value")
            return
    else:
        # no (or incomplete) arguments: report the current correction material
        if corMat.name != "":
            print("corMat is set to ",corMat.name," and energy ",corEn,"KeV")
        else:
            print("corMat is not set")
        return
    # (the old code re-assigned name here redundantly; removed)
    print("reading correction material definition from file: ",name)
    try:
        corMat = cu.materialAtt(name,1.0)
    except Exception:
        print("error reading material type")
def mask(words):
    """Display or edit the sample mask. "mask n" excludes sample n from the
    fit, "mask -n" re-includes it, and "mask off" clears every exclusion.
    With no arguments the current mask is printed."""
    if carouselData== None:
        print("must load data first")
        return
    if len(words) == 1:
        print("Mask = ",carouselData.mask)
        return
    if words[1] == "off":
        carouselData.mask.fill(False)
        return
    try:
        for token in words[1:]:
            label = int(token)
            if label > 0:
                # labels are 1-based; positive means exclude the sample
                carouselData.mask[label-1] = True
            elif label < 0:
                # a negative label re-enables the corresponding sample
                carouselData.mask[-label-1] = False
            else:
                print("Warning: label 0 ignored")
    except:
        print("Error: bad value in list")
def transform(words):
    """ TEST code to try and fix up some data; should get right data
        in first place, Assume we have "I" data and want log(I0/I) values, where I0 is
        provided by the user.

        words is the tokenised command line: "transform I0". Each sample
        image is rewritten in place as log(I0/I).
    """
    if len(words)<2:
        print("transform command is a test command. Was used to map image intensity")
        print(" data I to log(I0/I), in the case where only I is provided. Now redundant")
        print(" as uint16 data is assumed to include I0 image at start and transform applied.")
        return
    I0 = float(words[1])
    nsamp = len(carouselData.mask)-1
    for i in range(nsamp):
        z = carouselCal.getImage(i)
        # count non-positive pixels, which would make log() blow up
        zerocount = sum(sum(z<=0))
        if zerocount>0:
            print("zeros for sample ",i," are ",zerocount)
            # clamp to a small positive value before taking logs
            z[z<=0] = 1e-4
        z = np.log(z)
        z = np.log(I0) - z
        # write the result back into the cached image in place
        carouselCal.getImage(i)[:,:] = z
def setOptions(words):
    """Adjust options controlling the fit; currently only the choice between
    the new and old least-squares solver implementations."""
    global solverChoice
    if len(words)<2:
        print("Options:")
        print(" solver = ",solverChoice)
        return
    choices = {"solver=new": "new", "solver=old": "old"}
    if words[1] in choices:
        solverChoice = choices[words[1]]
    else:
        print("Option not recognised")
def checkVersions():
    """Exit with status 1 if the installed matplotlib is older than 1.1."""
    import matplotlib as mpl
    def asTuple(version):
        # "1.5.3" -> (1, 5, 3) for lexicographic comparison
        return tuple(int(part) for part in version.split("."))
    if asTuple(mpl.__version__) < asTuple('1.1'):
        print("Matplotlib version too old, need at least 1.1; have ",mpl.__version__)
        sys.exit(1)
# Set of commands that are implemented and the corresponding function names.
# Additional commands and functions can be added here.
cmd_switch = { "load":loadAll,
               "showimg":showImg,
               "showspec":showSpec,
               "showconf":showCalConf,
               "showatt":showAtt,
               "showcor":showCor,
               "setfilter":setFilter,
               "setwidth":setWidth,
               "fitatt":fitAtt,
               "initguess":initGuess,
               "vary":setVary,
               "setcormat":setCorMat,
               "debug":debugToggle,
               "mask":mask,
               "quit":quitCarousel,
               "help":helpCar,
               "transform":transform,
               "setoptions":setOptions,
              }
# set figures to use for different plots
FIG_COR = "Correction"
FIG_ATT1D = "CarouselLines"
FIG_SPEC = "Spectra"
FIG_IMG = "CarouselImages"
FIG_ERR = "ErrorInFit"
FIG_ATTCOMP = "ObservedVsFittedAtten"
# simple command line loop to allow loading of data and run
# of fitting code.
carouselData = None
carouselCal = None
# initial guesses unless user changes them
# slots: target width, ln(detector width), filter width, energy coeff,
# spectral peak, low width, high width (see initGuess)
startX = np.array([0.01,-6.0,0.01,0.,30.,0.05,0.05])
# abort immediately if matplotlib is too old for the plotting calls used here
checkVersions()
def main():
    """Run the interactive command loop of the carousel fitting tool.

    Initialises logging and the module-level state (debug flag, fit order
    controls, solver choice, correction material), then repeatedly reads a
    command from stdin - or from a script file after the "read" command -
    and dispatches it through cmd_switch.
    """
    logging.basicConfig(filename='runCarouselFit.log',level=logging.INFO)
    localtime = time.asctime( time.localtime(time.time()) )
    logging.info('started at '+localtime)
    global debug
    global carouselCal
    global vary
    global solverChoice
    # fix: corMat must be module level - setCorMat declares it global and
    # fitAtt reads the global, so assigning it locally here left the global
    # undefined until "setcormat" was run, crashing fitatt with NameError
    global corMat
    debug = False
    carouselCal = None
    print(" ")
    print(" *** Carousel data fitting program ***")
    print(" ")
    print(" Code based on algorithms developed at Queen Mary University")
    print(" of London by <NAME>, <NAME> et al")
    print(" This implementation by <NAME> at STFC")
    print(" contact <EMAIL>")
    print(" Funded by the CCPi project")
    print(" ")
    # set the polynomial order of the fitting variables. Variables are
    # function of line number.
    vary = np.zeros(7,dtype='int')
    vary[3:] = -1
    # set the default solver type to be "old"
    solverChoice = "old"
    # set an object for the material to which attenuation is to be corrected to;
    # this is null until the user provides one
    corMat = cu.materialAtt("",1.0)
    # command loop; filein is True while commands come from a script file
    filein = False
    while True:
        try:
            if filein:
                cmd = infile.readline().strip()
                if cmd=='':
                    # end of script file: revert to interactive input
                    filein = False
                    infile.close()
                    continue
                print(" bhc: ",cmd)
            else:
                if sys.version_info[0] == 2:
                    cmd = raw_input("bhc: ").strip()
                else:
                    cmd = input("bhc: ").strip()
        except EOFError:
            logging.info('bhc: EOF')
            print("EOF")
            sys.exit(0)
        logging.info('bhc: '+cmd)
        words = cmd.split(" ")
        try:
            if words[0] == "help":
                # help receives the command table as well as the arguments
                cmd_switch[words[0]](cmd_switch, words)
            elif words[0] == "read":
                if len(words)>1 and os.path.isfile(words[1]):
                    rfile = words[1]
                    infile = open(rfile,'r')
                    filein = True
                else:
                    print("Error - syntax: read file")
                continue
            elif words[0] == "#":
                # comment line in a script file
                continue
            else:
                cmd_switch[words[0]](words)
        except SystemExit:
            sys.exit(0)
        except KeyError:
            if not ( words[0] == "" ):
                print("** command not found: ", words[0])
        except:
            print("** internal error: ", sys.exc_info()[0])
            raise
if __name__ == "__main__":
    main()
| StarcoderdataPython |
393475 | <filename>tvsched/application/models/auth.py
from dataclasses import dataclass
import uuid
from tvsched.entities.auth import Role
@dataclass(frozen=True)
class UserAdd:
    """Data for registering a user with the default USER role."""
    # desired login name
    username: str
    # password as supplied by the user; hashed before storage
    # (see UserInRepoAdd.password_hash)
    password: str
@dataclass(frozen=True)
class UserWithRoleAdd:
    """Data for registering a user with an explicitly chosen role."""
    # desired login name
    username: str
    # password as supplied by the user; hashed before storage
    password: str
    # role to assign to the new account
    role: Role
@dataclass(frozen=True)
class UserLogIn:
    """Credentials submitted when a user logs in."""
    # login name to authenticate
    username: str
    # password to check against the stored hash
    password: str
@dataclass(frozen=True)
class UserInRepoAdd:
    """Data for adding a user record to the repository."""
    # login name to store
    username: str
    # already-hashed password; the repo never sees the plain text
    password_hash: str
    # role stored with the account
    role: Role
@dataclass(frozen=True)
class UserInRepo:
    """User record as stored in the repository."""
    # repository-assigned unique identifier
    id: uuid.UUID
    # login name
    username: str
    # hashed password for credential checks
    password_hash: str
    # role granted to the account
    role: Role
@dataclass(frozen=True)
class UserInToken:
    """Data about the user carried inside an auth token."""
    # unique identifier of the authenticated user
    id: uuid.UUID
    # role used for authorisation decisions
    role: Role
| StarcoderdataPython |
5088317 | <filename>utils/model_utils.py
import torch,os
import torch.nn as nn
from reformer_pytorch_LD.reformer_pytorch import ReformerLM
from reformer_pytorch_LD.reformer_pytorch_kmeans import ReformerKmeansLM
def init_weight(args, weight):
    """Initialise *weight* in place according to args.init.

    'uniform' draws from U(-args.init_range, args.init_range); 'normal'
    draws from N(0, args.init_std). Any other value leaves the tensor
    untouched.
    """
    if args.init == 'uniform':
        nn.init.uniform_(weight, -args.init_range, args.init_range)
    elif args.init == 'normal':
        nn.init.normal_(weight, 0.0, args.init_std)
def init_bias(bias):
    """Zero-fill a bias tensor in place."""
    nn.init.constant_(bias, 0.0)
def weights_init(args, m):
    """Per-module initialiser, intended for use with ``model.apply``.

    Dispatches on the class name of *m* and (re)initialises its weights and
    biases using the settings in *args*.

    Bug fix: the original called ``init_weight(m.weight)`` (and likewise for
    the TransformerLM tensors) without the required ``args`` first argument,
    which raised TypeError as soon as a matching module was visited.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        if hasattr(m, 'weight') and m.weight is not None:
            init_weight(args, m.weight)
        if hasattr(m, 'bias') and m.bias is not None:
            init_bias(m.bias)
    elif classname.find('AdaptiveEmbedding') != -1:
        if hasattr(m, 'emb_projs'):
            for i in range(len(m.emb_projs)):
                if m.emb_projs[i] is not None:
                    nn.init.normal_(m.emb_projs[i], 0.0, args.proj_init_std)
    elif classname.find('Embedding') != -1:
        if hasattr(m, 'weight'):
            init_weight(args, m.weight)
    elif classname.find('ProjectedAdaptiveLogSoftmax') != -1:
        if hasattr(m, 'cluster_weight') and m.cluster_weight is not None:
            init_weight(args, m.cluster_weight)
        if hasattr(m, 'cluster_bias') and m.cluster_bias is not None:
            init_bias(m.cluster_bias)
        if hasattr(m, 'out_projs'):
            for i in range(len(m.out_projs)):
                if m.out_projs[i] is not None:
                    nn.init.normal_(m.out_projs[i], 0.0, args.proj_init_std)
    elif classname.find('LayerNorm') != -1:
        if hasattr(m, 'weight'):
            # LayerNorm scale is initialised around 1, not 0
            nn.init.normal_(m.weight, 1.0, args.init_std)
        if hasattr(m, 'bias') and m.bias is not None:
            init_bias(m.bias)
    elif classname.find('TransformerLM') != -1:
        if hasattr(m, 'r_emb'):
            init_weight(args, m.r_emb)
        if hasattr(m, 'r_w_bias'):
            init_weight(args, m.r_w_bias)
        if hasattr(m, 'r_r_bias'):
            init_weight(args, m.r_r_bias)
        if hasattr(m, 'r_bias'):
            init_bias(m.r_bias)
def update_dropout(args,m):
    """Overwrite the dropout probability of any Dropout-like module with
    args.dropout; other modules are left alone."""
    name = type(m).__name__
    if 'Dropout' in name and hasattr(m, 'p'):
        m.p = args.dropout
def update_dropatt(args,m):
    """Overwrite the attention-dropout probability on modules that carry a
    ``dropatt`` sub-module."""
    if hasattr(m, 'dropatt'):
        setattr(m.dropatt, 'p', args.dropatt)
def select_model(args,ntokens):
    """Build and return a Reformer language model configured from *args*.

    args.kmeans selects the k-means hashing variant; otherwise the standard
    LSH ReformerLM is built. ntokens is the vocabulary size.

    NOTE(review): the k-means branch reads args.heads while the LSH branch
    reads args.nhead - confirm both attributes exist on the argument parser.
    """
    if args.kmeans:
        model = ReformerKmeansLM(
            num_tokens=ntokens,
            dim=args.emsize,
            depth=args.depth,
            heads=args.heads,
            n_hashes=args.n_hashes,
            bucket_size=64,
            ff_chunks=10,
            lsh_dropout=0.1,
            max_seq_len=args.seqlen,
            weight_tie=True,
            causal=args.causal,
            n_local_attn_heads=4,
            k_means_hashing=True,
            use_full_attn=False
        )
        return model
    else:
        model = ReformerLM(
            num_tokens=ntokens,
            dim=args.emsize,
            depth=args.depth,
            heads=args.nhead,
            n_hashes=args.n_hashes,
            bucket_size=64,
            ff_chunks=10,
            lsh_dropout=0.1,
            max_seq_len=args.seqlen,
            weight_tie=True,
            causal=args.causal,
            n_local_attn_heads=4,
            use_full_attn=False # set this to true for comparison with full attention
        )
        return model
def load_checkpoint(checkpoint, model, strict=True):
    """Loads model parameters (state_dict) from file_path into *model*.

    Args:
        checkpoint: (string) filename of the checkpoint to load
        model: (torch.nn.Module) model for which the parameters are loaded
        strict: (bool) passed through to load_state_dict

    Returns:
        the model with the loaded parameters.

    Raises:
        FileNotFoundError: if *checkpoint* does not exist.
    """
    # fix: fail fast - the original checked existence only AFTER torch.load
    # had already been attempted, and then raised a plain string (which is a
    # TypeError in Python 3, not a real exception)
    if not os.path.exists(checkpoint):
        raise FileNotFoundError("File doesn't exist {}".format(checkpoint))
    state = torch.load(checkpoint, map_location='cpu')
    # strip the 'net.net.' prefix that the training wrapper adds to keys so
    # the names match the bare model's state_dict
    renamed = {}
    for key, value in state.items():
        if 'net.net.' in key:
            key = key[8:]
        renamed[key] = value
    model.load_state_dict(renamed, strict=strict)
    return model
| StarcoderdataPython |
6490530 | '''
Usage: openPortfolio [-dpwish] [COUNTER] ...
Arguments:
COUNTER Optional counters
Options:
-p,--portfolio Select portfolio from config.json
-w,--watchlist Select watchlist from config.json
-i,--i3 Open in i3investor.com
-s,--sb Open in my.stockbit.com
-d,--debug Debug mode
-h,--help This page
This app opens counter[s] in browser, if counter not specified, then refer config.json
'''
import settings as S
import requests
import webbrowser
from lxml import html
from BeautifulSoup import BeautifulSoup
from common import loadCfg, getDataDir, formStocklist, getCounters, getI3Dir
from docopt import docopt
from utils.dateutils import getTime
# Relative endpoints on the i3investor site; the __main__ block prefixes
# them with S.I3_KLSE_URL before use.
LOGIN_URL = "/loginservlet.jsp"
REFERER_URL = "/jsp/admin/login.jsp"
def connectPortfolio(uid, pwd):
    """Log in to i3investor and return the portfolio page as BeautifulSoup.

    uid/pwd are the site credentials. On any failure the error is printed
    and an empty string is returned instead of a soup object.
    """
    payload = {
        "uid": uid,
        # fix: the login payload must carry the supplied password; an
        # anonymised <PASSWORD> placeholder had replaced it (and did not parse)
        "pwd": pwd,
    }
    try:
        with requests.session() as session:
            result = session.post(LOGIN_URL, data=payload, headers=S.HEADERS)
            print(result.text)
            assert(result.status_code == 200)
            page = session.get(S.I3_PORTFOLIO_URL, headers=S.HEADERS)
            assert(page.status_code == 200)
            html = page.content
            soup = BeautifulSoup(html)
    except Exception as e:
        print(e)
        soup = ''
    return soup
def compilePortfolioLinks(soup):
    """Extract per-stock chart links from the portfolio page soup.

    Fills the module-level ``chartlink`` list as a side effect and returns
    None on any missing page element.
    """
    if soup is None or len(soup) <= 0:
        print 'ERR: no result'
        return None
    divclass = soup.find('div', {'class': 'dataTables_wrapper'})
    if divclass is None:
        print 'ERR: no result for div'
        return None
    table = divclass.find('table', {'class': 'nc dataTable', 'id': 'pfenttable'})
    if table is None:
        print 'ERR: no result for table'
        return None
    # accumulate results in a module-level list so callers can read it later
    global chartlink
    chartlink = []
    for tr in table.findAll('tr'):
        leftTag = tr.findAll('td', {'class': 'left'})
        # skip header/separator rows with no left-aligned cells
        if leftTag is None or len(leftTag) == 0:
            continue
        stockShortName = leftTag[0].text.replace(';', '')
        stockLink = tr.find('a').get('href')
        # Sample stockLink: /servlets/stk/1234.jsp
        stockCode = stockLink[14:-4]
        # NOTE(review): leftTag is the list returned by findAll, yet .find
        # is called on it here - confirm this works with this BeautifulSoup
        # version (leftTag[0].find('a') may be what was intended)
        chartlink += [leftTag.find('a').get('href')]
        print stockShortName, stockCode, stockLink, chartlink
def openPortfolioLinks(chartlinks):
new = 1
if len(S.CHROME_DIR) > 0:
browser = webbrowser.get(S.CHROME_DIR)
for url in chartlinks:
if S.DBG_ALL:
print getTime(), url
if len(S.CHROME_DIR) > 0:
browser.open(url, new=new, autoraise=True)
else:
webbrowser.open(url, new=new, autoraise=True)
new = 2
def compileLinks(i3, sb, counters):
    """Build lists of i3investor and/or stockbit chart URLs for the given
    counters; either list is left empty when its flag is False."""
    SB_URL = 'https://my.stockbit.com/#/symbol/KLSE-'
    stocklist = formStocklist(counters, getI3Dir() + 'klse.txt')
    i3chartlinks = []
    sbchartlinks = []
    for shortname in stocklist.iterkeys():
        if i3:
            code = stocklist[shortname]
            i3chartlinks.append(S.I3_KLSE_URL + '/servlets/stk/chart/' + code + '.jsp')
        if sb:
            sbchartlinks.append(SB_URL + shortname + '/chartbit')
    return i3chartlinks, sbchartlinks
if __name__ == '__main__':
args = docopt(__doc__)
loadCfg(getDataDir(S.DATA_DIR))
counters = getCounters(args['COUNTER'], args['--portfolio'],
args['--watchlist'])
if args['--debug']:
S.DBG_ALL = True
if S.DBG_ALL:
print getTime(), counters
if len(counters) > 0:
i3chartlinks, sbchartlinks = compileLinks(args['--i3'],
args['--sb'], counters)
if S.DBG_ALL:
print getTime(), i3chartlinks
print getTime(), sbchartlinks
else:
LOGIN_URL = S.I3_KLSE_URL + LOGIN_URL
REFERER_URL = S.I3_KLSE_URL + REFERER_URL
compilePortfolioLinks(connectPortfolio(S.I3_UID, S.I3_PWD))
if i3chartlinks is not None and len(i3chartlinks) > 0:
openPortfolioLinks(i3chartlinks)
if sbchartlinks is not None and len(sbchartlinks) > 0:
openPortfolioLinks(sbchartlinks)
| StarcoderdataPython |
1707104 | <gh_stars>0
from main import generate
import argparse
# Command-line interface: optional wordlist path plus a version flag.
parser = argparse.ArgumentParser(description='ETERNALCRYSTAL - Generate code names on the fly.')
parser.add_argument('-w', metavar='--wordlist', type=str, nargs='?',help='path to wordlist')
parser.add_argument('-v', '--version', action='version', version='v0.0.1-alpha', help='print version')
args = parser.parse_args()
# NOTE(review): when -w is omitted args.w is None and this f-string produces
# the literal string 'None' - confirm generate() handles that path
file = f'{args.w}'
generate(file)
| StarcoderdataPython |
1991953 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import getpass
import io
import logging
import os
import platform
import requests
import string
import sys
import tkinter as tk
import tkinter.filedialog as fd
import tkinter.messagebox as messagebox
from datetime import timedelta
from PIL import Image, ImageTk
from buddy_downloader import BuddyDownloader
class BuddyGUI:
"""
GUI that helps download video and/or audio from YouTube link
"""
    def __init__(self):
        # Logger shared by all methods of the GUI.
        self.logger = logging.getLogger('YouTuBuddy')
        # Backend that performs the actual download / conversion work.
        self.buddy_downloader = BuddyDownloader()
        # run() builds and displays the window, so constructing a BuddyGUI
        # immediately starts the application.
        self.run()
    def check_url(self):
        """
        Check given url and refresh summary and download boxes.

        Reads the URL entry, asks the downloader to load the video metadata,
        and on success repopulates the summary and download frames.

        Returns
        -------
        None.

        """
        # Clear frames so stale widgets from a previous URL disappear
        self.clear_frame(self.frame_info)
        self.clear_frame(self.frame_download)
        # Load videodata
        self.logger.info('Loading videodata...')
        try:
            self.buddy_downloader.load_url(self.url.get())
        except Exception as e:
            # truncate the error text so one log line stays readable
            self.logger.error(f'Could not load video. Error: {str(e)[:400]}')
            messagebox.showerror(title='Error',
                                 message='Invalid url. Enter proper url to YouTube video.')
            return
        # keep the pytube object for the other methods to read
        self.yt = self.buddy_downloader.yt
        self.logger.info('Videodata loaded')
        # Show frames
        self.show_info()
        self.show_downloadbox()
        # Change urlbutton color to signal the URL was accepted
        self.button_url.configure(bg='lightblue')
    def show_info(self):
        """
        Show info about loaded video in Summary box: thumbnail image plus
        title, publish date, length and view count. Requires self.yt to be
        set by check_url.

        Returns
        -------
        None.

        """
        # Show title
        self.label_info = tk.Label(self.frame_info,
                                   text='Summary:',
                                   font=('', 12, 'bold')
                                   )
        self.label_info.pack(side=tk.TOP, anchor='nw', expand='YES')
        # Show thumbnail image: fetch the raw bytes, shrink in place, wrap for tk
        raw_image = requests.get(self.yt.thumbnail_url).content
        im = Image.open(io.BytesIO(raw_image))
        im.thumbnail((350, 500), Image.ANTIALIAS)
        # kept on self: tkinter only displays PhotoImages that stay referenced
        self.image = ImageTk.PhotoImage(im)
        self.thumbnail_image = tk.Label(self.frame_info, image=self.image)
        self.thumbnail_image.pack(side=tk.TOP, anchor='nw')
        # Show video details; title is truncated to keep the layout compact
        info_texts = [
            f'Title: {self.yt.title[:50]}',
            f'Added: {self.yt.publish_date}',
            f'Length: {timedelta(seconds=self.yt.length)}',
            f'Views: {self.yt.views}'
            ]
        for text in info_texts:
            self.label_info_text = tk.Label(self.frame_info, text=text, anchor='w')
            self.label_info_text.pack(side=tk.TOP, anchor='nw')
    def show_downloadbox(self):
        """
        Show download options: Video/Audio checkboxes bound to
        self.video_var / self.audio_var and the Download button.

        Returns
        -------
        None.

        """
        # Show title
        self.label_download = tk.Label(self.frame_download,
                                       text='Download:',
                                       font=('', 12, 'bold')
                                       )
        self.label_download.pack(side=tk.TOP, anchor='nw', expand='YES')
        # Empty space (spacer label used instead of padding)
        self.space = tk.Label(self.frame_download, text='\n', font=('', 8, 'bold'))
        self.space.pack()
        # Checkbuttons controlling what download() will fetch
        self.checkbutton_video = tk.Checkbutton(self.frame_download,
                                                text="Video",
                                                variable=self.video_var,
                                                font=('', 12, 'bold')
                                                )
        self.checkbutton_video.pack()
        self.checkbutton_audio = tk.Checkbutton(self.frame_download,
                                                text="Audio",
                                                variable=self.audio_var,
                                                font=('', 12, 'bold')
                                                )
        self.checkbutton_audio.pack()
        # Empty space
        self.space = tk.Label(self.frame_download, text='\n', font=('', 8, 'bold'))
        self.space.pack()
        # Add download button wired to the download() handler
        self.download_button = tk.Button(self.frame_download,
                                         height=4, width=14,
                                         text='Download',
                                         command=self.download,
                                         bg='lightgreen',
                                         font=('', 12, 'bold')
                                         )
        self.download_button.pack(side=tk.RIGHT)
@staticmethod
def enable_frame(frame: tk.Frame) -> None:
"""
Set all children's status as normal in given frame
Parameters
----------
frame : tk.Frame
frame object.
Returns
-------
None.
"""
for child in frame.winfo_children():
child.configure(state='normal')
@staticmethod
def disable_frame(frame: tk.Frame) -> None:
"""
Set all children's status as disable in given frame
Parameters
----------
frame : tk.Frame
frame object.
Returns
-------
None.
"""
for child in frame.winfo_children():
child.configure(state='disable')
@staticmethod
def clear_frame(frame: tk.Frame) -> None:
"""
Deletes all children in given frame
Parameters
----------
frame : tk.Frame
frame object.
Returns
-------
None.
"""
for child in frame.winfo_children():
child.destroy()
def init_download_path(self):
"""
Get default download_path for proper OS
Linux: ~/Downloads
Windows: C://Users/{username}/Downloads
Returns
-------
download_path : str
default downloads path.
"""
# Detect os
system = platform.system()
username = getpass.getuser()
if system == 'Linux':
download_path = os.path.join(os.path.expanduser("~"), 'Downloads')
elif system == 'Windows':
download_path = f'C://Users/{username}/Downloads'
else:
self.logger.error(f'Ooops, YouTuBuddy does not support system {system}')
sys.exit(0)
return download_path
def choose_download_path(self):
    """
    Ask the user for a download directory via a folder-picker dialog.

    If the user picks a directory (i.e. does not cancel the dialog),
    the path entry widget is cleared and replaced with the chosen path.
    Bound to the "Change" (download directory) button.

    Returns
    -------
    None.
    """
    chosen = fd.askdirectory()
    if not chosen:
        # Dialog cancelled: leave the current path untouched.
        return
    self.entry_path.configure(state='normal')
    self.entry_path.delete(0, tk.END)
    self.entry_path.insert(tk.INSERT, chosen)
def download(self):
    """
    Download video and/or audio, depending on the checkbox selection.
    Bound to the Download button.

    Returns
    -------
    None.
    """
    video_var = self.video_var.get()
    audio_var = self.audio_var.get()
    if video_var and audio_var:
        # Both: keep the downloaded mp4 and derive an mp3 from it.
        downloaded_video = self.buddy_downloader.download_video(output_path=self.download_path.get())
        self.buddy_downloader.convert_mp4_to_mp3(downloaded_video)
    elif video_var and not audio_var:
        # Video only.
        self.buddy_downloader.download_video(output_path=self.download_path.get())
    elif not video_var and audio_var:
        # Audio only: download under a sanitized '<title>_audio' name,
        # convert to mp3, then delete the intermediate mp4.
        allowed_chars = string.ascii_letters + string.digits + ' '
        filename = ''.join(letter for letter in self.yt.title if letter in allowed_chars) + '_audio'
        downloaded_video = self.buddy_downloader.download_video(output_path=self.download_path.get(),
                                                                filename=filename)
        self.buddy_downloader.convert_mp4_to_mp3(downloaded_video)
        os.remove(downloaded_video)
    else:
        # Neither checkbox ticked: warn and bail out before the success popup.
        messagebox.showinfo(title='Warning', message='Select at least one checkbox')
        return
    messagebox.showinfo(title='Success', message='Downloading finished.')
def run(self):
    """
    Build the main window and all top-level widgets, then start the
    Tk main loop (this call blocks until the window is closed).

    Returns
    -------
    None.
    """
    # Window
    self.win = tk.Tk()
    self.win.title('YouTuBuddy')
    self.win.geometry('600x450+20+20')
    # Variables (tk variables bound to the entry/checkbox widgets)
    self.download_path = tk.StringVar()
    self.url = tk.StringVar()
    self.video_var = tk.IntVar()
    self.audio_var = tk.IntVar()
    # =====================================================================
    # TOP FRAME/INPUT URL
    # =====================================================================
    self.frame_url = tk.Frame(self.win, borderwidth=10)
    self.frame_url.pack(side=tk.TOP, anchor='w')
    self.label_url = tk.Label(self.frame_url,
                              text='Enter URL:',
                              font=('', 12, 'bold')
                              )
    self.label_url.pack(side=tk.LEFT, anchor='w')
    self.entry_url = tk.Entry(self.frame_url,
                              width=50,
                              state='normal',
                              textvariable=self.url,
                              font=('', 10, '')
                              )
    self.entry_url.pack(side=tk.LEFT, anchor='w')
    self.button_url = tk.Button(self.frame_url,
                                text='Check',
                                command=self.check_url,
                                bg='lightgreen'
                                )
    self.button_url.pack(side=tk.RIGHT)
    # =====================================================================
    # TOP FRAME/DOWNLOAD PATH
    # =====================================================================
    self.frame_path = tk.Frame(self.win, borderwidth=10)
    self.frame_path.pack(side=tk.TOP, anchor='w')
    self.label_path = tk.Label(self.frame_path,
                               text='Folder:',
                               font=('', 12, 'bold')
                               )
    self.label_path.pack(side=tk.LEFT, anchor='w')
    self.entry_path = tk.Entry(self.frame_path,
                               width=53,
                               state='normal',
                               textvariable=self.download_path,
                               font=('', 10, '')
                               )
    self.entry_path.pack(side=tk.LEFT, anchor='w')
    # Pre-fill the path entry with the OS default Downloads folder.
    self.entry_path.insert(0, self.init_download_path())
    self.button_path = tk.Button(self.frame_path,
                                 text='Change',
                                 command=self.choose_download_path,
                                 bg='LightGoldenrod1')
    self.button_path.pack(side=tk.RIGHT)
    # =====================================================================
    # BOTTOM FRAME
    # =====================================================================
    self.frame_bottom = tk.Frame(self.win)
    self.frame_bottom.pack(side=tk.TOP, anchor='nw')
    # =====================================================================
    # INFO FRAME
    # =====================================================================
    self.frame_info = tk.Frame(self.frame_bottom, borderwidth=10)
    self.frame_info.pack(side=tk.LEFT)
    # =====================================================================
    # DOWNLOAD FRAME (its widgets are added by another method once a
    # URL has been checked)
    # =====================================================================
    self.frame_download = tk.Frame(self.frame_bottom, borderwidth=10)
    self.frame_download.pack(side=tk.RIGHT, anchor='ne')
    # Run
    self.win.mainloop()
if __name__ == "__main__":
    # Route log output to stdout so progress is visible in the terminal.
    logging.basicConfig(stream=sys.stdout,
                        level=logging.INFO)
    gui = BuddyGUI()
    # Start the GUI: run() builds the window and enters the Tk main loop.
    # Without this call the application constructed the object and exited
    # immediately (no other visible code starts the main loop).
    gui.run()
8042871 | <reponame>dmitsf/GOT
''' FADDIS clustering implementation in Python
'''
import numpy as np
import numpy.linalg as LA
# Numerical tolerance below which an eigenvalue / cluster intensity is
# treated as zero.
ZERO_BOUND = 10 ** (-9)
# Minimum relative contribution a cluster must make to the data scatter
# for extraction to continue.
MIN_CLUSTER_CONTRIBUTION = 5 * 10 ** (-3)
# Minimum relative residual data scatter before extraction stops.
EPSILON = 5 * 10 ** (-2)
# Maximum number of clusters
MAX_NUM_CLUSTERS = 15
def ensure_np_matrix(A):
    """Return *A* as a ``numpy.matrix``, converting only when necessary."""
    if isinstance(A, np.matrix):
        return A
    return np.matrix(A)
def faddis(A):
    ''' faddis: extraction of fuzzy clusters, in a sequential manner.
    A is an NxN similarity matrix (it is symmetrized internally).
    Returns a 6-tuple:
    matrix_sequence - list of residual similarity matrices, one per step;
    membership_matrix - NxK membership matrix of the clustering;
    contrib - 1xK vector of relative contributions to the data scatter;
    intensities - Kx2 matrix of cluster intensities^0.5 and intensities;
    lat - 1xK vector of eigenvalues corresponding to clusters;
    cluster_got - number of clusters extracted
    '''
    A = ensure_np_matrix(A)
    # minimum cluster's relative contribution to the data scatter
    min_cont = MIN_CLUSTER_CONTRIBUTION
    # minimum relative residual data scatter
    eps = EPSILON
    # maximum number of clusters
    max_clust_num = MAX_NUM_CLUSTERS
    is_positive = True
    matrix_dim, _ = A.shape
    sc = np.power(A, 2)
    # Total data scatter (sum of squared similarities)
    scatter = np.sum(sc)
    cluster_got = 0
    membership_matrix = np.empty((matrix_dim, 0))
    contrib = np.array([])
    lat = np.array([])
    intensities = np.empty((0, 2))
    curr_cont = 1
    res_cont = 1
    # 'zero' and 'one' vectors for clipping memberships into [0, 1]
    zeros_vect = np.zeros((matrix_dim, 1))
    ones_vect = np.ones((matrix_dim, 1))
    # ensure matrix is symmetrical
    At = (A + A.T) / 2
    matrix_sequence = [At]
    # Stop condition:
    # is_positive is True: eigen-value of the residual matrix is not positive;
    # OR la cluster intensity reaches its minimum lam;
    # OR ep relative residual data scatter reaches its minimum eps;
    # OR maximum number of clusters max_clust_num is achieved
    while is_positive and curr_cont > min_cont and res_cont > eps and cluster_got <= max_clust_num:
        # collecting a fuzzy cluster membership uf, with contribution con
        # and intensity la
        eig_vals, eig_vecs = LA.eig(At)
        eig_vals_diag = np.diag(eig_vals)
        # Keep only positive eigenvalues (candidates for a cluster)
        eig_vals_pos = np.argwhere(eig_vals > ZERO_BOUND).ravel()
        eig_vals_pos_len = eig_vals_pos.size
        cur_intensities = np.zeros((eig_vals_pos_len, 1))
        vm = np.zeros((matrix_dim, eig_vals_pos_len))
        for k in range(eig_vals_pos_len):
            # NOTE(review): lt is computed but never used afterwards.
            lt = eig_vals_diag[eig_vals_pos[k]]
            vf = eig_vecs[:, eig_vals_pos[k]]
            # Calculate normalized membership vector belonging to [0, 1] by
            # projection on the space. The normalization factor is the
            # Euclidean length of the vector
            bf = np.maximum(zeros_vect, vf)
            uf = np.minimum(bf, ones_vect)
            if LA.norm(uf) > 0:
                uf = uf / LA.norm(uf)
            vt = uf.T.dot(At).dot(uf)
            uf = np.squeeze(np.asarray(uf))
            wt = uf.T.dot(uf)
            # Calculates the intensity Lambda (la) of the cluster, which is
            # defined almost as the Rayleigh quotient
            if wt > 0:
                la = vt.item() / (wt **2)
            else:
                la = 0
            # since lt*vf = (-lt)*(-vf), try the symmetric version using -vf:
            vf1 = -vf
            bf1 = np.maximum(zeros_vect, vf1)
            uf1 = np.minimum(bf1, ones_vect)
            uf1 = np.squeeze(np.asarray(uf1))
            if LA.norm(uf1) > 0:
                uf1 = uf1 / LA.norm(uf1)
            vt1 = uf1.T.dot(At).dot(uf1)
            wt1 = uf1.T.dot(uf1)
            if wt1 > 0:
                la1 = vt1.item() / (wt1 **2)
            else:
                la1 = 0
            # Keep whichever sign of the eigenvector gives the larger intensity
            if la > la1:
                cur_intensities[k] = la
                vm[:, k] = uf.ravel()
            else:
                cur_intensities[k] = la1
                vm[:, k] = uf1.ravel()
        contrib_max, contrib_max_index = cur_intensities.max(), cur_intensities.argmax()
        if contrib_max > ZERO_BOUND:
            lat = np.append(lat, eig_vals[eig_vals_pos[contrib_max_index]])
            intensities = np.append(intensities, np.matrix([np.sqrt(contrib_max),
                                                            contrib_max]), axis=0)
            # square root and value of lambda intensity of cluster_got;
            # square root shows the value of fuzzyness
            uf = vm[:, contrib_max_index]
            vt = uf.T.dot(At).dot(uf)
            wt = uf.T.dot(uf)
            membership_matrix = np.append(membership_matrix, np.matrix(uf).T, axis=1)
            # calculate residual similarity matrix:
            # remove the present cluster (i.e. intensity * membership) from
            # the similarity matrix, re-symmetrizing afterwards
            Att = At - contrib_max * np.matrix(uf).T * np.matrix(uf)
            At = (Att + Att.T) / 2
            matrix_sequence.append(At)
            curr_cont = (vt / wt) ** 2
            # Calculate the relative contribution of cluster_got
            curr_cont /= scatter
            contrib = np.append(contrib, curr_cont)
            # Calculate the residual contribution
            res_cont -= curr_cont
            cluster_got += 1
        else:
            is_positive = False
    # Report which stop condition fired.
    if not is_positive:
        print('No positive weights at spectral clusters')
    elif curr_cont < min_cont:
        print('Cluster contribution is too small')
    elif res_cont < eps:
        print('Residual is too small')
    elif cluster_got > max_clust_num:
        print('Maximum number of clusters reached')
    return matrix_sequence, membership_matrix, contrib, intensities, lat, cluster_got
if __name__ == '__main__':
    # Small hand-made 4x4 example, kept for reference; note it is
    # overwritten by the random matrix below.
    M = np.matrix([[1, .5, .3, .1],
                   [.5, 1, .98, .4],
                   [.3, .98, 1, .6],
                   [.1, .4, .6, 1 ]])
    #M = np.matrix([[1, 0, 1], [0, 3, 0], [1, 0, 9]])
    # Smoke-test run on a random (non-symmetric) 500x500 matrix;
    # faddis() symmetrizes it internally.
    M = np.matrix(np.random.rand(500, 500))
    B, member, contrib, intensity, lat, tt = faddis(M)
    print("B")
    print(B)
    print("member")
    print(member)
    print("contrib")
    print(contrib)
    print("intensity")
    print(intensity)
    print("lat")
    print(lat)
    print("tt")
    print(tt)
| StarcoderdataPython |
# Open a file without knowing in advance which encoding it uses:
# try candidate encodings until one decodes the whole file successfully.
import codecs

encode = ["utf8", "gbk", "gb2312"]
filename = ""
content = ""
for code in encode:
    try:
        # ``with`` guarantees the handle is closed even when decoding
        # fails partway through readlines(); the original leaked the
        # error (and crashed) when codecs.open itself raised.
        with codecs.open(filename, "r", encoding=code) as f:
            content = f.readlines()
    except (OSError, UnicodeError, LookupError):
        # Unreadable file or wrong encoding: try the next candidate.
        continue
    if content:
        break
| StarcoderdataPython |
6700616 | <reponame>lby314xx/MLP-coursework
import torch
import torch.nn as nn
import torch.nn.init as init
from functools import reduce
class Net(nn.Module):
    """Single-image super-resolution network.

    A learned transposed-convolution upsampling of the raw input
    (global skip branch) is added to a residual branch made of ``blocks``
    SK residual blocks followed by transposed-convolution upsampling.

    Parameters
    ----------
    blocks : int
        Number of SKBlock repetitions in the feature branch.
    rate : int
        Upscaling factor (stride of the transposed convolutions).
    """

    def __init__(self, blocks, rate):
        super(Net, self).__init__()
        # Global skip branch: upsample the 1-channel input directly.
        self.convt_I1 = nn.ConvTranspose2d(1, 1, kernel_size=int(4*rate//2), stride=rate, padding=rate//2, bias=False)
        self.conv_input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
        self.relu = nn.PReLU()
        self.convt_F1 = self._make_layer(SKBlock(64), blocks)
        self.Transpose = nn.ConvTranspose2d(64, 64, kernel_size=int(4*rate//2), stride=rate, padding=rate//2, bias=False)
        self.relu_transpose = nn.PReLU()
        self.convt_R1 = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # orthogonal_ is the in-place initializer; the bare
                # init.orthogonal alias is deprecated.
                init.orthogonal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()
            if isinstance(m, nn.ConvTranspose2d):
                init.orthogonal_(m.weight)
                if m.bias is not None:
                    m.bias.data.zero_()

    def _make_layer(self, block, blocks):
        """Repeat *block* ``blocks`` times as a Sequential.

        NOTE(review): the same block *instance* is appended repeatedly,
        so all repetitions share weights — confirm this is intentional.
        """
        layers = []
        for _ in range(blocks):
            layers.append(block)
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the super-resolved image: skip branch + residual branch."""
        convt_I1 = self.convt_I1(x)
        out = self.relu(self.conv_input(x))
        convt_F1 = self.convt_F1(out)
        convt_out = self.relu_transpose(self.Transpose(convt_F1))
        convt_R1 = self.convt_R1(convt_out)
        HR = convt_I1 + convt_R1
        return HR
class L1_Charbonnier_loss(nn.Module):
    """Charbonnier penalty: a smooth, differentiable variant of L1 loss,
    summed (not averaged) over all elements."""

    def __init__(self):
        super(L1_Charbonnier_loss, self).__init__()
        # Small constant keeping the square root differentiable at zero.
        self.eps = 1e-6

    def forward(self, X, Y):
        residual = X - Y
        return torch.sqrt(residual * residual + self.eps).sum()
class SKBlock(nn.Module):
    """Residual block: two 3x3 convolutions (PReLU activations), optional
    selective-kernel refinement, channel attention, and an identity skip."""

    def __init__(self, planes, stride=1, use_sk=True):
        super(SKBlock, self).__init__()
        # When False, the SK refinement is skipped in forward().
        self.use_sk = use_sk
        self.conv1 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.relu1 = nn.PReLU()
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.relu2 = nn.PReLU()
        self.sk = SKLayer(planes)
        self.channelAttention = channelAttention(planes, planes)

    def forward(self, x):
        residual = x
        out = self.relu1(self.conv1(x))
        out = self.relu2(self.conv2(out))
        if self.use_sk:
            out = self.sk(out)
        # NOTE(review): source indentation was mangled — it is ambiguous
        # whether channel attention was also gated by use_sk; applied
        # unconditionally here, matching its unconditional construction.
        out = self.channelAttention(out)
        out += residual
        return out
class SKLayer(nn.Module):
    """Selective-kernel layer: M parallel grouped convolutions with
    increasing dilation, fused and re-weighted per channel by a softmax
    over the M branches, then refined by channel attention.

    NOTE(review): the grouped convolutions use groups=32, so ``channel``
    must be divisible by 32 — confirm with callers.
    """

    def __init__(self, channel, stride=1, M=2, r=16, L=32):
        super(SKLayer, self).__init__()
        # M: number of parallel branches; r: channel reduction ratio;
        # L: lower bound for the reduced dimension d.
        self.M = M
        self.out_channels = channel
        d = max(channel//r, L)
        self.conv = nn.ModuleList()
        for i in range(M):
            # Branch i uses dilation (and matching padding) of 1 + i,
            # giving each branch a different receptive field.
            self.conv.append(nn.Sequential(nn.Conv2d(channel, channel, 3, stride, padding=1+i, dilation=1+i, groups=32, bias=False),
                                           nn.PReLU()))
        self.global_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Sequential(nn.Conv2d(channel, d, 1, bias=False),
                                 nn.PReLU())
        self.fc2 = nn.Conv2d(d, channel*M, 1, 1, bias=False)
        self.softmax = nn.Softmax(dim=1)
        self.channelAttention = channelAttention(channel, channel)

    def forward(self, xx):
        batch_size = xx.size(0)
        output = []
        # split: run every branch on the same input
        for i, conv in enumerate(self.conv):
            output.append(conv(xx))
        # fusion: element-wise sum of all branches, then squeeze to a
        # per-channel descriptor and produce M attention logits per channel
        U = reduce(lambda x, y: x+y, output)
        s = self.global_pool(U)
        z = self.fc1(s)
        a_b = self.fc2(z)
        a_b = a_b.reshape(batch_size, self.M, self.out_channels, -1)
        a_b = self.softmax(a_b)
        # the part of selection: weight each branch by its softmax scores
        a_b = list(a_b.chunk(self.M, dim=1))  # split to a and b
        a_b = list(map(lambda x: x.reshape(batch_size, self.out_channels, 1, 1), a_b))
        V = list(map(lambda x, y: x*y, output, a_b))
        V = reduce(lambda x, y: x+y, V)
        V = self.channelAttention(V)
        return V
class channelAttention(nn.Module):
    """Squeeze-and-excitation style channel attention.

    The input is first expanded 4x in channels, a per-channel descriptor
    is squeezed out, passed through a bottleneck (down/up 1x1 convs with
    a swish-like gate), and the resulting sigmoid weights rescale the
    expanded features before projecting back to ``outChannels``.
    """

    def __init__(self, inChannels, outChannels):
        super(channelAttention, self).__init__()
        self.swish = nn.Sigmoid()
        self.channel_squeeze = nn.AdaptiveAvgPool2d(1)
        self.conv_down = nn.Conv2d(inChannels * 4, inChannels // 4, kernel_size=1, bias=False)
        self.conv_up = nn.Conv2d(inChannels // 4, inChannels * 4, kernel_size=1, bias=False)
        self.sig = nn.Sigmoid()
        self.trans1 = nn.Sequential(
            nn.Conv2d(in_channels=inChannels, out_channels=inChannels * 4, kernel_size=1, stride=1, padding=0, bias=False),
            nn.PReLU(),
        )
        self.trans2 = nn.Sequential(
            nn.Conv2d(in_channels=inChannels * 4, out_channels=outChannels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.PReLU(),
        )

    def forward(self, x):
        expanded = self.trans1(x)
        squeezed = self.conv_down(self.channel_squeeze(expanded))
        # x * sigmoid(x): swish-like gating of the squeezed descriptor.
        gated = squeezed * self.swish(squeezed)
        weight = self.sig(self.conv_up(gated))
        return self.trans2(expanded * weight)
| StarcoderdataPython |
3593604 | '''
Og é um homem das cavernas com vários filhos e filhas, e ele quer contar todos eles. Og conta seus filhos com sua mão esquerda e suas filhas com sua mão direita.
Entretanto, Og não é inteligente, e não sabe somar os dois números. Assim, ele pediu para você escrever um programa que realize a soma.
Entrada
A entrada contém vários casos de teste. Cada caso de teste consiste em uma linha contendo dois inteiros L e R, separados por um espaço, indicando o número de filhos e de filhas, respectivamente (1 ≤ L, R ≤ 5).
O fim do arquivo de entrada é indicado por L = R = 0.
Saída
Para cada caso de teste, imprima uma linha contendo um inteiro indicando quantos filhos e filhas Og tem ao todo.
'''
while True:
entrada = str(input())
if entrada == '0 0':
break
entrada = entrada.split()
for k, v in enumerate(entrada):
entrada[k] = int(v)
print(sum(entrada))
| StarcoderdataPython |
# Script : areas_of_rectangles
# Description : This program asks the user for the length and width
#               of two rectangles and then tells the user which rectangle
#               has the greater area, or if the areas are the same, using
#               the formula Area = Length * Width
# Programmer : <NAME>
# Date : 19.05.16
# Defines the main function
def main():
    """Read the dimensions of two rectangles and report which has the
    greater area (or that the areas are equal)."""
    print("This program asks the user for the length and width")
    print("of two rectangles and then tell the user which rectangle")
    print("has the greater area, or if the areas are the same using the formula")
    print("Area = Length * Breadth")
    print()
    # Ask the user for the length and width of both rectangles.
    length1 = float(input("Please Enter the length for rectangle 1: "))
    width1 = float(input("Please Enter the width for rectangle 1: "))
    print()
    # Prompt fixed for consistency (was "rectangle2 : ").
    length2 = float(input("Please Enter the length for rectangle 2: "))
    width2 = float(input("Please Enter the Width for rectangle 2: "))
    print()
    # Calculates the area of rectangle 1
    area1 = length1 * width1
    print("The Area of Rectangle 1 is", area1)
    print()
    # Calculates the area of rectangle 2
    area2 = length2 * width2
    print("The Area of Rectangle 2 is", area2)
    print()
    # Report which rectangle is bigger, or whether they are the same.
    if area1 == area2:
        print("The Areas are the same")
    elif area2 > area1:
        print("Rectangle 2 has the greater Area")
    else:
        print("Rectangle 1 has the greater Area")
# Calls the main function only when executed as a script, so importing
# this module does not trigger the interactive prompts.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6458215 | <reponame>insad-video/marsha
"""Structure of Video related models API responses with Django Rest Framework serializers."""
from datetime import timedelta
from urllib.parse import quote_plus
from django.conf import settings
from django.urls import reverse
from django.utils import timezone
from django.utils.text import slugify
from rest_framework import serializers
from ..defaults import (
ENDED,
HARVESTED,
IDLE,
JITSI,
LIVE_CHOICES,
LIVE_TYPE_CHOICES,
RUNNING,
STOPPED,
)
from ..models import Thumbnail, Video
from ..utils import cloudfront_utils, jitsi_utils, time_utils, xmpp_utils
from ..utils.url_utils import build_absolute_uri_behind_proxy
from .base import TimestampField, get_video_cloudfront_url_params
from .playlist import PlaylistLiteSerializer
from .shared_live_media import SharedLiveMediaSerializer
from .thumbnail import ThumbnailSerializer
from .timed_text_track import TimedTextTrackSerializer
class UpdateLiveStateSerializer(serializers.Serializer):
    """A serializer to validate data submitted on the UpdateLiveState API endpoint."""

    # Only these live-state transitions may be reported on this endpoint.
    state = serializers.ChoiceField(
        tuple(c for c in LIVE_CHOICES if c[0] in (RUNNING, STOPPED, HARVESTED))
    )
    # NOTE(review): presumably AWS CloudWatch / Lambda identifiers used for
    # tracing the originating event — confirm against the API view.
    logGroupName = serializers.CharField()
    requestId = serializers.CharField()
    # Optional extra payload forwarded alongside the state change.
    extraParameters = serializers.DictField(allow_null=True, required=False)
class InitLiveStateSerializer(serializers.Serializer):
    """A serializer to validate data submitted on the initiate-live API endpoint."""

    # Kind of live to start (one of the configured live types, e.g. jitsi).
    type = serializers.ChoiceField(LIVE_TYPE_CHOICES)
class VideoBaseSerializer(serializers.ModelSerializer):
    """Base Serializer to factorize common Video attributes."""

    # Cached Thumbnail instance for the video being serialized (set once in
    # to_representation to avoid repeated database queries).
    thumbnail_instance = None

    class Meta:  # noqa
        model = Video
        fields = (
            "urls",
            "thumbnail",
            "is_ready_to_show",
        )
        read_only_fields = (
            "urls",
            "is_ready_to_show",
        )

    urls = serializers.SerializerMethodField()
    thumbnail = serializers.SerializerMethodField()
    is_ready_to_show = serializers.BooleanField(read_only=True)

    def to_representation(self, instance):
        """
        Object instance -> Dict of primitive datatypes.
        Try to fetch the existing thumbnail related to the current video.
        If a thumbnail exists, we keep it on the serializer instance
        so it can be reused several times without hitting the database
        again and again.
        """
        try:
            self.thumbnail_instance = instance.thumbnail.get()
        except Thumbnail.DoesNotExist:
            pass
        return super().to_representation(instance)

    def get_thumbnail(self, _):
        """Return a serialized thumbnail if it exists."""
        if self.thumbnail_instance:
            return ThumbnailSerializer(self.thumbnail_instance).data
        return None

    def get_urls(self, obj):
        """Urls of the video for each type of encoding.
        Parameters
        ----------
        obj : Type[models.Video]
            The video that we want to serialize
        Returns
        -------
        Dictionary or None
            A dictionary of all urls for:
                - mp4 encodings of the video in each resolution
                - jpeg thumbnails of the video in each resolution
                - manifest of the HLS encodings of the video
            For a video in live mode only the HLS url is added
            None if the video is still not uploaded to S3 with success
        """
        if not self.context.get("is_admin") and obj.live_state == HARVESTED:
            return None
        if obj.live_info is not None and obj.live_info.get("mediapackage"):
            # Live in progress: only the Adaptive Bit Rate manifest is exposed.
            return {
                "manifests": {
                    "hls": obj.live_info["mediapackage"]["endpoints"]["hls"]["url"],
                },
                "mp4": {},
                "thumbnails": {},
            }
        if obj.uploaded_on is None:
            return None
        thumbnail_urls = {}
        if self.thumbnail_instance and self.thumbnail_instance.uploaded_on is not None:
            thumbnail_serialized = ThumbnailSerializer(self.thumbnail_instance)
            thumbnail_urls.update(thumbnail_serialized.data.get("urls"))
        urls = {"mp4": {}, "thumbnails": {}}
        base = f"{settings.AWS_S3_URL_PROTOCOL}://{settings.CLOUDFRONT_DOMAIN}/{obj.pk}"
        stamp = time_utils.to_timestamp(obj.uploaded_on)
        if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:
            params = get_video_cloudfront_url_params(obj.pk)
        filename = f"{slugify(obj.playlist.title)}_{stamp}.mp4"
        # Fix: interpolate the computed filename into the Content-Disposition
        # header (it was previously built with a literal placeholder and the
        # filename variable was never used).
        content_disposition = quote_plus(f"attachment; filename={filename}")
        for resolution in obj.resolutions:
            # MP4
            mp4_url = (
                f"{base}/mp4/{stamp}_{resolution}.mp4"
                f"?response-content-disposition={content_disposition}"
            )
            # Thumbnails
            urls["thumbnails"][resolution] = thumbnail_urls.get(
                resolution,
                f"{base}/thumbnails/{stamp}_{resolution}.0000000.jpg",
            )
            # Sign the urls of mp4 videos only if the functionality is activated
            if settings.CLOUDFRONT_SIGNED_URLS_ACTIVE:
                mp4_url = cloudfront_utils.build_signed_url(mp4_url, params)
            urls["mp4"][resolution] = mp4_url
        if obj.live_state != HARVESTED:
            # Adaptive Bit Rate manifests
            urls["manifests"] = {
                "hls": f"{base}/cmaf/{stamp}.m3u8",
            }
        # Previews
        urls["previews"] = f"{base}/previews/{stamp}_100.jpg"
        return urls
class VideoSerializer(VideoBaseSerializer):
    """Serializer to display a video model with all its resolution options."""

    class Meta:  # noqa
        model = Video
        fields = (
            "active_shared_live_media",
            "active_shared_live_media_page",
            "active_stamp",
            "allow_recording",
            "description",
            "estimated_duration",
            "has_chat",
            "has_live_media",
            "id",
            "is_public",
            "is_ready_to_show",
            "is_recording",
            "is_scheduled",
            "join_mode",
            "timed_text_tracks",
            "thumbnail",
            "title",
            "upload_state",
            "urls",
            "show_download",
            "should_use_subtitle_as_transcript",
            "starting_at",
            "has_transcript",
            "participants_asking_to_join",
            "participants_in_discussion",
            "playlist",
            "recording_time",
            "live_info",
            "live_state",
            "live_type",
            "xmpp",
            "shared_live_medias",
        )
        read_only_fields = (
            "active_shared_live_media",
            "active_shared_live_media_page",
            "id",
            "active_stamp",
            "is_ready_to_show",
            "is_recording",
            "is_scheduled",
            "upload_state",
            "urls",
            "has_transcript",
            "recording_time",
            "live_info",
            "live_state",
            "participants_asking_to_join",
            "participants_in_discussion",
        )

    active_shared_live_media = SharedLiveMediaSerializer(read_only=True)
    active_stamp = TimestampField(
        source="uploaded_on", required=False, allow_null=True, read_only=True
    )
    timed_text_tracks = TimedTextTrackSerializer(
        source="timedtexttracks", many=True, read_only=True
    )
    shared_live_medias = SharedLiveMediaSerializer(many=True, read_only=True)
    playlist = PlaylistLiteSerializer(read_only=True)
    has_transcript = serializers.SerializerMethodField()
    live_info = serializers.SerializerMethodField()
    xmpp = serializers.SerializerMethodField()
    title = serializers.CharField(allow_blank=False, allow_null=False, max_length=255)

    def validate_starting_at(self, value):
        """Add extra controls for starting_at field."""
        # NOTE(review): assumes update (self.instance is set) — confirm this
        # serializer is never used for creation.
        # Field starting_at has a new value
        if value != self.instance.starting_at:
            # New value is past, it can't be updated
            if value is not None and value < timezone.now():
                raise serializers.ValidationError(
                    f"{value} is not a valid date, date should be planned after!"
                )
            # Check live_state is in IDLE state as expected when scheduling a live
            if self.instance.live_state != IDLE:
                raise serializers.ValidationError(
                    "Field starting_at can't be changed, video live is "
                    "not in default mode."
                )
            # Initial value is already past, it can't be updated anymore
            if (
                self.instance.starting_at is not None
                and self.instance.starting_at < timezone.now()
            ):
                raise serializers.ValidationError(
                    f"Field starting_at {self.instance.starting_at} is already "
                    "past and can't be updated!"
                )
        return value

    def validate_estimated_duration(self, value):
        """Reject negative durations.

        A ``None`` value (clearing the estimate) is accepted as-is; the
        previous implementation dereferenced ``value.days`` and crashed
        with an AttributeError when ``None`` was submitted.
        """
        if value is not None and value != self.instance.estimated_duration:
            if value.days < 0:
                raise serializers.ValidationError(
                    "Ensure this value is greater than or equal to 0."
                )
        return value

    def get_xmpp(self, obj):
        """Chat info.
        Parameters
        ----------
        obj : Type[models.Video]
            The video that we want to serialize
        Returns
        -------
        Dictionary or None
            A dictionary containing all info needed to manage a connection
            to an xmpp server, or None when chat is disabled or the live
            has not started.
        """
        if (
            settings.LIVE_CHAT_ENABLED
            and obj.live_state
            and obj.live_state not in [IDLE]
        ):
            # Admins get an "owner" chat role; everybody else is a "member".
            token = xmpp_utils.generate_jwt(
                str(obj.id),
                "owner" if self.context.get("is_admin") else "member",
                timezone.now() + timedelta(days=1),
            )
            return {
                "bosh_url": xmpp_utils.add_jwt_token_to_url(
                    settings.XMPP_BOSH_URL, token
                ),
                "converse_persistent_store": settings.XMPP_CONVERSE_PERSISTENT_STORE,
                "websocket_url": xmpp_utils.add_jwt_token_to_url(
                    settings.XMPP_WEBSOCKET_URL, token
                ),
                "conference_url": f"{obj.id}@{settings.XMPP_CONFERENCE_DOMAIN}",
                "jid": settings.XMPP_DOMAIN,
            }
        return None

    def get_live_info(self, obj):
        """Live streaming informations.
        Parameters
        ----------
        obj : Type[models.Video]
            The video that we want to serialize
        Returns
        -------
        Dictionnary
            A dictionnary containing all info needed to manage a live stream for an admin.
            For other users, an empty dictionnary is returned.
            The data are filtered to only return RTMP endpoints and jitsi configuration if needed.
            All other data are sensitive, used only by the backend and must never be exposed.
        """
        if obj.live_state in [None, ENDED]:
            return {}
        live_info = {}
        if obj.live_info is not None:
            # Timestamps are safe to expose to everyone.
            for attribute in ["paused_at", "started_at", "stopped_at"]:
                if obj.live_info.get(attribute):
                    live_info.update({attribute: obj.live_info[attribute]})
        if not self.context.get("is_admin"):
            return live_info
        if obj.live_info is not None and obj.live_info.get("medialive"):
            # Admins also need the RTMP ingest endpoints.
            live_info.update(
                {
                    "medialive": {
                        "input": {
                            "endpoints": obj.live_info["medialive"]["input"][
                                "endpoints"
                            ],
                        }
                    },
                }
            )
        if obj.live_type == JITSI:
            jitsi = {
                "external_api_url": settings.JITSI_EXTERNAL_API_URL,
                "domain": settings.JITSI_DOMAIN,
                "config_overwrite": settings.JITSI_CONFIG_OVERWRITE,
                "interface_config_overwrite": settings.JITSI_INTERFACE_CONFIG_OVERWRITE,
                "room_name": str(obj.pk),
            }
            if settings.JITSI_JWT_APP_ID and settings.JITSI_JWT_APP_SECRET:
                jitsi["token"] = jitsi_utils.generate_token(
                    str(obj.pk), self.context.get("is_admin")
                )
            live_info.update({"jitsi": jitsi})
        return live_info

    def get_has_transcript(self, obj):
        """Compute if should_use_subtitle_as_transcript behavior is disabled.
        Parameters
        ----------
        obj : Type[models.Video]
            The video that we want to serialize
        Returns
        -------
        Boolean
            If there is at least one transcript ready to be shown the method will return True.
            Returns False otherwise.
        """
        return obj.timedtexttracks.filter(mode="ts", uploaded_on__isnull=False).exists()
class VideoSelectLTISerializer(VideoBaseSerializer):
    """A serializer to display a Video resource for LTI select content request."""

    class Meta:  # noqa
        model = Video
        fields = (
            "id",
            "is_ready_to_show",
            "thumbnail",
            "title",
            "description",
            "upload_state",
            "urls",
            "lti_url",
        )
        # Everything is read-only: this serializer only presents videos to
        # the LTI content-selection UI.
        read_only_fields = (
            "id",
            "is_ready_to_show",
            "thumbnail",
            "title",
            "description",
            "upload_state",
            "urls",
            "lti_url",
        )

    lti_url = serializers.SerializerMethodField()

    def get_lti_url(self, obj):
        """LTI Url of the Video.
        Parameters
        ----------
        obj : Type[models.Video]
            The document that we want to serialize
        Returns
        -------
        String
            the LTI url to be used by LTI consumers
        """
        # Build an absolute URL, honoring any reverse-proxy headers.
        return build_absolute_uri_behind_proxy(
            self.context["request"],
            reverse("video_lti_view", args=[obj.id]),
        )
class ParticipantSerializer(serializers.Serializer):
    """A serializer to validate a participant submitted on the live participants API endpoint."""

    # Display name and client-provided identifier of the participant;
    # both are mandatory and capped at 128 characters.
    name = serializers.CharField(required=True, max_length=128)
    id = serializers.CharField(required=True, max_length=128)
| StarcoderdataPython |
354437 | <filename>indico/modules/users/legacy.py
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask_multipass import IdentityInfo
from indico.legacy.common.cache import GenericCache
from indico.legacy.fossils.user import IAvatarFossil, IAvatarMinimalFossil
from indico.modules.auth import Identity
from indico.modules.users import User, logger
from indico.util.caching import memoize_request
from indico.util.fossilize import Fossilizable, fossilizes
from indico.util.locators import locator_property
from indico.util.string import encode_utf8, return_ascii, to_unicode
# Mapping from legacy Avatar fossil attribute names to the corresponding
# field names on the modern ``User`` model.
AVATAR_FIELD_MAP = {
    'email': 'email',
    'name': 'first_name',
    'surName': 'last_name',
    'organisation': 'affiliation'
}
class AvatarUserWrapper(Fossilizable):
"""Avatar-like wrapper class that holds a DB-stored user."""
fossilizes(IAvatarFossil, IAvatarMinimalFossil)
def __init__(self, user_id):
self.id = str(user_id)
@property
@memoize_request
def _original_user(self):
# A proper user, with an id that can be mapped directly to sqlalchemy
if isinstance(self.id, int) or self.id.isdigit():
return User.get(int(self.id))
# A user who had no real indico account but an ldap identifier/email.
# In this case we try to find his real user and replace the ID of this object
# with that user's ID.
data = self.id.split(':')
# TODO: Once everything is in SQLAlchemy this whole thing needs to go away!
user = None
if data[0] == 'LDAP':
identifier = data[1]
email = data[2]
# You better have only one ldap provider or at least different identifiers ;)
identity = Identity.query.filter(Identity.provider != 'indico', Identity.identifier == identifier).first()
if identity:
user = identity.user
elif data[0] == 'Nice':
email = data[1]
else:
return None
if not user:
user = User.query.filter(User.all_emails == email).first()
if user:
self._old_id = self.id
self.id = str(user.id)
logger.info("Updated legacy user id (%s => %s)", self._old_id, self.id)
return user
@property
@memoize_request
def user(self):
user = self._original_user
if user is not None and user.is_deleted and user.merged_into_id is not None:
while user.merged_into_id is not None:
user = user.merged_into_user
return user
def getId(self):
return str(self.user.id) if self.user else str(self.id)
@property
def api_key(self):
return self.user.api_key if self.user else None
def getStatus(self):
return 'deleted' if not self.user or self.user.is_deleted else 'activated'
def isActivated(self):
# All accounts are activated during the transition period
return True
def isDisabled(self):
# The user has been blocked or deleted (due to merge)
return not self.user or self.user.is_blocked or self.user.is_deleted
def setName(self, name, reindex=False):
self.user.first_name = to_unicode(name)
@encode_utf8
def getName(self):
return self.user.first_name if self.user else ''
getFirstName = getName
def setSurName(self, surname, reindex=False):
self.user.last_name = to_unicode(surname)
@encode_utf8
def getSurName(self):
return self.user.last_name if self.user else ''
getFamilyName = getSurName
@encode_utf8
def getFullName(self):
if not self.user:
return ''
return self.user.get_full_name(last_name_first=True, last_name_upper=True,
abbrev_first_name=False, show_title=False)
@encode_utf8
def getStraightFullName(self, upper=True):
if not self.user:
return ''
return self.user.get_full_name(last_name_first=False, last_name_upper=upper,
abbrev_first_name=False, show_title=False)
getDirectFullNameNoTitle = getStraightFullName
@encode_utf8
def getAbrName(self):
if not self.user:
return ''
return self.user.get_full_name(last_name_first=True, last_name_upper=False,
abbrev_first_name=True, show_title=False)
@encode_utf8
def getStraightAbrName(self):
if not self.user:
return ''
return self.user.get_full_name(last_name_first=False, last_name_upper=False,
abbrev_first_name=True, show_title=False)
def setOrganisation(self, affiliation, reindex=False):
self.user.affiliation = to_unicode(affiliation)
@encode_utf8
def getOrganisation(self):
return self.user.affiliation if self.user else ''
getAffiliation = getOrganisation
def setTitle(self, title):
self.user.title = to_unicode(title)
@encode_utf8
def getTitle(self):
return self.user.title if self.user else ''
def setTimezone(self, tz):
self.user.settings.set('timezone', to_unicode(tz))
@encode_utf8
def getAddress(self):
return self.user.address if self.user else ''
def setAddress(self, address):
self.user.address = to_unicode(address)
    def getEmails(self):
        """Set of all email addresses of the user (empty set when the user is gone)."""
        # avoid 'stale association proxy'
        user = self.user
        return set(user.all_emails) if user else set()
    @encode_utf8
    def getEmail(self):
        """Primary email address, or '' without a user."""
        return self.user.email if self.user else ''
    # Attribute-style access kept for legacy templates/code.
    email = property(getEmail)
    def setEmail(self, email, reindex=False):
        """Set the primary email.  `reindex` is accepted for API compatibility but ignored here."""
        self.user.email = to_unicode(email)
def hasEmail(self, email):
user = self.user # avoid 'stale association proxy'
if not user:
return False
return email.lower() in user.all_emails
    @encode_utf8
    def getTelephone(self):
        """Phone number of the wrapped user, or '' without a user."""
        return self.user.phone if self.user else ''
    def getFax(self):
        """Always '' -- the fax number is no longer stored."""
        # Some older code still clones fax, etc...
        # it's never shown in the interface anyway.
        return ''
    # Legacy alias for getTelephone.
    getPhone = getTelephone
    def setTelephone(self, phone):
        """Set the user's phone number."""
        self.user.phone = to_unicode(phone)
    setPhone = setTelephone
    def canUserModify(self, avatar):
        """Whether *avatar* may modify this user: same identity or an admin."""
        if not self.user:
            return False
        # NOTE(review): ids are compared as strings and admin status is read via
        # avatar.user.is_admin -- presumably `avatar` is another AvatarUserWrapper;
        # confirm against the callers.
        return avatar.id == str(self.user.id) or avatar.user.is_admin
    @locator_property
    def locator(self):
        """URL locator dict ({'userId': id}) used to build links to this user; empty if gone."""
        d = {}
        if self.user:
            d['userId'] = self.user.id
        return d
def isAdmin(self):
if not self.user:
return False
return self.user.is_admin
    @property
    def as_new(self):
        """The underlying new-style user object (may be None)."""
        return self.user
def __eq__(self, other):
if not isinstance(other, (AvatarUserWrapper, User)):
return False
elif str(self.id) == str(other.id):
return True
elif self.user:
return str(self.user.id) == str(other.id)
else:
return False
    def __ne__(self, other):
        """Inverse of __eq__ (Python 2 does not derive it automatically)."""
        return not (self == other)
    def __hash__(self):
        """Hash on the stringified id, consistent with __eq__'s id comparison."""
        return hash(str(self.id))
    @return_ascii
    def __repr__(self):
        """Debug representation; marks deleted and merged users."""
        if self.user is None:
            return u'<AvatarUserWrapper {}: user does not exist>'.format(self.id)
        elif self._original_user.merged_into_user:
            # Merged account: show the original identity plus the merge-target user id.
            return u'<AvatarUserWrapper {}: {} ({}) [{}]>'.format(
                self.id, self._original_user.full_name, self._original_user.email, self.user.id)
        else:
            return u'<AvatarUserWrapper {}: {} ({})>'.format(self.id, self.user.full_name, self.user.email)
class AvatarProvisionalWrapper(Fossilizable):
    """Wrap provisional identity data for users that are not in the DB yet.

    Backed by an `IdentityInfo` whose `.data` mapping holds the profile
    fields (email, first_name, last_name, phone, affiliation).
    """

    fossilizes(IAvatarFossil, IAvatarMinimalFossil)

    def __init__(self, identity_info):
        self.identity_info = identity_info
        self.data = identity_info.data

    def getId(self):
        """Synthetic id of the form '<provider>:<identifier>'."""
        return u"{}:{}".format(self.identity_info.provider.name, self.identity_info.identifier)

    id = property(getId)

    @encode_utf8
    def getEmail(self):
        return self.data['email']

    def getEmails(self):
        # Provisional users only ever have a single email.
        return [self.data['email']]

    @encode_utf8
    def getFirstName(self):
        return self.data.get('first_name', '')

    @encode_utf8
    def getFamilyName(self):
        return self.data.get('last_name', '')

    @encode_utf8
    def getStraightFullName(self, upper=False):
        """Return 'first last'; `upper` uppercases the last name."""
        last_name = to_unicode(self.data.get('last_name', ''))
        if upper:
            last_name = last_name.upper()
        return u'{} {}'.format(to_unicode(self.data.get('first_name', '')), last_name)

    def getTitle(self):
        return ''

    @encode_utf8
    def getTelephone(self):
        return self.data.get('phone', '')

    getPhone = getTelephone

    @encode_utf8
    def getOrganisation(self):
        return self.data.get('affiliation', '')

    getAffiliation = getOrganisation

    def getFax(self):
        # Fax is not tracked for provisional users.
        return None

    def getAddress(self):
        return u''

    @return_ascii
    def __repr__(self):
        return u'<AvatarProvisionalWrapper {}: {} ({first_name} {last_name})>'.format(
            self.identity_info.provider.name,
            self.identity_info.identifier,
            **self.data.to_dict())
def search_avatars(criteria, exact=False, search_externals=False):
    """Search users matching *criteria* and return them wrapped as avatars.

    NOTE: legacy Python 2 code (`viewvalues`/`iteritems`).  External identity
    hits are cached so they can be resolved when later selected.
    """
    from indico.modules.users.util import search_users

    # Nothing to search for -> skip the (expensive) user search entirely.
    if not any(criteria.viewvalues()):
        return []

    def _process_identities(obj):
        if isinstance(obj, IdentityInfo):
            # Cache the external identity's data under '<provider>:<identifier>'.
            GenericCache('pending_identities').set('{}:{}'.format(obj.provider.name, obj.identifier), obj.data)
            return AvatarProvisionalWrapper(obj)
        else:
            return obj.as_avatar

    # Translate legacy avatar field names into the new search fields.
    results = search_users(exact=exact, external=search_externals,
                           **{AVATAR_FIELD_MAP[k]: v for (k, v) in criteria.iteritems() if v})

    return [_process_identities(obj) for obj in results]
import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset.dataset import AVDataset
from models.basic_model import AVClassifier
from utils.utils import setup_seed, weight_init
def get_arguments():
    """Build and parse the CLI arguments for OGM / OGM-GE multimodal training.

    Returns:
        argparse.Namespace with all training/evaluation options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', required=True, type=str,
                        help='VGGSound, KineticSound, CREMAD, AVE')
    parser.add_argument('--modulation', default='OGM_GE', type=str,
                        choices=['Normal', 'OGM', 'OGM_GE'])
    parser.add_argument('--fusion_method', default='concat', type=str,
                        choices=['sum', 'concat', 'gated', 'film'])
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--epochs', default=100, type=int)
    parser.add_argument('--optimizer', default='sgd', type=str, choices=['sgd', 'adam'])
    parser.add_argument('--learning_rate', default=0.001, type=float, help='initial learning rate')
    parser.add_argument('--lr_decay_step', default=70, type=int, help='where learning rate decays')
    parser.add_argument('--lr_decay_ratio', default=0.1, type=float, help='decay coefficient')
    parser.add_argument('--modulation_starts', default=0, type=int, help='where modulation begins')
    parser.add_argument('--modulation_ends', default=50, type=int, help='where modulation ends')
    parser.add_argument('--alpha', required=True, type=float, help='alpha in OGM-GE')
    parser.add_argument('--ckpt_path', required=True, type=str, help='path to save trained models')
    parser.add_argument('--train', action='store_true', help='turn on train mode')
    # BUG FIX: `type=bool` converts ANY non-empty string (including "False") to
    # True.  Parse the common truthy spellings instead so that
    # `--use_tensorboard False` actually disables tensorboard.
    parser.add_argument('--use_tensorboard', default=False,
                        type=lambda s: str(s).lower() in ('1', 'true', 'yes', 'y'),
                        help='whether to visualize')
    parser.add_argument('--tensorboard_path', required=True, type=str, help='path to save tensorboard logs')
    parser.add_argument('--random_seed', default=0, type=int)
    parser.add_argument('--gpu_ids', default='0, 1', type=str, help='GPU ids')

    return parser.parse_args()
def train_epoch(args, epoch, model, device, dataloader, optimizer, scheduler, writer=None):
    """Run one training epoch with optional OGM / OGM-GE gradient modulation.

    Args:
        args: parsed CLI namespace (uses modulation, fusion_method, alpha,
            modulation_starts/ends, use_tensorboard).
        epoch: current epoch index (controls the modulation window).
        model: DataParallel-wrapped AVClassifier.
        writer: optional SummaryWriter for modulation diagnostics.

    Returns:
        Tuple of mean (total, audio-only, visual-only) losses over the epoch.
    """
    criterion = nn.CrossEntropyLoss()
    softmax = nn.Softmax(dim=1)
    relu = nn.ReLU(inplace=True)
    tanh = nn.Tanh()

    model.train()
    print("Start training ... ")

    _loss = 0
    _loss_a = 0
    _loss_v = 0

    for step, (spec, image, label, name) in enumerate(dataloader):
        spec = spec.to(device)
        image = image.to(device)
        label = label.to(device)

        optimizer.zero_grad()

        # TODO: make it simpler and easier to extend
        a, v, out = model(spec.unsqueeze(1).float(), image.float())

        # Reconstruct per-modality logits from the shared fusion head so the
        # uni-modal confidences can be measured; the bias is split evenly.
        if args.fusion_method == 'sum':
            out_v = (torch.mm(v, torch.transpose(model.module.fusion_module.fc_y.weight, 0, 1)) +
                     model.module.fusion_module.fc_y.bias / 2)
            out_a = (torch.mm(a, torch.transpose(model.module.fusion_module.fc_x.weight, 0, 1)) +
                     model.module.fusion_module.fc_x.bias / 2)
        else:
            # concat fusion: the first 512 input columns are audio, the rest visual
            out_v = (torch.mm(v, torch.transpose(model.module.fusion_module.fc_out.weight[:, 512:], 0, 1)) +
                     model.module.fusion_module.fc_out.bias / 2)
            out_a = (torch.mm(a, torch.transpose(model.module.fusion_module.fc_out.weight[:, :512], 0, 1)) +
                     model.module.fusion_module.fc_out.bias / 2)

        loss = criterion(out, label)
        loss_v = criterion(out_v, label)
        loss_a = criterion(out_a, label)
        loss.backward()

        if args.modulation == 'Normal':
            # no modulation, regular optimization
            pass
        else:
            # Per-modality confidence: summed probability of the true class.
            score_v = sum([softmax(out_v)[i][label[i]] for i in range(out_v.size(0))])
            score_a = sum([softmax(out_a)[i][label[i]] for i in range(out_a.size(0))])

            ratio_v = score_v / score_a
            ratio_a = 1 / ratio_v

            """
            Below is the Eq.(10) in our CVPR paper:
                    1 - tanh(alpha * rho_t_u), if rho_t_u > 1
            k_t_u =
                    1,                         else
            coeff_u is k_t_u, where t means iteration steps and u is modality indicator, either a or v.
            """
            if ratio_v > 1:
                coeff_v = 1 - tanh(args.alpha * relu(ratio_v))
                coeff_a = 1
            else:
                coeff_a = 1 - tanh(args.alpha * relu(ratio_a))
                coeff_v = 1

            if args.use_tensorboard:
                iteration = epoch * len(dataloader) + step
                writer.add_scalar('data/ratio v', ratio_v, iteration)
                writer.add_scalar('data/coefficient v', coeff_v, iteration)
                writer.add_scalar('data/coefficient a', coeff_a, iteration)

            # BUG FIX: the original test `modulation_starts >= epoch >= modulation_ends`
            # can only hold when starts >= ends, so with the defaults (0, 50) the
            # coefficients were never reset and modulation ran for ALL epochs.
            # Modulation is meant to be active only inside [starts, ends].
            if not (args.modulation_starts <= epoch <= args.modulation_ends):
                coeff_v = 1
                coeff_a = 1

            # Scale the conv-layer (4-D weight) gradients of the dominant
            # modality down (OGM); OGM_GE additionally injects Gaussian noise
            # with the gradient's own std ("generalization enhancement").
            for pname, parms in model.named_parameters():  # renamed: don't shadow the batch `name`
                layer = str(pname).split('.')[1]
                if 'audio' in layer and len(parms.grad.size()) == 4:
                    if args.modulation == 'OGM_GE':
                        parms.grad = parms.grad * coeff_a + \
                                     torch.zeros_like(parms.grad).normal_(0, parms.grad.std().item() + 1e-8)
                    else:
                        parms.grad *= coeff_a
                if 'visual' in layer and len(parms.grad.size()) == 4:
                    if args.modulation == 'OGM_GE':
                        parms.grad = parms.grad * coeff_v + \
                                     torch.zeros_like(parms.grad).normal_(0, parms.grad.std().item() + 1e-8)
                    else:
                        parms.grad *= coeff_v

        optimizer.step()

        _loss += loss.item()
        _loss_a += loss_a.item()
        _loss_v += loss_v.item()

    scheduler.step()

    return _loss / len(dataloader), _loss_a / len(dataloader), _loss_v / len(dataloader)
def valid(args, model, device, dataloader):
    """Evaluate fused, visual-only and audio-only top-1 accuracy.

    Returns:
        (acc_visual, acc_audio, acc_fused) -- note this return order matches
        the original implementation; the call sites unpack it as (acc, acc_a, acc_v).
    """
    softmax = nn.Softmax(dim=1)

    if args.dataset == 'VGGSound':
        n_classes = 309
    elif args.dataset == 'KineticSound':
        n_classes = 31
    elif args.dataset == 'CREMAD':
        n_classes = 6
    elif args.dataset == 'AVE':
        n_classes = 28
    else:
        raise NotImplementedError('Incorrect dataset name {}'.format(args.dataset))

    with torch.no_grad():
        model.eval()
        # TODO: more flexible
        num = [0.0 for _ in range(n_classes)]
        acc = [0.0 for _ in range(n_classes)]
        acc_a = [0.0 for _ in range(n_classes)]
        acc_v = [0.0 for _ in range(n_classes)]

        for step, (spec, image, label, name) in enumerate(dataloader):
            spec = spec.to(device)
            image = image.to(device)
            label = label.to(device)

            # NOTE(review): validation passes two extra positional arguments
            # (label, -1) while train_epoch() passes only the two inputs --
            # confirm the model's forward() really accepts both call forms.
            a, v, out = model(spec.unsqueeze(1).float(), image.float(), label, -1)

            if args.fusion_method == 'sum':
                out_v = (torch.mm(v, torch.transpose(model.module.fusion_module.fc_y.weight, 0, 1)) +
                         model.module.fusion_module.fc_y.bias / 2)
                out_a = (torch.mm(a, torch.transpose(model.module.fusion_module.fc_x.weight, 0, 1)) +
                         model.module.fusion_module.fc_x.bias / 2)
            else:
                out_v = (torch.mm(v, torch.transpose(model.module.fusion_module.fc_out.weight[:, 512:], 0, 1)) +
                         model.module.fusion_module.fc_out.bias / 2)
                out_a = (torch.mm(a, torch.transpose(model.module.fusion_module.fc_out.weight[:, :512], 0, 1)) +
                         model.module.fusion_module.fc_out.bias / 2)

            prediction = softmax(out)
            pred_v = softmax(out_v)
            pred_a = softmax(out_a)

            # Renamed loop locals (was: v/a/ma) to avoid shadowing the model outputs.
            for i in range(label.size(0)):
                max_fused = np.max(prediction[i].cpu().data.numpy())
                max_v = np.max(pred_v[i].cpu().data.numpy())
                max_a = np.max(pred_a[i].cpu().data.numpy())
                num[label[i]] += 1.0

                # Correct when the true class' probability equals the max
                # probability (tolerant argmax comparison).
                if abs(prediction[i].cpu().data.numpy()[label[i]] - max_fused) <= 0.0001:
                    acc[label[i]] += 1.0
                # BUG FIX: the original compared the RAW logits out_v/out_a
                # against the max of the SOFTMAXed predictions, so the
                # uni-modal accuracies were essentially always zero.
                if abs(pred_v[i].cpu().data.numpy()[label[i]] - max_v) <= 0.0001:
                    acc_v[label[i]] += 1.0
                if abs(pred_a[i].cpu().data.numpy()[label[i]] - max_a) <= 0.0001:
                    acc_a[label[i]] += 1.0

    return sum(acc_v) / sum(num), sum(acc_a) / sum(num), sum(acc) / sum(num)
def main():
    """Entry point: parse args, build the AV model, then train or evaluate."""
    args = get_arguments()
    print(args)

    setup_seed(args.random_seed)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
    gpu_ids = list(range(torch.cuda.device_count()))

    device = torch.device('cuda:0')

    model = AVClassifier(args)
    model.apply(weight_init)
    model.to(device)
    model = torch.nn.DataParallel(model, device_ids=gpu_ids)
    model.cuda()

    optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=1e-4)
    scheduler = optim.lr_scheduler.StepLR(optimizer, args.lr_decay_step, args.lr_decay_ratio)

    # All four supported datasets use the same loader class; the original
    # duplicated this construction in four identical branches.
    if args.dataset not in ('VGGSound', 'KineticSound', 'CREMAD', 'AVE'):
        raise NotImplementedError('Incorrect dataset name {}! '
                                  'Only support VGGSound, KineticSound and CREMA-D for now!'.format(args.dataset))
    train_dataset = AVDataset(args, mode='train')
    test_dataset = AVDataset(args, mode='test')

    train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size,
                                  shuffle=True, num_workers=32, pin_memory=True)
    test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size,
                                 shuffle=False, num_workers=32, pin_memory=True)

    if args.train:
        best_acc = 0.0

        for epoch in range(args.epochs):
            print('Epoch: {}: '.format(epoch))

            if args.use_tensorboard:
                writer_path = os.path.join(args.tensorboard_path, args.dataset)
                os.makedirs(writer_path, exist_ok=True)  # race-free vs exists()+mkdir()
                log_name = '{}_{}'.format(args.fusion_method, args.modulation)
                writer = SummaryWriter(os.path.join(writer_path, log_name))

                batch_loss, batch_loss_a, batch_loss_v = train_epoch(args, epoch, model, device,
                                                                     train_dataloader, optimizer, scheduler, writer)
                acc, acc_a, acc_v = valid(args, model, device, test_dataloader)

                writer.add_scalars('Loss', {'Total Loss': batch_loss,
                                            'Audio Loss': batch_loss_a,
                                            'Visual Loss': batch_loss_v}, epoch)
                writer.add_scalars('Evaluation', {'Total Accuracy': acc,
                                                  'Audio Accuracy': acc_a,
                                                  'Visual Accuracy': acc_v}, epoch)
            else:
                batch_loss, batch_loss_a, batch_loss_v = train_epoch(args, epoch, model, device,
                                                                     train_dataloader, optimizer, scheduler)
                acc, acc_a, acc_v = valid(args, model, device, test_dataloader)

            if acc > best_acc:
                best_acc = float(acc)
                os.makedirs(args.ckpt_path, exist_ok=True)

                model_name = 'best_model_of_dataset_{}_{}_alpha_{}_' \
                             'optimizer_{}_modulate_starts_{}_ends_{}_' \
                             'epoch_{}_acc_{}.pth'.format(args.dataset,
                                                          args.modulation,
                                                          args.alpha,
                                                          args.optimizer,
                                                          args.modulation_starts,
                                                          args.modulation_ends,
                                                          epoch, acc)

                saved_dict = {'saved_epoch': epoch,
                              'modulation': args.modulation,
                              'alpha': args.alpha,
                              'fusion': args.fusion_method,
                              'acc': acc,
                              'model': model.state_dict(),
                              'optimizer': optimizer.state_dict(),
                              'scheduler': scheduler.state_dict()}

                save_dir = os.path.join(args.ckpt_path, model_name)
                torch.save(saved_dict, save_dir)
                print('The best model has been saved at {}.'.format(save_dir))
                print("Loss: {:.2f}, Acc: {:.2f}".format(batch_loss, acc))
            else:
                print("Loss: {:.2f}, Acc: {:.2f}, Best Acc: {:.2f}".format(batch_loss, acc, best_acc))
    else:
        # Evaluation only: restore a trained checkpoint first.
        loaded_dict = torch.load(args.ckpt_path)
        modulation = loaded_dict['modulation']
        fusion = loaded_dict['fusion']
        state_dict = loaded_dict['model']

        assert modulation == args.modulation, 'inconsistency between modulation method of loaded model and args !'
        assert fusion == args.fusion_method, 'inconsistency between fusion method of loaded model and args !'

        # BUG FIX: load_state_dict() returns a (missing, unexpected) keys result,
        # NOT the model; rebinding `model` to it broke the subsequent valid() call.
        model.load_state_dict(state_dict)
        print('Trained model loaded!')

        acc, acc_a, acc_v = valid(args, model, device, test_dataloader)
        print('Accuracy: {}, accuracy_a: {}, accuracy_v: {}'.format(acc, acc_a, acc_v))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
4825802 | <gh_stars>0
#!/usr/bin/env python
############################################################################
# Copyright (c) 2015 Saint Petersburg State University
# All Rights Reserved
# See file LICENSE for details.
############################################################################
import os
import sys
pipeline_modules_home = 'src/spades_pipeline/' # os.path.dirname(os.path.realpath(__file__)
sys.path.append(os.path.join(pipeline_modules_home, "common"))
sys.path.append(os.path.join(pipeline_modules_home, "truspades"))
# import alignment
import sam_parser
import break_by_coverage
import SeqIO
def break_contigs(contigs_file, sam_file, output_file):
    """Split contigs at zero-coverage stretches and write the pieces out.

    Reads FASTA contigs and a SAM alignment, breaks each contig wherever the
    read coverage drops to zero for long enough, and writes the broken
    contigs to *output_file*.
    """
    records = list(SeqIO.parse(open(contigs_file, "rU"), "fasta"))
    alignment = sam_parser.Samfile(sam_file)
    # Breaker parameters: K = 100, minimal zero-coverage stretch length = 50.
    breaker = break_by_coverage.ContigBreaker(records, alignment, 100, 50)
    breaker.OutputBroken(output_file)
if __name__ == '__main__':
    # Require exactly the three positional arguments.
    if len(sys.argv) < 4:
        sys.stderr.write("Usage: %s <contigs> <sam_file> <output_filename>\n" % sys.argv[0])
        exit(1)
    contigs_file, sam_file, output_file = sys.argv[1:4]
    break_contigs(contigs_file, sam_file, output_file)
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
import tensorflow as tf
from tensorflow.keras import Model
from functions import *
import os
import numpy as np
# Paths to the pre-extracted numpy arrays (string labels and normalised inputs).
new_path = "D:\\Data\\archive\\numpy"
train_y_number = new_path+"\\train_y_number.npy"
test_y_number = new_path+"\\test_y_number.npy"
train_x_n = new_path+"\\train_x_n.npy"
test_x_n = new_path+"\\test_x_n.npy"
# Load train/test label and feature arrays from disk.
train_y = np.load(train_y_number)
test_y = np.load(test_y_number)
train_x = np.load(train_x_n)
test_x = np.load(test_x_n)
new_path = "D:\\Data\\archive"
# ResNet50
# model_path = new_path+"\\ResNet50v2.h5"
# model = tf.keras.models.load_model(model_path)
# model = Model(inputs = model.input, outputs = model.layers[-2].output)
# x1 = model.predict(train_x,batch_size=16,verbose=1)
# print(x1.shape)
# x_t1 = model.predict(test_x,batch_size=16,verbose=1)
# VGG16
# Load the fine-tuned VGG16 and truncate it before the final classification
# layer so `predict` yields penultimate-layer feature embeddings (x2 / x_t2)
# for the train and test sets respectively.
model_path = new_path+"\\VGG16v7.h5"
model = tf.keras.models.load_model(model_path)
model = Model(inputs = model.input, outputs = model.layers[-2].output)
x2 = model.predict(train_x,batch_size=16,verbose=1)
x_t2 = model.predict(test_x,batch_size=16,verbose=1)
print(x2.shape)
# VGG19
# model_path = new_path+"\\VGG19.h5"
# model = tf.keras.models.load_model(model_path)
# model = Model(inputs = model.input, outputs = model.layers[-2].output)
# x3 = model.predict(train_x,batch_size=16,verbose=1)
# x_t3 = model.predict(test_x,batch_size=16,verbose=1)
# print(x3.shape)
# combining data
# sumX = np.concatenate((x1, x2,x3), axis=0)
# sumX_t = np.concatenate((x_t1, x_t2,x_t3), axis=0)
# print("total train",sumX.shape)
# print("total test",sumX_t.shape)
# t_y = np.tile(train_y, 3)
# ts_y = np.tile(test_y, 3)
# Map the string class labels to integer indices (converting_to_number comes
# from the project's `functions` module); the label vocabulary is derived
# from the training labels only.
label = np.unique(train_y).tolist()
t_y = converting_to_number(train_y,label)
ts_y = converting_to_number(test_y,label)
t_y = np.array(t_y)
ts_y = np.array(ts_y)
print(t_y.shape)
# clf = RandomForestClassifier(n_estimators=600)
# clf.fit(x2,t_y)
# predidct_y = clf.predict(x_t2)
# print("Accuracy:",metrics.accuracy_score(ts_y, predidct_y))
# from sklearn import svm
# clf = svm.SVC(kernel="poly") # Linear Kernel
# clf.fit(x2,t_y)
# predidct_y = clf.predict(x_t2)
# print("Accuracy:",metrics.accuracy_score(ts_y, predidct_y))
# from sklearn.neighbors import KNeighborsClassifier
# clf = KNeighborsClassifier(n_neighbors=7)
# clf.fit(x2,t_y)
# predidct_y = clf.predict(x_t2)
# print("Accuracy:",metrics.accuracy_score(ts_y, predidct_y))
# from sklearn.ensemble import VotingClassifier
# rd = RandomForestClassifier(n_estimators=600)
# sv = svm.SVC(kernel="rbf")
# kn = KNeighborsClassifier(n_neighbors=20)
# model = VotingClassifier(estimators=[('rd', rd), ('sv', sv),('kn', kn)], voting='hard')
# model.fit(x2,t_y)
# predidct_y = model.predict(x_t2)
# print("Voting Accuracy:",metrics.accuracy_score(ts_y, predidct_y))
# from sklearn.linear_model import SGDClassifier
# clf = SGDClassifier(loss="hinge", penalty="l2")
# clf.fit(x2,t_y)
# predidct_y = clf.predict(x_t2)
# print("Accuracy:",metrics.accuracy_score(ts_y, predidct_y))
# from sklearn.ensemble import BaggingClassifier
# clf = BaggingClassifier(base_estimator=RandomForestClassifier(),n_estimators=600, random_state=0)
# clf.fit(x2,t_y)
# predidct_y = clf.predict(x_t2)
# print("Accuracy:",metrics.accuracy_score(ts_y, predidct_y))
from sklearn.mixture import GaussianMixture as GMM
# Fit a 5-component Gaussian mixture on the training embeddings, predict
# cluster assignments for the test embeddings, and report the average
# per-sample log-likelihood of the test set (higher is better).
gmm = GMM(n_components=5).fit(x2)
labels = gmm.predict(x_t2)
print(gmm.score(x_t2))
# plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis')
# # grid search
# from sklearn.ensemble import RandomForestClassifier
# from sklearn.model_selection import RandomizedSearchCV# Number of trees in random forest
# from pprint import pprint
# n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# # Number of features to consider at every split
# max_features = ['auto', 'sqrt']
# # Maximum number of levels in tree
# max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
# max_depth.append(None)
# # Minimum number of samples required to split a node
# min_samples_split = [2, 5, 10]
# # Minimum number of samples required at each leaf node
# min_samples_leaf = [1, 2, 4]
# # Method of selecting samples for training each tree
# bootstrap = [True, False]# Create the random grid
# random_grid = {'n_estimators': n_estimators,
# 'max_features': max_features,
# 'max_depth': max_depth,
# 'min_samples_split': min_samples_split,
# 'min_samples_leaf': min_samples_leaf,
# 'bootstrap': bootstrap}
# pprint(random_grid)
# rf = RandomForestRegressor()
# # Random search of parameters, using 3 fold cross validation,
# # search across 100 different combinations, and use all available cores
# rf_random = RandomizedSearchCV(estimator = rf, param_distributions = random_grid, n_iter = 100, cv = 5, verbose=2, random_state=42, n_jobs = -1)# Fit the random search model
# rf.fit(x2, t_y)
# pprint(rf_random.best_params_)
# ryutaro-0907/simple-health
"""Define routers for Record."""
from typing import List
from fastapi import APIRouter, Depends, status, HTTPException
from fastapi.responses import JSONResponse
from sqlalchemy.orm import Session
from ..use_cases import record_services
from ..entities.user import UserId
from ..entities.record import Record, RecordId, CreateRecord
from ..interfaces.db.database import get_db
router = APIRouter()
@router.post("/records", response_model=Record)
def create_record(request: CreateRecord, db: Session = Depends(get_db)):
    """Create a health record; 201 with its id on success, 400 on failure."""
    try:
        record = record_services.create_record(
            db=db, request=request)
        response = {
            "code": 201,
            'id': record.id,
            'message': 'record created.'
        }
        return JSONResponse(status_code=status.HTTP_201_CREATED, content=response)
    # BUG FIX: the bare `except:` also swallowed SystemExit/KeyboardInterrupt
    # and hid programming errors; `except Exception` keeps the old "any failure
    # -> 400" contract without catching interpreter-level exits.
    except Exception:
        return JSONResponse(status_code=status.HTTP_400_BAD_REQUEST, content={
            'code': 400,
            'message': 'internal error, could not create record.'
        })
@router.get("/records/{user_id}", response_model=List[Record])
def get_records_by_user(user_id: UserId, db: Session = Depends(get_db)):
    """Return every record belonging to *user_id*; 404 when none are found."""
    user_records = record_services.get_all_record_by_user(db=db, user_id=user_id)
    if user_records is None:
        raise HTTPException(status_code=404, detail="Records not found")
    return user_records
| StarcoderdataPython |
3573709 | <gh_stars>1-10
import uuid
from django.db import models
class BaseAsset(models.Model):
    """Abstract base for assets: random UUID primary key + a human-readable identifier."""
    id = models.UUIDField(
        primary_key=True, null=False, editable=False, default=uuid.uuid4
    )
    identifier = models.CharField(max_length=255, null=False, blank=False)

    class Meta:
        # Abstract: no table is created for this model itself.
        abstract = True
class SimpleAsset(BaseAsset):
    """Concrete asset with no fields beyond those inherited from BaseAsset."""
    pass
class RelationType(models.Model):
    """Type/category of an asset-to-asset relation (no explicit fields yet)."""
    pass
class AssetRelation(models.Model):
    """Directed, typed parent->child link between two SimpleAssets.

    RESTRICT deletion keeps assets/types from being removed while relations
    still reference them.
    """
    id = models.UUIDField(
        primary_key=True, null=False, editable=False, default=uuid.uuid4
    )
    parent = models.ForeignKey(
        SimpleAsset,
        null=False,
        on_delete=models.RESTRICT,
        related_name="children_relations",
    )
    child = models.ForeignKey(
        SimpleAsset,
        null=False,
        on_delete=models.RESTRICT,
        related_name="parent_relations",
    )
    relation_type = models.ForeignKey(
        RelationType, null=False, on_delete=models.RESTRICT
    )
# odes.py
import numpy as np
from collections.abc import Iterable
from collections import deque
from functools import wraps
from typing import NamedTuple, Callable, Dict, Any, Tuple
from math import ceil
from itertools import count
class IvpSolution(NamedTuple):
    """Discrete ODE solution: `length` samples, times `ts`, states `ys` (one row per time)."""
    length: int
    ts: np.ndarray  # annotation improved: np.array is a function, np.ndarray is the type
    ys: np.ndarray
class Ivp(NamedTuple):
    """Initial value problem y' = f(t, y), y(t_0) = y_0, with flattened state."""
    f: Callable[[float, np.ndarray], np.ndarray]
    t_0: float
    y_0: np.ndarray  # stored flattened; original_y_shape remembers the true shape
    problem_params: Dict[str, Any]
    original_y_shape: Tuple[int]
def ivp(t_0: float, y_0: np.ndarray, **problem_params):
    """Decorator factory turning a right-hand-side function into an Ivp.

    The state is flattened for the solvers; the wrapped f transparently
    reshapes it back to y_0's original shape before each evaluation.
    """
    s = y_0.shape

    def unflatten_y(f):
        @wraps(f)
        def f_unflattened(t_0: float, y_0: np.ndarray, **kwargs):
            # Solvers work on flat vectors; restore the user's shape for f.
            return f(t_0, y_0.reshape(s), **kwargs)
        return f_unflattened

    return lambda f: Ivp(unflatten_y(f), t_0, y_0.flatten(), problem_params, s)
def solve_ivp(solver: Callable[[Ivp, Any], IvpSolution], **solver_params):
    """Decorator factory binding *solver* (+ params) to an Ivp.

    Returns a decorator that maps an Ivp to a zero-argument `sol()` callable;
    calling it runs the solver and un-flattens the states back to the
    problem's original y shape.
    """
    def solve(ivp: Ivp):
        # NOTE(review): @wraps(ivp.f) copies f's metadata onto the zero-arg
        # sol(), which looks intentional only for naming purposes -- confirm.
        @wraps(ivp.f)
        def sol():
            solution = solver(ivp, **solver_params)
            # print((len(solution.ts), *ivp.original_y_shape))
            actual_ys = np.array(solution.ys).reshape((
                len(solution.ts), *ivp.original_y_shape))
            return IvpSolution(solution.length, solution.ts, actual_ys)
        return sol
    return solve
def eulers_method(ivp: "Ivp", t_end, step_size):
    """Solve *ivp* with the explicit (forward) Euler method on [t_0, t_end].

    Returns an IvpSolution with steps+1 samples; row k of ys is the state at
    ts[k].  The annotation is a string so the function can be defined even
    when Ivp is imported lazily.
    """
    t_0 = ivp.t_0
    steps = ceil(((t_end - t_0) / step_size))
    # first column is time, remainder is y
    ts_ys = np.zeros((steps + 1, ivp.y_0.size + 1))
    ts_ys[0, :] = np.array((ivp.t_0, *ivp.y_0))
    f = ivp.f
    p = ivp.problem_params
    for k in range(steps):
        y_k = ts_ys[k, 1:]
        t_k = t_0 + k * step_size
        # BUG FIX: forward Euler evaluates f at (t_k, y_k); the original used
        # t_{k+1}, which is wrong for time-dependent right-hand sides.
        y_k1 = y_k + step_size * f(t_k, y_k, **p)
        ts_ys[k + 1, 0] = t_k + step_size
        ts_ys[k + 1, 1:] = y_k1
    ts = ts_ys[:, 0]
    ys = ts_ys[:, 1:]
    return IvpSolution(len(ts), ts, ys)
def explicit_rk4(ivp: "Ivp", t_end, step_size):
    """Explicit classic 4th-order Runge-Kutta method on [t_0, t_end].

    Stages are evaluated at t_k, t_k + h/2 and t_k + h per the classical
    Butcher tableau.
    """
    t_0 = ivp.t_0
    steps = ceil((t_end - t_0) / step_size)
    # first column is time, remainder is y
    ts_ys = np.zeros((steps + 1, ivp.y_0.size + 1))
    ts_ys[0, :] = np.array((ivp.t_0, *ivp.y_0))
    f = ivp.f
    p = ivp.problem_params
    for k in range(steps):
        # BUG FIX: the original based all stage times on t_{k+1}; the classical
        # RK4 stages start from the CURRENT time t_k (wrong for time-dependent f).
        t_k = t_0 + k * step_size
        y_k = ts_ys[k, 1:]
        k_1 = f(t_k, y_k, **p)
        k_2 = f(t_k + step_size / 2, y_k + step_size / 2 * k_1, **p)
        k_3 = f(t_k + step_size / 2, y_k + step_size / 2 * k_2, **p)
        k_4 = f(t_k + step_size, y_k + step_size * k_3, **p)
        y_k1 = y_k + step_size * (k_1 + 2 * k_2 + 2 * k_3 + k_4) / 6
        ts_ys[k + 1, 0] = t_k + step_size
        ts_ys[k + 1, 1:] = y_k1
    ts = ts_ys[:, 0]
    ys = ts_ys[:, 1:]
    return IvpSolution(len(ts), ts, ys)
def dopri54(ivp: Ivp, t_end, step_size_0, eps_target):
    """ DOPRI 5(4)
    Dormand-Prince method:
    Embedded ODE solver based on 4th/5th order explicit rk4s. Tries to control
    the step size in such a way that the error is absolutely bounded by eps_target.
    """
    t_0 = np.array(ivp.t_0)
    # first column is time, remainder is y
    ts = deque([t_0])
    ys = deque([ivp.y_0])
    f = ivp.f
    p = ivp.problem_params
    step_size = step_size_0
    # alpha: safety factor for the new step size; order: exponent base of the controller.
    alpha = 0.9
    order = 5
    t_k = t_0
    exit_condition = False
    for k in count(1):
        t_k1 = t_k + step_size
        y_k = ys[-1]
        # NOTE(review): the stage times below are offsets from t_k1 (= t_k + h),
        # whereas the Dormand-Prince tableau uses offsets from t_k -- this looks
        # off by one step for time-dependent f; confirm against the tableau.
        k_1 = f(t_k1, y_k, **p)
        k_2 = f(t_k1 + 1/5 * step_size, y_k + step_size * 1/5 * k_1, **p)
        k_3 = f(t_k1 + 3/10 * step_size, y_k +
                step_size * (3/40 * k_1 + 9/40 * k_2), **p)
        k_4 = f(t_k1 + 4/5 * step_size, y_k + step_size *
                (44/45 * k_1 - 56/15 * k_2 + 32/9 * k_3), **p)
        k_5 = f(t_k1 + 8/9 * step_size, y_k + step_size * (
            19372/6561 * k_1 - 25360/2187 * k_2 + 64448/6561 * k_3 - 212/729 * k_4), **p)
        k_6 = f(t_k1 + step_size, y_k + step_size * (
            9017/3168 * k_1 - 355/33 * k_2 + 46732/5247 * k_3 + 49/176 * k_4 - 5103/18656 * k_5), **p)
        k_7 = f(t_k1 + step_size, y_k + step_size * (
            35/384 * k_1 + 500/1113 * k_3 + 125/192 * k_4 - 2187/6784 * k_5 + 11/84 * k_6), **p)
        # 5th-order solution (accepted) and 4th-order solution (for error estimation).
        y_k1_5 = y_k + step_size * (
            35/384 * k_1 + 500/1113 * k_3 + 125 /
            192 * k_4 - 2187/6784 * k_5 + 11/84 * k_6)
        y_k1_4 = y_k + step_size * (
            5179/57600 * k_1 + 7571/16695 * k_3 +
            393/640 * k_4 - 92097/339200 * k_5 + 187/2100 * k_6 + 1/40 * k_7)
        error_estimate = np.linalg.norm(y_k1_5 - y_k1_4)
        if error_estimate != 0:
            # Standard step-size controller, clamped to [0.001, 5*h] so the
            # step can neither explode nor collapse to zero.
            step_size = max(min(5*step_size, alpha * step_size *
                                (eps_target / abs(error_estimate))**(1/order)), 0.001)
        # NOTE(review): the step is ALWAYS accepted, even when the error
        # estimate exceeds eps_target -- only the next step is adapted.
        ts.append(np.array(t_k1))
        ys.append(y_k1_5)
        t_k = t_k1
        if exit_condition:
            break
        if t_k1 + step_size >= t_end:
            # Shorten the final step so it lands exactly on t_end.
            step_size = t_end - t_k1
            exit_condition = True
            if step_size <= 0:
                raise ValueError("Internal error: negative step size")
    n = len(ts)
    ts = np.vstack(ts).reshape((n,))
    ys = np.vstack(np.array(ys))
    return IvpSolution(n, ts, ys)
| StarcoderdataPython |
208034 | <gh_stars>1-10
import contextlib
import os
import shutil
import pytest
from transform.msigdb.transform import transform
@pytest.fixture
def input_xml(request):
    """Path to the bundled msigdb v6.2 sample XML, relative to this test file."""
    return os.path.join(request.fspath.dirname, 'source/msigdb/msigdb_v6.2.xml')
def validate(helpers, emitter_directory, input_xml):
    """Run the msigdb transform on *input_xml* and validate all emitted files.

    Checks every expected vertex/edge file is structurally valid and that
    edges join correctly (Gene/Publication endpoints are emitted elsewhere).
    """
    geneset_file = os.path.join(emitter_directory, 'GeneSet.Vertex.json.gz')

    gs_gene_edge = os.path.join(emitter_directory, 'GeneSet_Genes_Gene.Edge.json.gz')
    gene_gs_edge = os.path.join(emitter_directory, 'Gene_GeneSets_GeneSet.Edge.json.gz')
    gs_publication_edge = os.path.join(emitter_directory, 'GeneSet_Publications_Publication.Edge.json.gz')
    publication_gs_edge = os.path.join(emitter_directory, 'Publication_GeneSets_GeneSet.Edge.json.gz')

    all_files = [
        # vertices
        geneset_file,
        # edges
        gs_gene_edge, gene_gs_edge,
        gs_publication_edge, publication_gs_edge
    ]

    # remove output from any previous run so results are fresh
    with contextlib.suppress(FileNotFoundError):
        shutil.rmtree(emitter_directory)

    # create output
    transform(
        input_path=input_xml,
        emitter_prefix=None,
        emitter_directory=emitter_directory
    )

    for f in all_files:
        if "Vertex.json.gz" in f:
            helpers.assert_vertex_file_valid(f)
        elif "Edge.json.gz" in f:
            helpers.assert_edge_file_valid(f)

    # Gene and Publication vertices are produced by other transforms.
    helpers.assert_edge_joins_valid(
        all_files,
        exclude_labels=["Gene", "Publication"]
    )
def test_simple(helpers, emitter_directory, input_xml):
    """End-to-end smoke test of the msigdb transform on the sample XML."""
    validate(helpers, emitter_directory, input_xml)
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from PIL import ImageTk, Image
import sqlite3
class userwindow:
    """Read-only student-database window: lists all records and supports search."""
    # Class-level defaults; the real connection/cursor are set in setup_db().
    sqlite_var = 0  # sqlite3 connection handle
    theCursor = 0   # sqlite3 cursor
    curItem = 0     # currently selected record (unused in the read-only window)

    def refresh(self):
        """Reload the full table and clear the search box."""
        self.update_tree()
        self.clear_entries()
        print("Refreshed")

    def _populate_tree(self, rows):
        """Insert *rows* into the treeview with alternating row-colour tags."""
        for i, row in enumerate(rows):
            self.tree.insert("", END, values=row, tag='1' if i % 2 == 0 else '2')

    def search_record(self):
        """Show only records whose name or phone contains the search text."""
        try:
            self.tree.delete(*self.tree.get_children())
            self.theCursor.execute("select * from Students where name like ? or phone like ?",('%'+self.search_value.get()+'%','%'+self.search_value.get()+'%'))
            self.result = self.theCursor.fetchall()
            # BUG FIX: the original stored str(len(...)) and compared it with the
            # int 0, so the "no results" dialog could never be shown.
            if len(self.result) == 0:
                messagebox.showinfo("Search Results","No results were found, try again using part of name or phone no")
            else:
                self._populate_tree(self.result)
        except Exception:
            # BUG FIX: the original printed AFTER `raise` (unreachable).
            print("Couldn't search Data")
            raise

    def clear_entries(self):
        """Reset the search entry to empty."""
        self.search_value.set("")

    def update_tree(self):
        """Reload every student record from the database into the treeview."""
        try:
            self.tree.delete(*self.tree.get_children())
            self.theCursor.execute("SELECT * FROM Students")
            self.rows = self.theCursor.fetchall()
            self._populate_tree(self.rows)
        except Exception:
            print("Couldn't Update Data")

    def setup_db(self):
        """Open (or create) student.db and ensure the Students table exists."""
        try:
            self.sqlite_var = sqlite3.connect('student.db')
            self.theCursor = self.sqlite_var.cursor()
        except Exception:
            print("Could not establish connection to sqlite3")
        try:
            self.theCursor.execute("CREATE TABLE if not exists Students(ID INTEGER PRIMARY KEY AUTOINCREMENT , Name TEXT UNIQUE NOT NULL , Phone TEXT NOT NULL,Address TEXT NOT NULL);")
        except Exception:
            print("ERROR : Table not created")
        finally:
            self.sqlite_var.commit()
        self.update_tree()

    def __init__(self):
        """Build the window, wire up the widgets and enter the Tk main loop."""
        self.user_window = Tk()
        self.user_window.resizable(False, False)
        self.user_window.iconbitmap("logo.ico")
        self.user_window.title("REGISTRX Database - User")

        # ----- record table -----
        self.tree = ttk.Treeview(self.user_window, selectmode="browse",
                                 column=("column1", "column2", "column3", "column4"), show='headings')
        self.tree.column("column1", width=100, minwidth=100, stretch=NO)
        self.tree.heading("#1", text="ADMISSION")
        self.tree.column("column2", width=180, minwidth=180, stretch=NO)
        self.tree.heading("#2", text="NAME")
        self.tree.column("column3", width=180, minwidth=180, stretch=NO)
        self.tree.heading("#3", text="PHONE")
        self.tree.column("column4", width=450, minwidth=450, stretch=NO)
        self.tree.heading("#4", text="ADDRESS")
        # alternating row colours
        self.tree.tag_configure('1', background='ivory2')
        self.tree.tag_configure('2', background='alice blue')
        self.tree.grid(row=4, column=0, columnspan=4, sticky=W+E, padx=9, pady=9)

        # ----- search controls -----
        Label(self.user_window, text="Search by Part of NAME or PHONE no").grid(row=5, column=0, columnspan=2, pady=9, padx=9, sticky=E)
        self.search_value = StringVar(self.user_window, value="")
        Entry(self.user_window, textvariable=self.search_value).grid(row=5, column=2, pady=9, padx=9, sticky=W+E)
        self.search_button = ttk.Button(self.user_window, text="Search", command=self.search_record)
        self.search_button.grid(row=5, column=3, pady=9, padx=9, sticky=W+E)
        self.refresh_button = ttk.Button(self.user_window, text="Refresh", command=self.refresh)
        self.refresh_button.grid(row=6, column=2, padx=9, pady=9, sticky=W+E)

        self.setup_db()
        self.user_window.mainloop()
class adminwindow:
    """Administrator window for the REGISTRX student database.

    Provides full CRUD on the Students table of student.db: add, update
    and delete records, substring search on name/phone, refresh, and a
    destructive database reset.  Instantiating the class builds the Tk
    window and blocks in mainloop() until it is closed.
    """

    # Class-level sentinels; each is replaced per instance at runtime.
    sqlite_var = 0  # sqlite3 connection, set in setup_db()
    theCursor = 0   # cursor on that connection, set in setup_db()
    curItem = 0     # treeview row (dict) last selected, set in selectItem()

    def refresh(self):
        """Reload the table view from the DB and blank the entry fields."""
        self.update_tree()
        self.clear_entries()
        print("Refreshed")

    def search_record(self):
        """Show only rows whose name or phone contains the search text."""
        try:
            self.tree.delete(*self.tree.get_children())
            # LIKE with %...% wildcards: match the term anywhere in the field.
            self.theCursor.execute("select * from Students where name like ? or phone like ?",('%'+self.search_value.get()+'%','%'+self.search_value.get()+'%'))
            self.result = self.theCursor.fetchall()
            # BUGFIX: the old code compared str(len(result)) with the int 0,
            # which is never true, so the "no results" dialog never appeared.
            if not self.result:
                messagebox.showinfo("Search Results","No results were found, try again using part of name or phone no")
            else:
                i=0
                for row in self.result:
                    # Alternate tags give the striped row colouring.
                    if(i%2==0):
                        self.tree.insert("",END, values=row,tag='1')
                    else:
                        self.tree.insert("",END, values=row,tag='2')
                    i=i+1
        except Exception:
            # BUGFIX: the log line used to sit *after* `raise` and was
            # unreachable; log first, then propagate for debugging.
            print("Couldn't search Data")
            raise

    def reset_db(self):
        """Drop and recreate the Students table after user confirmation."""
        yesno=messagebox.askquestion("RESET DB","All data in DB will be lost, continue ?")
        if(yesno=='yes'):
            self.theCursor.execute("DROP TABLE Students")
            print("Database Reset")  # BUGFIX: was misspelt "Reseted"
            self.setup_db()
            self.update_tree()

    def clear_entries(self):
        """Blank the three input fields."""
        self.Name_entry.delete(0, "end")
        self.Phone_no_entry.delete(0, "end")
        self.Address_entry.delete(0, "end")

    def delete_record(self):
        """Delete the row currently cached in self.curItem."""
        try:
            self.theCursor.execute("delete FROM Students WHERE ID=?",(self.curItem['values'][0],))
            print("Deleted")
        except Exception:
            # Either nothing is selected (curItem is still 0) or the DB
            # call failed.
            print("Delete Failed !")
        finally:
            self.curItem=0
            self.clear_entries()
            self.update_tree()
            self.sqlite_var.commit()

    def update_record(self):
        """Overwrite the selected row with the entry-field values."""
        if(self.Name_value.get()!="" and self.Address_value.get()!="" and self.Phone_no_value.get()!=""):
            try:
                self.theCursor.execute("""UPDATE Students SET Name = ? ,Phone = ?,Address = ? WHERE ID= ? """,
                (self.Name_value.get(),self.Phone_no_value.get(),self.Address_value.get(),self.curItem['values'][0]))
                print("Update Records")
            except sqlite3.IntegrityError:
                # Name column is declared UNIQUE.
                messagebox.showerror("Duplicate","The Name already exists in the database")
            except Exception:
                print("Update Failed due to unkown reason !")
            finally:
                self.update_tree()
                self.sqlite_var.commit()
        else:
            messagebox.showwarning("EMPTY INPUT","PLEASE FILL ALL REQUIRED DATA BEFORE UPDATING")

    def selectItem(self,event):
        """Treeview click/space handler: cache the row and fill the fields."""
        self.curItem = self.tree.item(self.tree.focus())
        print(self.curItem)
        # BUGFIX: clicking empty space yields an item without values; the
        # old code raised inside the Tk callback.
        if not self.curItem or not self.curItem.get("values"):
            return
        self.Name_value.set(self.curItem["values"][1])
        self.Phone_no_value.set(self.curItem["values"][2])
        self.Address_value.set(self.curItem["values"][3])

    def update_tree(self):
        """Repopulate the treeview with every row of Students."""
        try:
            self.tree.delete(*self.tree.get_children())
            self.theCursor.execute("SELECT * FROM Students")
            self.rows = self.theCursor.fetchall()
            i=0
            for row in self.rows:
                if(i%2==0):
                    self.tree.insert("",END, values=row,tag='1')
                else:
                    self.tree.insert("",END, values=row,tag='2')
                i=i+1
        except Exception:
            print("Couldn't Update Data")

    def write_record(self):
        """Insert a new student from the entry fields."""
        if(self.Name_value.get()!="" and self.Address_value.get()!="" and self.Phone_no_value.get()!=""):
            try:
                self.theCursor.execute("""INSERT INTO Students (Name, Phone,Address) VALUES (?,?,?)""",
                (self.Name_value.get(),self.Phone_no_value.get(),self.Address_value.get()))
                self.sqlite_var.commit()
                # Echo the row holding the highest (newest) ID.
                self.theCursor.execute("SELECT *,max(id) FROM Students")
                self.rows=self.theCursor.fetchall()
                print(self.rows[0][0],"{Name : ",self.rows[0][1],"| No : ",self.rows[0][2],"| Address :",self.rows[0][3],"} was ADDED ! ")
                self.clear_entries()
            except sqlite3.IntegrityError:
                messagebox.showerror("Duplicate","The Name already exists in the database")
            except Exception:
                print("Data writing failed due to unknown reason")
            finally:
                self.update_tree()
        else:
            messagebox.showwarning("EMPTY INPUT","PLEASE FILL ALL REQUIRED DATA BEFORE SUBMITTING")

    def setup_db(self):
        """Connect to student.db and ensure the Students table exists."""
        try:
            self.sqlite_var = sqlite3.connect('student.db')
            self.theCursor = self.sqlite_var.cursor()
        except sqlite3.Error:
            # BUGFIX: bail out early; the old code fell through and then
            # crashed on commit() against the integer sentinel.
            print("Could not establish connection to sqlite3")
            return
        try:
            self.theCursor.execute("CREATE TABLE if not exists Students(ID INTEGER PRIMARY KEY AUTOINCREMENT , Name TEXT UNIQUE NOT NULL , Phone TEXT NOT NULL,Address TEXT NOT NULL);")
        except sqlite3.Error:
            print("ERROR : Table not created")
        finally:
            self.sqlite_var.commit()
            self.update_tree()

    def __init__(self):
        """Build the admin window (entry form, action buttons, students
        table, search bar, utility buttons) and run the event loop."""
        self.admin_window=Tk()
        self.admin_window.resizable(False, False)
        self.admin_window.iconbitmap("logo.ico")
        self.admin_window.title("REGISTRX Database - admin")
        # ----- 1st Row: name -----
        self.Name_Label = Label(self.admin_window, text="Name")
        self.Name_Label.grid(row=0, column=0, padx=10, pady=10, sticky=W)
        self.Name_value = StringVar(self.admin_window, value="")
        self.Name_entry = ttk.Entry(self.admin_window,textvariable=self.Name_value)
        self.Name_entry.grid(row=0, column=1,columnspan=2,padx=10, pady=10,sticky=W+E)
        # ----- 2nd Row: phone -----
        self.Phone_Label = Label(self.admin_window, text="Phone No.")
        self.Phone_Label.grid(row=1, column=0, padx=10, pady=10, sticky=W)
        self.Phone_no_value = StringVar(self.admin_window, value="")
        self.Phone_no_entry = ttk.Entry(self.admin_window,textvariable=self.Phone_no_value)
        self.Phone_no_entry.grid(row=1, column=1,columnspan=2, padx=10, pady=10, sticky=W+E)
        # ----- 3rd Row: address -----
        self.Adress_Label = Label(self.admin_window, text="Address")
        self.Adress_Label.grid(row=2, column=0, padx=10, pady=10, sticky=W)
        self.Address_value = StringVar(self.admin_window, value="")
        self.Address_entry = ttk.Entry(self.admin_window,textvariable=self.Address_value)
        self.Address_entry.grid(row=2, column=1,columnspan=2, padx=10, pady=10, sticky=W+E)
        # ----- 4th Row: action buttons -----
        self.submit_button = ttk.Button(self.admin_window,text="Submit",command=self.write_record)
        self.submit_button.grid(row=0, column=3,padx=9, sticky=W+E)
        self.update_button = ttk.Button(self.admin_window,text="Update",command=self.update_record)
        self.update_button.grid(row=1, column=3,padx=9, sticky=W+E)
        self.delete_button = ttk.Button(self.admin_window,text="Delete",command=self.delete_record)
        self.delete_button.grid(row=2, column=3,padx=9, sticky=W+E)
        # ----- 5th Row: students table -----
        self.tree= ttk.Treeview(self.admin_window,selectmode="browse",column=("column1", "column2", "column3","column4"), show='headings')
        self.tree.column("column1",width=100,minwidth=100,stretch=NO)
        self.tree.heading("#1", text="ADMISSION")
        self.tree.column("column2",width=180,minwidth=180,stretch=NO)
        self.tree.heading("#2", text="NAME")
        self.tree.column("column3",width=180,minwidth=180,stretch=NO)
        self.tree.heading("#3", text="PHONE")
        self.tree.column("column4",width=450,minwidth=450,stretch=NO)
        self.tree.heading("#4", text="ADDRESS")
        # Click or <space> copies the focused row into the entry fields.
        self.tree.bind("<ButtonRelease-1>",self.selectItem)
        self.tree.bind("<space>",self.selectItem)
        self.tree.tag_configure('1', background='ivory2')
        self.tree.tag_configure('2', background='alice blue')
        self.tree.grid(row=4,column=0,columnspan=4,sticky=W+E,padx=9,pady=9)
        # Search bar and utility buttons.
        Label(self.admin_window,text="Search by Part of NAME or PHONE no").grid(row=5,column=0,columnspan=2,pady=9,padx=9,sticky=E)
        self.search_value = StringVar(self.admin_window, value="")
        Entry(self.admin_window,textvariable=self.search_value).grid(row=5,column=2,pady=9,padx=9,sticky=W+E)
        self.search_button = ttk.Button(self.admin_window,text="Search",command=self.search_record)
        self.search_button.grid(row=5,column=3,pady=9,padx=9,sticky=W+E)
        self.refresh_button = ttk.Button(self.admin_window,text="Refresh",command=self.refresh)
        self.refresh_button.grid(row=6, column=2,padx=9,pady=9,sticky=W+E)
        Label(self.admin_window,text="Developed by <NAME>").grid(row=6,column=0,pady=9,padx=9,sticky=W)
        self.reset_button = ttk.Button(self.admin_window,text="Reset Database",command=self.reset_db)
        self.reset_button.grid(row=6, column=3,padx=9,pady=9,sticky=W+E)
        self.setup_db()
        self.admin_window.mainloop()
class signinwindow:
    """Sign-up window: create user accounts in ``user.db``.

    Opened from the main window's File menu.  Provides username/password
    entry, an "Add User" button and a viewer/cleaner for existing users.
    NOTE(review): passwords are stored in plain text -- confirm whether
    hashing is required before release.
    """

    sqlite_var = 0  # variable to establish connection btw python and sqlite3
    theCursor = 0   # variable to store the indexing cursor
    curItem=0       # variable to store currently active record

    def setup_db(self):
        """Connect to user.db and create the users table if missing."""
        try:
            self.sqlite_var = sqlite3.connect('user.db')
            self.theCursor = self.sqlite_var.cursor()
        except:
            print("Could not establish connection to sqlite3")
        try:
            # Usernames are UNIQUE here; loginwindow's copy of this DDL
            # omits UNIQUE -- NOTE(review): confirm which schema is intended.
            self.theCursor.execute("CREATE TABLE if not exists users(username TEXT NOT NULL UNIQUE,password TEXT NOT NULL);")
        except:
            print("ERROR : Table not created")
        finally:
            self.sqlite_var.commit()

    def user_tree_update(self):
        """Refill the users tree (created by view_users) from the table."""
        self.tree.delete(*self.tree.get_children())
        self.theCursor.execute("SELECT * FROM users")
        res = self.theCursor.fetchall()
        i=0
        for row in res:
            # Alternate tags give the striped row colouring.
            if(i%2==0):
                self.tree.insert("",END, values=row,tag='1')
            else:
                self.tree.insert("",END, values=row,tag='2')
            i=i+1

    def clear_users(self):
        """Drop and recreate the users table, then refresh the viewer."""
        self.theCursor.execute("DROP TABLE users")
        self.setup_db()
        self.user_tree_update()

    def view_users(self):
        """Open a window listing all accounts plus a clear-all button.

        NOTE(review): this creates a second Tk() root while other windows
        may be running; tkinter normally expects Toplevel here -- confirm.
        """
        try:
            self.theCursor.execute("SELECT * from users")
            res=self.theCursor.fetchall()
        except:
            print("Couldnt read from users")
        self.x=Tk()
        self.x.title("USERS LIST")
        self.x.resizable(False, False)
        self.x.iconbitmap("logo.ico")
        self.tree= ttk.Treeview(self.x,selectmode="browse",column=("column1", "column2"), show='headings')
        self.tree.heading("#1", text="USERNAME")
        self.tree.heading("#2", text="PASSWORD")
        self.tree.tag_configure('1', background='yellow')
        self.tree.tag_configure('2', background='light green')
        self.tree.grid(row=0,column=0,columnspan=4,sticky=W+E,padx=9,pady=9)
        Button(self.x,text="Clear all users",command=self.clear_users).grid(row=1,column=0,columnspan=4,padx=9,pady=9,sticky=W+E)
        self.user_tree_update()
        self.x.mainloop()

    def new_user(self):
        """Insert the entered username/password as a new account."""
        try:
            if(self.username_text.get()!="" and self.password_text.get()!=""):
                self.theCursor.execute("INSERT INTO users (username,password) VALUES(?,?)",(self.username_text.get(),self.password_text.get()))
                self.signin_window.destroy()
                messagebox.showinfo("NEW USER ADDED","New User Added successfully")
            else:
                messagebox.showwarning("EMPTY INPUT","Username or password missing")
        except sqlite3.IntegrityError:
            # username column is UNIQUE.
            messagebox.showerror("DUPLICATE","Username already exists")
        except:
            print("Couln't add user")
        finally:
            # Runs on every path, including failures; the SELECT result is
            # unused -- NOTE(review): confirm whether it can be removed.
            self.sqlite_var.commit()
            self.theCursor.execute("SELECT * from users")
            res=self.theCursor.fetchall()
            self.username_text.set("")
            self.password_text.set("")

    def __init__(self):
        """Build the sign-up Toplevel and start its event loop."""
        self.signin_window=Toplevel()
        self.signin_window.title("SIGN-UP")
        self.signin_window.resizable(False, False)
        self.signin_window.iconbitmap("logo.ico")
        self.password_text=StringVar()
        self.username_text=StringVar()
        Label(self.signin_window,text="Sign-Up",font="Ariel").grid(row=0,column=0,sticky=W,pady=10)
        Label(self.signin_window,text="Username : ",font="Ariel, 12").grid(row=1,column=0)
        # NOTE(review): the font value below looks like a redaction artifact
        # of the published source; it was probably "Ariel, 12".
        Label(self.signin_window,text="Password : ",font="<PASSWORD>, 12").grid(row=2,column=0,pady=(0,20))
        Entry(self.signin_window,font="Ariel, 10",textvariable=self.username_text).grid(row=1,column=1)
        Entry(self.signin_window,font="Ariel, 10",textvariable=self.password_text).grid(row=2,column=1,pady=(0,20))
        user_add=Button(self.signin_window,font="Ariel, 17",text="Add User",background='white',command=self.new_user)
        user_add.grid(row=1,column=2,rowspan=2,padx=20,pady=(0,20))
        view_existing=Button(self.signin_window,text="View Existing Users",background='white',command=self.view_users)
        view_existing.grid(row=3,column=0,columnspan=4,padx=20,pady=(0,20),sticky=W+E)
        self.setup_db()
        self.signin_window.mainloop()
class loginwindow:
    """Login window: authenticates against user.db (or the hard-coded
    admin credentials) and opens the matching database window."""

    sqlite_var = 0  # variable to establish connection btw python and sqlite3
    theCursor = 0   # variable to store the indexing cursor
    curItem=0       # variable to store currently active record

    def setup_db(self):
        """Connect to user.db and create the users table if missing.

        NOTE(review): unlike signinwindow.setup_db, this DDL omits UNIQUE
        on username -- confirm which schema is intended.
        """
        try:
            self.sqlite_var = sqlite3.connect('user.db')
            self.theCursor = self.sqlite_var.cursor()
        except:
            print("Could not establish connection to sqlite3")
        try:
            self.theCursor.execute("CREATE TABLE if not exists users(username TEXT NOT NULL,password TEXT NOT NULL);")
        except:
            print("ERROR : Table not created")
        finally:
            self.sqlite_var.commit()

    def logg(self):
        """Validate the entered credentials and open user/admin window.

        var==1 corresponds to the User radio button, var==2 to Admin; the
        "<PASSWORD>" literal is a redaction artifact of the published
        source.
        NOTE(review): the admin branch sits inside the per-row loop, so
        admin login silently fails while the users table is empty --
        confirm whether that is intended.
        """
        try:
            self.theCursor.execute("SELECT * from users")
            res=self.theCursor.fetchall()
            flag=0  # set to 1 once a credential match opened a window
            for x in res:
                if(self.var.get()==1 and self.username_text.get()==x[0] and self.password_text.get()==x[1]):
                    self.login_window.destroy()
                    userwindow()
                    flag=1
                if(self.var.get()==2 and self.username_text.get()=="admin" and self.password_text.get()=="<PASSWORD>"):
                    self.login_window.destroy()
                    adminwindow()
                    flag=1
            if(flag==0):
                messagebox.showinfo("INVALID","INVALID USERNAME OR PASSWORD")
        except:
            print("ERROR while logging in")
            raise
        finally:
            # Always blank the fields, even after a successful login.
            self.password_text.set("")
            self.username_text.set("")

    def __init__(self):
        """Build the login Toplevel and start its event loop."""
        self.login_window=Toplevel()
        self.login_window.title("LOGIN")
        self.login_window.resizable(False, False)
        self.login_window.iconbitmap("logo.ico")
        self.password_text=StringVar(self.login_window)
        self.username_text=StringVar(self.login_window)
        self.var=IntVar(self.login_window)  # 1 = User, 2 = Admin
        Label(self.login_window,text="LOGIN",font="Ariel").grid(row=0,column=0,sticky=W,pady=10)
        Label(self.login_window,text="Username : ",font="Ariel, 12").grid(row=1,column=0)
        Label(self.login_window,text="Password : ",font="Ariel, 12").grid(row=2,column=0)
        # NOTE(review): grid() returns None, so self.username/self.password
        # are always None; they appear to be unused -- confirm.
        self.username=Entry(self.login_window,font="Ariel, 10",textvariable=self.username_text).grid(row=1,column=1)
        self.password=Entry(self.login_window,font="Ariel, 10",textvariable=self.password_text,show='*').grid(row=2,column=1)
        self.but=Button(self.login_window,font="Ariel, 17",text="GO",command=self.logg)
        self.but.grid(row=1,column=2,rowspan=2,padx=20)
        self.var.set(1)  # default selection: User
        Radiobutton(self.login_window,text="User",variable=self.var,value=1).grid(row=3,column=0,pady=15)
        Radiobutton(self.login_window,text="Admin",variable=self.var,value=2).grid(row=3,column=1)
        Label(self.login_window,text="DEFAULT ADMIN\t{ Username : 'admin' and Password : '<PASSWORD>' }",font="Ariel, 7").grid(row=4,column=0,columnspan=3,sticky=W,pady=(5,0),padx=(3,9))
        Label(self.login_window,text="To login as user Sign-Up from file menu of main window",font="Ariel, 7").grid(row=5,column=0,columnspan=3,sticky=W,pady=(0,5),padx=(3,9))
        self.setup_db()
        self.login_window.mainloop()
class mainwindow():
    """Splash/launcher window for REGISTRX.

    Shows the logo, a LOGIN button and a menu bar from which the user can
    sign up, open the HTML help, read the about box or quit.
    """

    def about_us(self):
        """Show the static credits dialog."""
        messagebox.showinfo("About Us ","""This simple application to facilitate management of student details was created by
\n\n<NAME>\n\n<NAME>\n\n<NAME>\n\n<NAME>\n\n<NAME>\n\n\n""")

    def create_login(self):
        """Open the login window; surface any failure loudly."""
        try:
            loginwindow()
        except:
            raise Exception("COULD NOT OPEN LOGIN_WINDOW")

    def create_signin(self):
        """Open the sign-up window; surface any failure loudly."""
        try:
            signinwindow()
        except:
            raise Exception("COULD NOT OPEN SIGNUP_WINDOW")

    def helpp(self):
        """Open the bundled help.html in the default web browser."""
        import os,webbrowser
        from urllib.request import pathname2url
        # Build a file:// URL so webbrowser handles the local path portably.
        url = 'file:{}'.format(pathname2url(os.path.abspath('help.html')))
        webbrowser.open(url)

    def quit_window(self):
        """Ask for confirmation, then destroy the root window."""
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            self.root.destroy()

    def __init__(self):
        """Build the splash window and menus, then run the Tk main loop."""
        self.root=Tk()
        self.root.resizable(False, False)
        # Route the window-manager close button through the confirm dialog.
        self.root.protocol("WM_DELETE_WINDOW",self.quit_window)
        self.root.iconbitmap("logo.ico")
        self.root.title("REGISTRX")
        # Keep a reference to the image on self so Tk does not GC it.
        self.img = ImageTk.PhotoImage(Image.open("logo.png"))
        self.panel = Label(self.root, image = self.img)
        self.panel.grid(row=0,column=0)
        Label(self.root,text="REGISTRX",font="Times, 20",foreground='blue').grid(row=1,column=0,sticky=W+E,padx=40)
        Label(self.root,text="STUDENT REGISTRATION APP",font="Times, 29",foreground='red4').grid(row=2,column=0,sticky=W+E,padx=40)
        Label(self.root,text="This app offers an easy way to manage student records",
        font="Ariel, 12").grid(row=3,column=0,columnspan=2,sticky=W+E,pady=30)
        Label(self.root,text="PLEASE LOGIN TO CONTINUE",
        font="Ariel, 18").grid(row=4,column=0,columnspan=2,sticky=W+E,pady=5)
        self.but=Button(self.root,text="LOGIN",command=self.create_login)
        self.but.configure(width=18,height=2,foreground="white",background="orange4")
        self.but.grid(row=5,column=0,columnspan=2,sticky='N',pady=30)
        # File menu: Sign-up / Quit.
        self.menu_bar=Menu(self.root)
        self.menu_bar.add_separator()
        self.file_menu=Menu(self.menu_bar,tearoff=0)
        self.file_menu.add_command(label="Sign-up",command=self.create_signin)
        self.file_menu.add_separator()
        self.file_menu.add_command(label="Quit",command=self.quit_window)
        self.menu_bar.add_cascade(label="File",menu=self.file_menu)
        self.menu_bar.add_separator()
        # Help menu: Help / About Us.
        self.help_menu=Menu(self.menu_bar,tearoff=0)
        self.help_menu.add_command(label="Help",command=self.helpp)
        self.help_menu.add_separator()
        self.help_menu.add_command(label="About Us",command=self.about_us)
        self.menu_bar.add_cascade(label="Help",menu=self.help_menu)
        self.root.config(menu=self.menu_bar)
        self.root.mainloop()
# Application entry point: build the main window and run the Tk loop.
try:
    mainwindow()
except Exception as exc:
    # BUGFIX: chain the original error instead of discarding it, and stop
    # swallowing SystemExit/KeyboardInterrupt with a bare except.
    raise Exception("COULD NOT CREATE MAIN_WINDOW") from exc
| StarcoderdataPython |
4827147 | <filename>tests/python/profiling/test_nvtx.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pytest
import mxnet as mx
import sys
from subprocess import Popen, PIPE
@pytest.mark.skipif(not mx.device.num_gpus(), reason='Test only applicable to machines with GPUs')
def test_nvtx_ranges_present_in_profile():
    """Run simple_forward.py under nvprof and verify that the expected
    NVTX ranges (operator- and engine-level) appear in the profile.

    Requires nvprof on PATH, which is the usual case for CUDA machines.
    """
    # simple_forward.py lives next to this test file.
    script_path = os.path.realpath(__file__).replace('test_nvtx', 'simple_forward')
    proc = Popen(["nvprof", sys.executable, script_path], stdout=PIPE, stderr=PIPE)
    _, profiler_bytes = proc.communicate()
    proc.wait()
    profiler_output = profiler_bytes.decode('ascii')
    # NVTX ranges for the simple operators exercised by the script.
    assert "Range \"FullyConnected\"" in profiler_output
    assert "Range \"_zeros\"" in profiler_output
    # NVTX range emitted by the engine itself.
    assert "Range \"WaitForVar\"" in profiler_output
| StarcoderdataPython |
1784593 | <filename>alipay/aop/api/domain/AuthFieldSceneDTO.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AuthFieldSceneDTO(object):
    """Value object for an authorization-field scene (code + description).

    Follows the alipay SDK DTO convention: plain attributes behind
    properties, ``to_alipay_dict`` for serialization and the
    ``from_alipay_dict`` factory for deserialization.
    """

    # Attribute names that participate in (de)serialization.
    _FIELDS = ('scene_code', 'scene_desc')

    def __init__(self):
        self._scene_code = None
        self._scene_desc = None

    @property
    def scene_code(self):
        return self._scene_code

    @scene_code.setter
    def scene_code(self, value):
        self._scene_code = value

    @property
    def scene_desc(self):
        return self._scene_desc

    @scene_desc.setter
    def scene_desc(self, value):
        self._scene_desc = value

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict, recursing into any
        value that itself offers ``to_alipay_dict``."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from mapping *d*; returns None for a falsy input."""
        if not d:
            return None
        o = AuthFieldSceneDTO()
        for name in AuthFieldSceneDTO._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| StarcoderdataPython |
4824589 | import os
import shutil
from util import file_util
from util.frontend.normalize_lab_for_merlin import normalize_label_files
# Paths inside the merlin checkout used by the frontend scripts below.
MerlinDir = "merlin"
frontend = os.path.join(MerlinDir, "misc", "scripts", "frontend")
ESTDIR = os.path.join(MerlinDir, "tools", "speech_tools")
FESTDIR = os.path.join(MerlinDir, "tools", "festival")
FESTVOXDIR = os.path.join(MerlinDir, "tools", "festvox")
engdataset = "slt_arctic"
# Shell command for festvox clustergen voice setup.
# NOTE(review): this command string is built but never executed -- confirm
# whether an os.system(clustergen) call is missing.
clustergen = "%s setup_cg cmu us %s" % (os.path.join(FESTVOXDIR, "src", "clustergen"), engdataset)
# Copy the prompt list into the voice directory.
# NOTE(review): this line is duplicated, and the destination path begins
# with a space (" etc/...") -- confirm both are intentional.
file_util.copy_filepath("../cmuarctic.data", " etc/txt.done.data")
file_util.copy_filepath("../cmuarctic.data", " etc/txt.done.data")
# Stage the slt waveforms into ./wav for the festvox build steps.
slt_wavs = file_util.read_file_list_from_path("../slt_wav/", file_type=".wav")
for wav in slt_wavs:
    shutil.copy(wav, os.path.join("wav", os.path.basename(wav)))
# Festvox build pipeline: prompts -> labels -> utterance structures.
os.system("./bin/do_build build_prompts")
os.system("./bin/do_build label")
os.system("./bin/do_build build_utts")
'''
cd ../
cat cmuarctic.data | cut -d " " -f 2 > file_id_list.scp
'''
# NOTE(review): scp_file_id_list is empty, so read_file_by_line() below
# receives no path; presumably it should point at the file_id_list.scp
# produced by the shell snippet above -- confirm.
scp_file_id_list = ""
# Dump full-context labels from the built utterances.
make_label_cmd = "%s full-context-labels %s %s %s" % \
(os.path.join(frontend, "festival_utt_to_lab", "make_labels"), "cmu_us_slt_arctic/festival/utts",
os.path.join(FESTDIR, "examples", "dumpfeats"), os.path.join(frontend, "festival_utt_to_lab"))
os.system(make_label_cmd)
# Normalise each label file into Merlin's phone-alignment format.
in_lab_dir = "full-context-labels/full"
out_lab_dir = "label_phone_align"
label_style = "phone_align "  # NOTE(review): trailing space -- confirm intended.
file_id_list = file_util.read_file_by_line(scp_file_id_list)
write_time_stamps = True
for id in file_id_list:
    filename = id.strip() + '.lab'
    print(filename)
    in_lab_file = os.path.join(in_lab_dir, filename)
    out_lab_file = os.path.join(out_lab_dir, filename)
    normalize_label_files(in_lab_file, out_lab_file, label_style, write_time_stamps)
| StarcoderdataPython |
3456577 | <reponame>avs123/Farmers-Portal<filename>home/views.py
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.db.models import Count
from django.db import connection, transaction
from post.models import POST_CATEGORIES
from User.models import check_if_auth_user
from collections import namedtuple
def namedtuplefetchall(cursor):
    """Fetch all remaining rows from *cursor* as namedtuples whose fields
    are the cursor's column names."""
    column_names = [column[0] for column in cursor.description]
    Result = namedtuple('Result', column_names)
    return [Result._make(row) for row in cursor.fetchall()]
# Defining queries for the index page
QUERY_DICT = {
1 : """
SELECT name as Name, user_id as Email FROM User_farmer
WHERE `User_farmer`.'name' LIKE %s or `User_farmer`.'location_id' in
(SELECT `location_location`.loc_id FROM location_location
WHERE `location_location`.city LIKE %s or `location_location`.state LIKE %s)""",
2 : """
SELECT title as Title, timestamp as PostedOn, post_id as Link FROM post_post
WHERE `post_post`.'title' LIKE %s or
`post_post`.'description' LIKE %s or `post_post`.'post_id' = %s""",
3 : """
SELECT name as CropName, family as Family FROM crop_crop
WHERE `crop_crop`.'name' LIKE %s or
`crop_crop`.'family' LIKE %s or `crop_crop`.'crop_id' = %s""",
4 : """
SELECT name as Name, user_id as Email FROM User_farmer
WHERE `User_farmer`.'location_id' = %s""",
5 : """
SELECT `crop_crop`.name as CropName, `crop_crop`.family as Family,
`User_farmer`.name as FarmerName, `User_farmer`.'user_id' as Email,
`crop_disease`.name as Disease, `crop_disease`.category as Category
FROM crop_crop
INNER JOIN `crop_cropfarmer` ON `crop_cropfarmer`.'crop_id' = `crop_crop`.'crop_id'
INNER JOIN `User_farmer` ON `User_farmer`.'auto_id' = `crop_cropfarmer`.'farmer_id'
INNER JOIN `crop_disease` ON `crop_disease`.'dis_id' = `crop_cropfarmer`.'disease_id'
""",
6 : """
SELECT `crop_fertilizer`.'name' as Fertilizer, `crop_nutrient`.'name' as Nutrient, `crop_nutrient`.'nut_type' as NutrientType
FROM crop_fertiprovide
INNER JOIN crop_nutrient ON `crop_fertiprovide`.nutrient_id = `crop_nutrient`.'nut_id'
INNER JOIN crop_fertilizer ON `crop_fertiprovide`.ferti_id = `crop_fertilizer`.'ferti_id'
ORDER BY Fertilizer""",
}
# Create your views here.
def index_page(request):
    """Render the landing page: every post, newest first, plus the
    authenticated user's profile row when a session exists."""
    auth_id = check_if_auth_user(request)
    current_user, user_class = None, None
    if auth_id:
        cursor = connection.cursor()
        user_class = request.session["user_class"]
        profile_sql = (
            "SELECT * FROM User_expert WHERE `User_expert`.'user_id' = %s"
            if user_class == 'E'
            else "SELECT * FROM User_farmer WHERE `User_farmer`.'user_id' = %s"
        )
        cursor.execute(profile_sql, [auth_id, ])
        current_user = namedtuplefetchall(cursor)[0]
    posts_cursor = connection.cursor()
    posts_cursor.execute("SELECT * FROM post_post ORDER BY `post_post`.'timestamp' desc")
    all_posts = namedtuplefetchall(posts_cursor)
    return render(request, "index.html", {
        "all_posts": all_posts,
        "user": current_user,
        "user_class": user_class,
    })
def query_resolve(request, id=None):
    """Render the query page for QUERY_DICT entry *id*.

    Queries 1-3 are searchable (executed later by search_database);
    queries 4-6 run immediately and are restricted to experts.
    Redirects with a flash message on an unknown id or missing login.
    """
    # BUGFIX: dict.has_key() no longer exists on Python 3; `in` works on both.
    if int(id) not in QUERY_DICT:
        messages.error(request, "Wrong query given. Please try again.")
        return redirect("home:welcome")
    check = check_if_auth_user(request)
    current_user = None
    user_class = None
    if check:
        cursor = connection.cursor()
        user_class = request.session["user_class"]
        if user_class == 'E':
            query = "SELECT * FROM User_expert WHERE `User_expert`.'user_id' = %s"
        else:
            query = "SELECT * FROM User_farmer WHERE `User_farmer`.'user_id' = %s"
        cursor.execute(query, [check, ])
        result = namedtuplefetchall(cursor)
        current_user = result[0]
    if not current_user:
        messages.error(request, "Please login first.")
        return redirect("home:welcome")
    result = None
    result_header = None
    result_farmers = None
    if int(id) > 3:
        # Reports 4-6 are expert-only.
        if user_class == 'F':
            messages.error(request, "Only experts allowed for these queries.")
            return redirect("home:welcome")
        query = QUERY_DICT[int(id)]
        params = []
        if int(id) == 4:
            # BUGFIX: bind the location id as a query parameter instead of
            # %-interpolating it into the SQL string.
            params = [current_user.location_id]
        cursor.execute(query, params)
        result = namedtuplefetchall(cursor)
        result_header = [col[0] for col in cursor.description]
    context_data = {
        "user" : current_user,
        "user_class": user_class,
        "query_id": id,
        "result_header": result_header,
        "result": result,
        "result_farmers": result_farmers,
    }
    return render(request, "query.html" , context_data)
def search_database(request, id):
    """Run the searchable query *id* (1-3) against the pattern supplied in
    the ``search_query`` GET parameter and render the results page."""
    auth_id = check_if_auth_user(request)
    current_user, user_class = None, None
    if auth_id:
        cursor = connection.cursor()
        user_class = request.session["user_class"]
        if user_class == 'E':
            profile_sql = "SELECT * FROM User_expert WHERE `User_expert`.'user_id' = %s"
        else:
            profile_sql = "SELECT * FROM User_farmer WHERE `User_farmer`.'user_id' = %s"
        cursor.execute(profile_sql, [auth_id, ])
        current_user = namedtuplefetchall(cursor)[0]
    if not current_user:
        messages.error(request, "Please login first.")
        return redirect("home:welcome")
    if int(id) > 3:
        messages.error(request, "This is not a searchable query. Redirecting.")
        return redirect(reverse("home:query", kwargs={"id":id}))
    search_query = request.GET.get('search_query')
    if not search_query:
        messages.error(request, "Enter a suitable query. Try again")
        return redirect(reverse("home:query", kwargs={"id":id}))
    # Trailing '%' turns the term into a LIKE prefix pattern; the same
    # value fills all three placeholders of the query.
    search_query += '%'
    cursor.execute(QUERY_DICT[int(id)], [search_query, search_query, search_query])
    result = namedtuplefetchall(cursor)
    result_header = [col[0] for col in cursor.description]
    if not result:
        messages.error(request, "No records match. Try again")
    result_farmers = []
    if int(id) == 3:
        # Crop search also lists every farmer for the template.
        cursor.execute("SELECT * FROM User_farmer", [])
        result_farmers = namedtuplefetchall(cursor)
    return render(request, "query.html", {
        "user": current_user,
        "user_class": user_class,
        "query_id": id,
        "result_header": result_header,
        "result": result,
        "result_farmers": result_farmers,
    })
def get_faq(request):
    """Render the FAQ page, with the logged-in user's profile if any."""
    auth_id = check_if_auth_user(request)
    current_user = None
    if auth_id:
        cursor = connection.cursor()
        user_class = request.session["user_class"]
        profile_sql = (
            "SELECT * FROM User_expert WHERE `User_expert`.'user_id' = %s"
            if user_class == 'E'
            else "SELECT * FROM User_farmer WHERE `User_farmer`.'user_id' = %s"
        )
        cursor.execute(profile_sql, [auth_id, ])
        current_user = namedtuplefetchall(cursor)[0]
    return render(request, "faq.html", {"user": current_user})
def about_us(request):
    """Render the about page, with the logged-in user's profile if any."""
    auth_id = check_if_auth_user(request)
    current_user = None
    if auth_id:
        cursor = connection.cursor()
        user_class = request.session["user_class"]
        profile_sql = (
            "SELECT * FROM User_expert WHERE `User_expert`.'user_id' = %s"
            if user_class == 'E'
            else "SELECT * FROM User_farmer WHERE `User_farmer`.'user_id' = %s"
        )
        cursor.execute(profile_sql, [auth_id, ])
        current_user = namedtuplefetchall(cursor)[0]
    return render(request, "aboutus.html", {"user": current_user})
3381804 | <gh_stars>0
import pymia.deeplearning.model as mdl
import torch.optim as optim
import torch.nn as nn
import mialab.configuration.config as cfg
class TorchMRFModel(mdl.TorchModel):
    """pymia TorchModel wrapper: a 2-channel segmentation network trained
    with cross-entropy loss and the Adam optimizer."""

    def __init__(self, sample: dict, config: cfg.Configuration, network):
        super().__init__(config.model_dir, 3)
        self.learning_rate = config.learning_rate
        self.dropout_p = config.dropout_p
        # 2 input channels -> one logit map per class.
        self.network = network(2, cfg.NO_CLASSES, n_channels=config.n_channels, n_pooling=config.n_pooling)
        self.loss = nn.CrossEntropyLoss()
        self.optimizer = optim.Adam(self.network.parameters(), lr=self.learning_rate)

    def inference(self, x) -> object:
        """Forward pass through the wrapped network."""
        return self.network(x)

    def loss_function(self, prediction, label=None, **kwargs):
        """Cross-entropy between *prediction* logits and *label*."""
        return self.loss(prediction, label)

    def optimize(self, **kwargs):
        """Apply one optimizer step (gradients assumed already computed)."""
        self.optimizer.step()

    def epoch_summaries(self) -> list:
        return []

    def batch_summaries(self):
        return []

    def visualization_summaries(self):
        return []
| StarcoderdataPython |
4800951 | # -*- coding: utf-8 -*-
'''
torstack.storage.sync_memcache
sync memcache storage definition.
:copyright: (c) 2018 by longniao <<EMAIL>>
:license: MIT, see LICENSE for more details.
'''
import memcache
class SyncMemcahhe(object):
    """Synchronous memcached storage backed by python-memcached.

    Server endpoints are validated up front by init_configs(); the
    underlying memcache.Client is created lazily on first access.
    """

    def __init__(self, configs=None, expire=1800, debug=False):
        """
        :param configs: dict or list of dicts with host/port/weight keys
        :param expire: default TTL in seconds (stored; ``save`` only
            applies a TTL when one is passed explicitly)
        :param debug: forwarded to memcache.Client
        """
        # BUGFIX: `configs=[]` was a mutable default argument; None keeps
        # the same "falsy -> reject" behavior without the shared list.
        if not configs:
            raise BaseException('100001', 'error memcache config.')
        self.config_list = []
        self._client = None
        self._expire = expire
        self._debug = debug
        self.init_configs(configs)

    def init_configs(self, configs=None):
        '''
        Validate *configs* and append ("host:port", weight) tuples --
        the shape memcache.Client expects -- to self.config_list.
        A single dict is treated as a one-element list; port defaults
        to 11211 when falsy.
        '''
        if configs is None:
            configs = []
        if isinstance(configs, list):
            pass
        elif isinstance(configs, dict):
            configs = [configs]
        else:
            raise BaseException('10101', 'error memcache config.')
        for config in configs:
            try:
                host, port, weight = config.get('host'), config.get('port'), config.get('weight')
                if not isinstance(host, str):
                    raise ValueError('Invalid host')
                if not port:
                    port = 11211
                elif not isinstance(port, int):
                    raise ValueError('Invalid port')
                engine_url = '%s:%d' % (host, port)
                self.config_list.append((engine_url, weight))
            except Exception as e:
                # BUGFIX: the message was passed as a second Exception arg
                # and never %-formatted.
                raise Exception('error: %s' % e)

    def _create_client(self):
        '''
        create memcache client
        '''
        self._client = memcache.Client(self.config_list, debug=self._debug)

    @property
    def client(self):
        '''
        lazily-created memcache client
        '''
        if not self._client:
            self._create_client()
        return self._client

    def get(self, key):
        '''
        return the cached value of *key* (None when missing)
        '''
        return self.client.get(key)

    def save(self, key, value, lifetime=None):
        '''
        store *key* -> *value*, expiring after *lifetime* seconds when given
        '''
        # BUGFIX: python-memcached has no expire(); the TTL must be passed
        # to set() directly (time=0 means "no expiry").
        self.client.set(key, value, time=lifetime or 0)
        return

    def delete(self, key):
        '''
        remove *key* from the cache
        '''
        # BUGFIX: was client.expire(key, 0) -- a method that does not exist
        # on memcache.Client.
        return self.client.delete(key)

    def expire(self, key, lifetime=0):
        '''
        reset *key*'s TTL to *lifetime* seconds
        '''
        # BUGFIX: touch() is python-memcached's call for updating a TTL
        # in place; expire() does not exist.
        return self.client.touch(key, lifetime)
| StarcoderdataPython |
176147 | # ========== (c) <NAME> 3/8/21 ==========
import pandas as pd
import numpy as np
import scipy.stats
# Console display tuning: widen pandas output so DataFrames print
# without wrapping.
desired_width = 320
pd.set_option('display.max_columns', 20)
pd.set_option('display.width', desired_width)
# ==========================
# For datasets 1-3
# ==========================
# Long-format data: one row per (Time, variable) observation.
df = pd.read_csv("data-vid/example_dataset1.csv", index_col=0)
df.head()
df["variable"].unique()
import plotly.express as px
# Each fig.show() renders the chart (opens in a browser outside notebooks).
fig = px.line(df, x="Time", y="value", color="variable")
fig.show()
fig = px.line(df, x="Time", y="value", color="variable", facet_col="variable")
fig.show()
fig = px.line(df, x="Time", y="value", color="variable",
title="Example Dataset 1", template="plotly_dark", width=800, height=600)
fig.show()
# Calculate pearson coefficients
# Option 1: reshape to wide format, one column per variable.
df = df.pivot('Time', 'variable', 'value').reset_index() # df.pivot(index, columns, values)
df.head()
# pearsonr returns (correlation coefficient, two-sided p-value).
print(scipy.stats.pearsonr(df["Variable A"], df["Variable B"]))
# Option 2: filter the long format directly.
# NOTE(review): df was pivoted above, so the 'variable' column no longer
# exists at this point; these two lines likely raise -- confirm which
# frame was intended.
print(df[df["variable"] == "Variable A"]["value"])
print(scipy.stats.pearsonr(df[df["variable"] == "Variable A"]["value"], df[df["variable"] == "Variable B"]["value"]))
# ==========================
# For dataset 4
# ==========================
df = pd.read_csv("data-vid/example_dataset4.csv", index_col=0)
df.head()
df.sym.unique()
# Raw vs normalised closing prices per symbol.
fig = px.line(df, x="date", y="close", color="sym",
title=f"Example Dataset - {df.sym.unique()}", template="plotly_dark", width=800, height=600)
fig.show()
fig = px.line(df, x="date", y="norm_close", color="sym",
title=f"Example Dataset - {df.sym.unique()}", template="plotly_dark", width=800, height=600)
fig.show()
print(scipy.stats.pearsonr(df[df["sym"] == "AAPL"]["close"], df[df["sym"] == "MMM"]["close"]))
# But how would we compare many, many pairs of stocks?
# For example - if we are potentially interested in the S&P 500, that's 500 stocks - which includes almost 125,000 pairs of stocks
# This is where automation comes in handy
import utils
# ========== GET DATA ==========
symbol_dict = utils.load_data("data")
df = utils.symbol_dict_to_df(symbol_dict)
df = utils.normalise_price(df)
symbols = list(np.sort(df["symbol"].unique()))
# ========== DETERMINE SIMILARITIES ==========
# Calculate similarities between each stock
import datetime
starttime = datetime.datetime.now()
# Pairwise Pearson r and p-value matrices, filled by the loop below.
r_array = np.zeros([len(symbols), len(symbols)])
p_array = np.zeros([len(symbols), len(symbols)])
for i in range(len(symbols)):
for j in range(len(symbols)):
vals_i = df[df["symbol"] == symbols[i]]['close'].values
vals_j = df[df["symbol"] == symbols[j]]['close'].values
r_ij, p_ij = scipy.stats.pearsonr(vals_i, vals_j)
r_array[i, j] = r_ij
p_array[i, j] = p_ij
elapsed = datetime.datetime.now()-starttime # Takes about 2 minutes
fig = px.imshow(r_array)
fig.show()
fig = px.imshow(r_array, x=symbols, y=symbols,
color_continuous_scale=px.colors.sequential.Blues)
fig.show()
r_df = pd.DataFrame(r_array, index=symbols, columns=symbols)
tmp_ser = r_df.loc["MSFT", :].sort_values(ascending=False)
tmp_df = df[(df["symbol"] == "MSFT") | (df["symbol"] == "BAX")]
fig = px.line(df, x="date", y="norm_close", color="symbol",
title=f"Example Comparison", template="plotly_dark", width=800, height=600)
fig.show()
# ========== SELECT PORTFOLIO ==========
# Find negatively correlated stocks to some companies
# AAPL, MSFT
init_syms = ["AAPL", "MSFT"]
new_syms = list()
n_targets = 1
for sym in init_syms:
tmp_df = r_df[sym]
new_syms += tmp_df.sort_values()[:n_targets].index.to_list()
new_syms = list(set(new_syms))
list(new_syms)
port_syms = init_syms + new_syms
filt_df = df[df.symbol.isin(port_syms)]
fig = px.line(filt_df, x="date", y="norm_close", color="symbol",
title=f"Example Comparison", template="plotly_dark", width=800, height=600)
fig.show()
# Find lowest-correlated stocks to some companies
init_syms = ["AAPL", "MSFT"]
new_syms = list()
n_targets = 1
for sym in init_syms:
tmp_df = r_df[sym]
tmp_df = tmp_df[tmp_df > 0]
new_syms += tmp_df.sort_values()[:n_targets].index.to_list()
new_syms = list(set(new_syms))
list(new_syms)
port_syms = init_syms + new_syms
filt_df = df[df.symbol.isin(port_syms)]
fig = px.line(filt_df, x="date", y="norm_close", color="symbol",
title=f"Example Comparison", template="plotly_dark", width=800, height=600)
fig.show()
filt_df["init_syms"] = True
filt_df.loc[filt_df["symbol"].isin(new_syms), "init_syms"] = False
fig = px.line(filt_df, x="date", y="norm_close", color="symbol", facet_col="init_syms",
title=f"Example Comparison", template="plotly_dark", width=800, height=600)
fig.show()
avg_df = filt_df.groupby('date').mean()["norm_close"].reset_index()
avg_df["symbol"] = "avg"
filt_df = pd.concat([filt_df, avg_df])
fig = px.line(filt_df, x="date", y="norm_close", color="symbol",
title=f"Example Comparison", template="plotly_dark", width=800, height=600)
fig.show()
color_map = {s: "grey" for s in port_syms}
color_map["avg"] = "red"
fig = px.line(filt_df, x="date", y="norm_close", color="symbol",
color_discrete_map=color_map,
title=f"Example Comparison", template="plotly_dark", width=800, height=600)
fig.show()
| StarcoderdataPython |
9644500 | import numpy as np
import pandas as pd
import scipy.spatial.distance as ssd
from sklearn.utils import check_array
import logging
from time import time
# Module-level logger: DEBUG at the logger itself, INFO on the console
# handler, with timestamped "name - level - message" formatting.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
# Avoid duplicate records when ancestor loggers also have handlers attached.
logger.propagate = False
class iNNEDetector(object):
    """
    Ensemble anomaly detector based on the iNNE method (isolation using
    Nearest Neighbour Ensembles).

    Parameters
    ----------
    ensemble_size : int
        Number of ensemble members.
    sample_size : int
        Number of points sampled for each member.
    metric : str
        Distance metric: 'euclid' (default), 'kolmogorov' or 'chebyshev'.
    verbose : bool
        Emit progress logging. Default True.

    Fixes vs. the original implementation:
    - Kolmogorov branch passed ``decimals=10`` to ``np.sqrt`` (TypeError).
    - ``get_score`` on 2-d input scored the whole matrix instead of each row.
    - ``fit`` / ``fit_transform`` / ``fit_score`` triplicated the fitting
      code; it now lives in ``_fit_spheres``.
    """

    def __init__(self, ensemble_size=100, sample_size=32, metric='euclid', verbose=True):
        self.ensemble_size = ensemble_size
        self.sample_size = sample_size
        self.metric = metric
        self.verbose = verbose

    def _D(self, x, y, metric):
        """Distance between 1-d vectors *x* and *y* under *metric*."""
        if metric in ('euclid', 'Euclid'):
            return np.linalg.norm(x - y)
        if metric in ('kolmogorov', 'Kolmogorov'):
            # Only defined for normalized vectors; round to dodge float noise.
            norm_x = np.around(np.linalg.norm(x), decimals=10)
            norm_y = np.around(np.linalg.norm(y), decimals=10)
            if norm_x == 1 and norm_y == 1:
                # BUG FIX: `decimals=10` belongs to np.around, not np.sqrt.
                return np.sqrt(1 - np.around(np.absolute(np.dot(x, y)), decimals=10))
            raise NameError('%s metric supports only normalized vectors' % metric)
        if metric in ('chebyshev', 'Chebyshev'):
            return ssd.chebyshev(x, y)
        raise NameError('%s metric not supported' % metric)

    def _generate_spheres(self, X_s):
        """Build one hypersphere per sample point of *X_s*.

        Returns a list of ``(center, radius, nn_index)`` tuples where
        ``radius`` is the distance to the nearest other sample point and
        ``nn_index`` is that neighbour's row index within ``X_s``.
        """
        n = X_s.shape[0]
        if n < 2:
            # The original looped forever here trying to draw k != i.
            raise ValueError('sample_size must be at least 2')
        spheres = []
        for i in range(n):
            radius = None
            nn_index = None
            for j in range(n):
                if j == i:
                    continue
                d = self._D(X_s[i], X_s[j], self.metric)
                if radius is None or d < radius:
                    radius = d
                    nn_index = j
            spheres.append((X_s[i], radius, nn_index))
        return spheres

    def _fit_spheres(self, X):
        """Sample ``ensemble_size`` subsets of X and build their spheres."""
        check_array(X)
        self._sets_of_spheres = []
        if self.verbose:
            logger.info('generating sets of spheres...')
        for _ in range(self.ensemble_size):
            X_s = np.random.permutation(X)[:self.sample_size, :]
            self._sets_of_spheres.append(self._generate_spheres(X_s))

    def _log_runtime(self, t_0):
        """Log the wall time elapsed since *t_0* as h:m:s."""
        m, s = divmod(time() - t_0, 60)
        h, m = divmod(m, 60)
        if self.verbose:
            logger.info('Total run time: %i:%i:%i' % (h, m, s))

    def _score(self, y, spheres):
        """Anomaly score of *y* under one ensemble member's spheres.

        1 when y falls inside no sphere; otherwise ``1 - r(nn)/r(B)`` where
        B is the smallest sphere covering y and nn its nearest neighbour.
        """
        covering = [s for s in spheres if self._D(y, s[0], self.metric) <= s[1]]
        if not covering:
            return 1
        B = min(covering, key=lambda s: s[1])
        return 1 - (float(spheres[B[2]][1]) / float(B[1]))

    def fit(self, X, y=None):
        """Generate the sets of hyper-spheres used for scoring; return self."""
        t_0 = time()
        self._fit_spheres(X)
        self._log_runtime(t_0)
        return self

    def fit_transform(self, X, y=None):
        """Alias of :meth:`fit` (the original duplicated its body verbatim)."""
        return self.fit(X, y)

    def fit_score(self, X, y=None):
        """Fit on X and return the anomaly score of every row.

        Also caches the scored dataset on ``self.X_scored`` (scores in the
        last column) the first time it is called.
        """
        t_0 = time()
        self._fit_spheres(X)
        scores = np.zeros(X.shape[0])
        for i in range(X.shape[0]):
            if i % 1000 == 0 and self.verbose:
                logger.info('Getting anomaly score for data point %i' % i)
                logger.info('X shape: %i X %i' % X.shape)
            scores_i = [self._score(X[i], spheres) for spheres in self._sets_of_spheres]
            scores[i] = np.mean(scores_i)
        if not hasattr(self, 'X_scored'):
            self.X_scored = np.column_stack((X, scores))
        self._log_runtime(t_0)
        return scores

    def get_all_scores(self):
        """Return the dataset with anomaly scores in the last column.

        Requires a prior :meth:`fit_score` call.
        """
        if not hasattr(self, 'X_scored'):
            raise NameError('method get_all_scores returns scores only if method fit_score has been previously called')
        return self.X_scored

    def get_score(self, X):
        """Score one new point (1-d input) or many (2-d input).

        Returns ``[score, 1 - score]`` for a vector, or an ``(n, 2)`` array
        with one such row per input row.
        """
        if X.ndim == 1:
            mean = np.mean([self._score(X, spheres) for spheres in self._sets_of_spheres])
            s = np.zeros(2)
            s[0] = mean
            s[1] = 1 - mean
            return s
        elif X.ndim == 2:
            s = np.zeros((X.shape[0], 2))
            for i in range(X.shape[0]):
                # BUG FIX: the original scored the whole matrix X here
                # instead of the i-th row.
                mean = np.mean([self._score(X[i], spheres) for spheres in self._sets_of_spheres])
                s[i, 0] = mean
                s[i, 1] = 1 - mean
            return s

    def get_anomalies(self, decision_threshold=1):
        """Return rows with score >= *decision_threshold* (scores appended).

        Requires a prior :meth:`fit_score` call.
        """
        if not hasattr(self, 'X_scored'):
            raise NameError('method get_anomalies returns scores only if method fit_score has been previously called')
        X_tmp = self.X_scored[:, :-1]
        scores_tmp = self.X_scored[:, -1]
        mask = scores_tmp >= decision_threshold
        self.X_anom = np.column_stack((X_tmp[mask], scores_tmp[mask]))
        return self.X_anom
| StarcoderdataPython |
1958073 | #!/usr/bin/env python3
import argparse
import random
import os
import pickle
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.utils import to_categorical
from generator import DataGenerator
from build_model import build_model
def load_data(path_image):
    """Collect jpg/png filenames under *path_image* and derive their labels.

    The label is the part of the filename before the first '-'.
    Returns (list_of_filenames, list_of_labels).
    """
    print("LOADING IMAGES...")
    entries = os.listdir(path_image)
    # Keep files whose name ends in jpg/png (case-insensitive), like the
    # original last-three-characters check.
    image_files = [e for e in entries if e.lower().endswith(('jpg', 'png'))]
    names = []
    labels = []
    for image_name in image_files:
        names.append(image_name)
        labels.append(image_name.split('-')[0])
    print("Total images: %d" % len(names))
    return names, labels
def encode_label(list_label, save_file):
    """One-hot encode *list_label*, persisting the label->index map.

    The map is pickled to ./<save_file> on first use and loaded from disk on
    later runs so encodings stay stable across invocations.
    """
    print("ENCODING LABELS...")
    # Renamed from `dir`, which shadowed the builtin of the same name.
    cache_dir = './'
    map_path = os.path.join(cache_dir, save_file)
    if os.path.exists(map_path):
        print("LOADING LABEL MAP")
        # `with` closes the handle; the original leaked open file objects.
        with open(map_path, 'rb') as fp:
            label_map = pickle.load(fp)
    else:
        print("SAVE LABEL MAP")
        classes = sorted(set(list_label))
        label_map = dict((c, i) for i, c in enumerate(classes))
        with open(map_path, 'wb') as fp:
            pickle.dump(label_map, fp)
    print("LABEL MAP", label_map)
    encoded = to_categorical([label_map[x] for x in list_label])
    print("Load or Save file %s success" % save_file)
    return encoded
def train_model(model, baseModel, X_train, y_train, X_test=None, y_test=None, args=None, n_classes=0, batch_size=32, ckpt_path='./ckpt', model_ckpt='model_best_ckpt.h5'):
    """
    Two-stage transfer-learning loop.

    Step 1 (args.step <= 1): train with the backbone frozen.
    Step 2 (args.step <= 2): unfreeze the upper backbone layers and fine-tune.
    Step 3 (args.step <= 3): evaluate on the held-out test split, if provided.

    The best weights (lowest val_loss) are checkpointed to
    ckpt_path/model_ckpt and reloaded on restart.
    """
    # Carve a validation split out of the training data.
    X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
    aug_train = DataGenerator(X_train, y_train, args.img_path, 3, batch_size=batch_size, n_classes=n_classes)
    aug_valid = DataGenerator(X_valid, y_valid, args.img_path, 3, batch_size=batch_size, n_classes=n_classes)
    ckpt_file = os.path.join(ckpt_path, model_ckpt)
    checkpoint = ModelCheckpoint(ckpt_file, monitor="val_loss",
                                 save_best_only=True, mode='min', save_weights_only=True, save_freq='epoch')
    early_stop = EarlyStopping(monitor='val_loss', patience=10)
    # Resume from an earlier checkpoint when one exists.
    if os.path.exists(ckpt_file):
        print("LOADING MODEL WEIGHT...")
        model.load_weights(ckpt_file)
    else:
        print("CREATE MODEL WEIGHT FILE...")
    if args.step <= 1:
        print("TRAINING MODEL STEP 1...")
        # Freeze the backbone so only the new head is trained.
        for layer in baseModel.layers:
            layer.trainable = False
        model.compile(RMSprop(0.001), 'categorical_crossentropy', ['accuracy'])
        if args.validation:
            model.fit(aug_train, validation_data=aug_valid, epochs=args.epoch_step_1, callbacks=[checkpoint, early_stop])
        else:
            model.fit(aug_train, epochs=args.epoch_step_1, callbacks=[checkpoint, early_stop])
    if args.step <= 2:
        print("TRAINING MODEL STEP 2...")
        # Unfreeze the upper backbone layers for fine-tuning.
        for layer in baseModel.layers[162:]:
            layer.trainable = True
        model.compile(Adam(learning_rate=0.001, decay=5e-5), 'categorical_crossentropy', ['accuracy'])
        if args.validation:
            model.fit(aug_train, validation_data=aug_valid, epochs=args.epoch_step_2, callbacks=[checkpoint, early_stop])
        else:
            model.fit(aug_train, epochs=args.epoch_step_2, callbacks=[checkpoint, early_stop])
    if args.step <= 3:
        # BUG FIX: the original unconditionally built a test DataGenerator
        # and called evaluate, crashing when no test split was supplied.
        if X_test is None:
            print("SKIP EVALUATION STEP 3: no test split provided")
        else:
            print("EVALUTE MODEL STEP 3...")
            aug_test = DataGenerator(X_test, y_test, args.img_path, 3, batch_size=batch_size, n_classes=n_classes)
            model.compile(Adam(learning_rate=0.001, decay=5e-5), 'categorical_crossentropy', ['accuracy'])
            score = model.evaluate(aug_test, verbose=1, batch_size=batch_size)
            print("TEST LOST, TEST ACCURACY:", score)
    print("FINISH TRAINING MODEL...")
def main(args):
    """Entry point: load images, encode labels, build the model and train."""
    print("START MAIN CLASS TRAINING MODEL")
    image_names, raw_labels = load_data(args.img_path)
    print("LIST CLASSES BEFORE SHUFFLE", set(raw_labels))
    labels = encode_label(raw_labels, args.mapping_file)
    n_classes = len(set(raw_labels))
    print("NUM CLASSES", n_classes)
    print("LIST CLASSES AFTER SHUFFLE", set(raw_labels))
    baseModel, mainModel = build_model(n_classes, args.appEfficientNet)
    if args.validation:
        X_train, X_test, y_train, y_test = train_test_split(image_names, labels, test_size=0.2, random_state=42)
        train_model(model=mainModel, baseModel=baseModel, X_train=X_train, X_test=X_test,
                    y_train=y_train, y_test=y_test, args=args, n_classes=n_classes,
                    batch_size=args.batch_size, model_ckpt=args.model_ckpt)
    else:
        train_model(model=mainModel, baseModel=baseModel, X_train=image_names, y_train=labels,
                    args=args, n_classes=n_classes, batch_size=args.batch_size,
                    model_ckpt=args.model_ckpt)
    print("FINISH MAIN CLASS TRAINING MODEL")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--img_path', help='Path to folder which contains images.', type=str, default='./images-cropped')
parser.add_argument('--mapping_file', help='Path to save label map file.', type=str, default='label_map.pkl')
parser.add_argument('--epoch_step_1', help='Number of epochs for training step 1.', type=int, default=30)
parser.add_argument('--epoch_step_2', help='Number of epochs for training step 2.', type=int, default=100)
parser.add_argument('--validation', help='Wheather to split data for validation.', type=bool, default=True)
parser.add_argument('--step', help='Training model step (1, 2, 3)', type=int, default=0)
parser.add_argument('--appEfficientNet', help='EfficientNetB0, 1, 2, 3, 4, 5, 6, 7', type=int, default=0)
parser.add_argument('--batch_size', help='Number of batch size for training', type=int, default=32)
parser.add_argument('--model_ckpt', help='File name model ckpt h5', type=str, default='model_best_ckpt.h5')
args = parser.parse_args()
main(args)
| StarcoderdataPython |
11291510 | #!/usr/bin/env python
"""Edge-detect a background image: blur, Canny, and display the result."""
import cv2
import numpy as np
import matplotlib.pyplot as plt

IMAGE_PATH = 'img-desktop-kv-background.jpg'

img = cv2.imread(IMAGE_PATH)
# BUG FIX: cv2.imread returns None (not an exception) when the file is
# missing or unreadable; fail loudly instead of crashing inside cv2.blur.
if img is None:
    raise FileNotFoundError('could not read %s' % IMAGE_PATH)
blurred_img = cv2.blur(img, ksize=(4, 4))
edges = cv2.Canny(image=blurred_img, threshold1=20, threshold2=60)
plt.imshow(edges)
plt.show()
| StarcoderdataPython |
298753 | <reponame>exekias/beats-kubernetes-demo<filename>app/questions/views.py
from django.views.generic.edit import CreateView
from django.urls import reverse_lazy
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.contrib import messages
from django.db.models import Count
from django.http import HttpResponseBadRequest
from .models import Question, QuestionVote
def get_session_key(request):
    """Return the session key for *request*, persisting a new session first
    when none exists yet."""
    session = request.session
    if not session.session_key:
        session.save()
    return session.session_key
def index(request):
    """Render the landing page: the 30 most-voted questions plus the ids of
    questions this visitor's session has already voted on."""
    session_key = get_session_key(request)
    top_questions = (Question.objects
                     .annotate(num_votes=Count('votes'))
                     .order_by('-num_votes')[:30])
    voted_ids = (QuestionVote.objects
                 .filter(session_key=session_key)
                 .values_list('question__id', flat=True))
    return render(request, 'questions/index.html',
                  {'questions': top_questions, 'votes': voted_ids})
def about(request):
    """Render the static about page."""
    context = {}
    return render(request, 'questions/about.html', context)
class QuestionCreate(CreateView):
    """Create-question form view; remembers the author's name in the session
    so the next form submission is pre-filled."""
    model = Question
    fields = ['title', 'author']
    success_url = reverse_lazy('questions:index')

    def form_valid(self, form):
        # Store the author name for pre-filling future forms.
        self.request.session['author'] = form.instance.author
        return super(QuestionCreate, self).form_valid(form)

    def get_form_kwargs(self):
        kwargs = super(QuestionCreate, self).get_form_kwargs()
        author = self.request.session.get('author', '')
        # BUG FIX: kwargs.get('initial', {}) built a throwaway dict when the
        # 'initial' key was absent, silently dropping the author pre-fill;
        # setdefault stores the dict back into kwargs.
        kwargs.setdefault('initial', {})['author'] = author
        return kwargs
def vote(request, question_id):
    """Record one vote on *question_id* for the current session.

    Returns a 400 response when the question is missing or the session has
    already voted (the unique-together validation fails).
    """
    session_key = get_session_key(request)
    try:
        question = Question.objects.get(pk=question_id)
    except Question.DoesNotExist:
        error = 'Given question doesn\'t exist'
        messages.add_message(request, messages.ERROR, error)
        return HttpResponseBadRequest(error)
    try:
        ballot = QuestionVote(question=question, session_key=session_key)
        ballot.full_clean()
        ballot.save()
    except ValidationError:
        error = 'Error voting question, voted twice?'
        messages.add_message(request, messages.ERROR, error)
        return HttpResponseBadRequest(error)
    return redirect('questions:index')
| StarcoderdataPython |
280035 | <filename>cleanup.py
import boto3
import json
import sys

# Prototype to remove s3 records and dynamodb records for images that have
# been removed from ECR.  list_repos.py (run under 10011 credentials) builds
# the list of all repos; this script (10021 credentials) removes s3 reports
# and dynamodb entries for repos that no longer exist, so we don't report on
# them or trigger clair layer notifications for them.
# This should be wrapped into lambda functions to run periodically, or on
# notification of a delete from ECR.
BUCKET = 'ecrscan-clair-scan-results'

# Load the repo/image listing produced by list_repos.py (path in argv[1]).
raw = None
with open(sys.argv[1]) as f:
    raw = json.load(f)

# Index images as {registry_id: {repository: {digest, ...}}} for O(1) lookups.
images = {}
for image in raw:
    registryId = image['registryId']
    repository = image['repository']
    imageDigest = image['imageDigest'].split('sha256:')[1]
    images.setdefault(registryId, {}).setdefault(repository, set()).add(imageDigest)

# List every report key in the bucket.  The paginator handles the
# continuation token, and .get('Contents', []) avoids the KeyError the
# original raised on an empty bucket.
s3 = boto3.client('s3')
reports = []
for page in s3.get_paginator('list_objects_v2').paginate(Bucket=BUCKET):
    reports.extend(obj['Key'] for obj in page.get('Contents', []))

# Key layout:
# year=Y/month=M/day=D/registry_id=R/<repo path...>/<digest>.json.gz
to_delete = []
for key in reports:
    _, _, _, registry_part, remainder = key.split('/', 4)
    registry_id = registry_part.split('registry_id=')[1]
    parts = remainder.split('/')
    repository = '/'.join(parts[:-1])
    image_digest = parts[-1].split('.json.gz')[0]
    known = (registry_id in images
             and repository in images[registry_id]
             and image_digest in images[registry_id][repository])
    if not known:
        to_delete.append(key)

print("Deleting s3 reports:")
for k in to_delete:
    print(k)
    s3.delete_object(
        Bucket=BUCKET,
        Key=k
    )
def should_delete_from_db(item, images):
    """Return True when the DynamoDB record *item* refers to an image that is
    absent from the *images* index ({registry: {repo: {digest, ...}}})."""
    data = item['image_data']['M']
    registry = data['registryId']['S']
    repo = data['repositoryName']['S']
    digest = data['imageId']['M']['imageDigest']['S'].split('sha256:')[1]
    known = digest in images.get(registry, {}).get(repo, set())
    return not known
# Scan the clair-indexed-layers table page by page, collecting records whose
# image no longer exists in ECR.
to_delete = []
db = boto3.client('dynamodb')
scan_kwargs = {
    'TableName': 'clair-indexed-layers',
    'ConsistentRead': True,
}
last_evaluated_key = None
while True:
    if last_evaluated_key is not None:
        scan_kwargs['ExclusiveStartKey'] = last_evaluated_key
    response = db.scan(**scan_kwargs)
    # BUG FIX: the original's follow-up-page loop never iterated
    # response['Items']; it re-checked the single stale `item` left over
    # from the first page, so most of the table was never examined.
    for item in response['Items']:
        if should_delete_from_db(item, images):
            to_delete.append({
                'layer_name': item['layer_name']['S'],
                'image_name': item['image_name']['S']
            })
    last_evaluated_key = response.get('LastEvaluatedKey')
    if last_evaluated_key is None:
        break

print("delete dynamodb records:")
for item in to_delete:
    print(item)
    db.delete_item(
        TableName='clair-indexed-layers',
        Key={
            'layer_name': {
                'S': item['layer_name']
            },
            'image_name': {
                'S': item['image_name']
            }
        }
    )
6436405 | <filename>compiler/cpsconvert.py
from . import ast as A
from . import parse as P
from . import string2ast as S2A
from . import symbol as S
def cpsConvert(ast):
    """Transform *ast* into continuation-passing style (CPS).

    Every expression is rewritten so that it delivers its result by applying
    an explicit continuation (a one-argument lambda) instead of returning a
    value.
    """
    def cps(ast, contAst):
        # Dispatch on node type; each branch rewrites the node so its result
        # is handed to `contAst`.
        if A.isLit(ast):
            # Literal: pass the value straight to the continuation.
            ret = A.makeApp([contAst, ast])
            return ret
        if A.isRef(ast):
            # Variable reference: likewise, apply the continuation directly.
            ret = A.makeApp([contAst, ast])
            return ret
        if A.isSetClj(ast):
            # Assignment: CPS-convert the value expression, then perform the
            # set! and hand its result to the continuation.
            def fn(val):
                return A.makeApp(
                    [contAst,
                     A.makeSet(val,
                               A.setVar(ast))])
            ret = cpsList(A.astSubx(ast), fn)
            return ret
        if A.isCnd(ast):
            # Conditional: convert the test, then both branches under the
            # same continuation.
            def xform(contAst):
                def fn(test):
                    testExp = test[0]
                    ifClause = A.astSubx(ast)[1]
                    elseClause = A.astSubx(ast)[2]
                    ret = A.makeCnd([testExp,
                                     cps(ifClause, contAst),
                                     cps(elseClause, contAst)])
                    return ret
                return cpsList([A.astSubx(ast)[0]], fn)
            if A.isRef(contAst):
                # The continuation is a simple variable: safe to duplicate
                # into both branches.
                ret = xform(contAst)
                return ret
            else:
                # Bind the (possibly large) continuation to a fresh variable
                # first, so it is not duplicated in each branch.
                k = S.newVar('k')
                ret = A.makeApp([A.makeLam([xform(A.makeRef([], k))], [k]), contAst])
                return ret
        if A.isPrim(ast):
            # Primitive operation: convert the operands, then apply the
            # continuation to the primitive's result.
            def fn(args):
                return A.makeApp([contAst, A.makePrim(args, A.primOp(ast))])
            ret = cpsList(A.astSubx(ast), fn)
            return ret
        if A.isApp(ast):
            func = A.astSubx(ast)[0]
            def fn1(vals):
                # Direct application of a lambda ((lambda ...) args...):
                # keep it inline, CPS-converting its body under `contAst`.
                lam = A.makeLam(
                    [cpsSeq(A.astSubx(func), contAst)],
                    A.lamParams(func))
                return A.makeApp([lam] + vals)
            def fn2(args):
                # General call: the callee receives the continuation as an
                # extra first argument.
                return A.makeApp([args[0], contAst] + args[1:])
            if A.isLam(func):
                ret = cpsList(A.astSubx(ast)[1:], fn1)
                return ret
            else:
                ret = cpsList(A.astSubx(ast), fn2)
                return ret
        if A.isLam(ast):
            # Lambda: prepend an explicit continuation parameter `k` and
            # convert the body to deliver its result to it.
            k = S.newVar("k")
            ret = A.makeApp([contAst, A.makeLam(
                [cpsSeq(A.astSubx(ast), A.makeRef([], k))],
                [k] + A.lamParams(ast))])
            return ret
        if A.isSeqClj(ast):
            # Sequence (begin ...): thread the continuation through each
            # sub-expression.
            ret = cpsSeq(A.astSubx(ast), contAst)
            return ret
        print('Unknown AST {}'.format(ast))
        exit()

    def cpsList(asts, inner):
        # Convert a list of expressions left-to-right, reducing each to a
        # trivial (literal/variable) term, then call `inner` with them.
        def body(x):
            def fn(newAsts):
                return inner([x] + newAsts)
            ret = cpsList(asts[1:], fn)
            return ret
        if not asts:
            return inner([])
        if A.isLit(asts[0]) or A.isRef(asts[0]):
            # Already trivial: no conversion needed.
            return body(asts[0])
        # Otherwise evaluate it first and bind the result to a fresh
        # variable `r`.
        r = S.newVar("r")
        return cps(asts[0], A.makeLam([body(A.makeRef([], r))], [r]))

    def cpsSeq(asts, contAst):
        # Convert a sequence: intermediate results are bound to a throwaway
        # variable; only the last value reaches the continuation.
        if not asts:
            return A.makeApp([contAst, False])
        if not asts[1:]:
            return cps(asts[0], contAst)
        r = S.newVar('r')
        return cps(asts[0], A.makeLam([cpsSeq(asts[1:], contAst)], [r]))

    # Top level: the program's final continuation halts with the result.
    r = S.newVar('r')
    cpsAst = cps(ast, A.makeLam([A.makePrim([A.makeRef([], r)], '%halt')], [r]))
    if S.lookup('call/cc', S.fv(ast)):
        # The program uses call/cc: wrap it so call/cc is defined in terms
        # of the explicit continuations introduced above.
        l = A.makeLam([cpsAst], [S.newVar('_')])
        x = S2A.parse('(set! call/cc (lambda (k f) (f k (lambda (_ result) (k result)))))')
        return A.makeApp([l, x])
    else:
        return cpsAst
| StarcoderdataPython |
6621252 | <reponame>qq1418381215/caat<gh_stars>10-100
import torch
from torch.autograd import Function
from torch.nn import Module
from .warp_rnnt import *
from .rnnt import rnnt_loss,RNNTLoss
from .delay_transducer import delay_transducer_loss, DelayTLoss
# Public API of the package: the two loss functions and their Module wrappers.
__all__ = ['rnnt_loss', 'RNNTLoss','delay_transducer_loss', 'DelayTLoss']
| StarcoderdataPython |
107917 | #!/usr/bin/python
# -*- coding: iso-8859-1 -*-
from collections import namedtuple
from math import sqrt
import random
# Cluster record type (points, centroid, size) — currently unused below.
Cluster = namedtuple('Cluster', ('points', 'center', 'n'))
def calculate_center(points):
    """Return the centroid (mean x, mean y) of *points*.

    Returns (0, 0) for an empty input.  (The original inconsistently
    returned a list for the empty case and a tuple otherwise.)
    """
    if not points:
        return (0, 0)
    n = len(points)
    sum_x = sum(p[0] for p in points)
    sum_y = sum(p[1] for p in points)
    return (sum_x / n, sum_y / n)
def calculate_distance(p1, p2):
    """Return the Euclidean distance between 2-d points *p1* and *p2*."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return sqrt(dx ** 2 + dy ** 2)
def kmeans(points, k=2, min_diff=1):
    """Lloyd's k-means over 2-d points.

    Iterates assignment/update until no center moves more than *min_diff*.
    Returns k lists of points (the cluster memberships), or None when there
    are fewer points than clusters.
    """
    if len(points) < k:
        # BUG FIX: the original only checked for < 2 points and always
        # seeded exactly two centers, so any k > 2 raised IndexError below.
        print("Not enough points for k-means")
        return
    # Seed one center per cluster from the first k points (the original
    # seeded just points[0] and points[1]).
    centers = [points[i] for i in range(k)]
    plists = []
    while 1:
        plists = [[] for i in range(k)]
        # Assignment step: each point joins its nearest center's cluster.
        for p in points:
            smallest_distance = float('Inf')
            idx = 0
            for i in range(k):
                distance = calculate_distance(p, centers[i])
                if distance < smallest_distance:
                    smallest_distance = distance
                    idx = i
            plists[idx].append(p)
        # Update step: recompute centroids; track the largest movement.
        diff = 0
        for i in range(k):
            old = centers[i]
            new = calculate_center(plists[i])
            centers[i] = new
            diff = max(diff, calculate_distance(old, new))
        if diff < min_diff:
            break
    return plists
| StarcoderdataPython |
9610437 | <reponame>iashraful/pnp-graphql<filename>pnp_graphql/exceptions.py
from pnp_graphql import status_code
from django.utils.translation import ugettext_lazy as _
class APIBaseException(Exception):
status = status_code.HTTP_500_INTERNAL_SERVER_ERROR
message = _('Internal Server Error.')
error_key = 'error'
def __init__(self, message=None, status=None):
if status is not None:
self.status = status
if message is not None:
self.message = message
class APIAuthenticationError(APIBaseException):
status = status_code.HTTP_400_BAD_REQUEST
message = _('Error while authenticating.')
error_key = 'error'
class AuthenticationFailed(APIBaseException):
status = status_code.HTTP_401_UNAUTHORIZED
message = _('Incorrect authentication credentials.')
error_key = 'authentication_failed'
class NotAuthenticated(APIBaseException):
status = status_code.HTTP_401_UNAUTHORIZED
message = _('Authentication credentials were not provided.')
error_key = 'not_authenticated'
class PermissionDenied(APIBaseException):
status = status_code.HTTP_403_FORBIDDEN
message = _('You do not have permission to perform this action.')
error_key = 'permission_denied'
| StarcoderdataPython |
8029468 | <reponame>facebookresearch/uimnet<filename>scripts/run_prediction.py
#!/usr/bin/env python3
#
# # Copyright (c) 2021 Facebook, inc. and its affiliates. All Rights Reserved
#
#
"""
Evaluate in-domain metrics on the sweep directory
"""
import argparse
import os
import submitit
import pickle
import concurrent.futures
import torch
import copy
import pandas as pd
from pathlib import Path
from omegaconf import OmegaConf
import submitit
import torch.multiprocessing as tmp
from uimnet import utils
from uimnet import workers
from uimnet import __SLURM_CONFIGS__
PREDICTION_CFG = """
output_dir: null # subfolder. Mutable at dispatch
dataset:
name: ImageNat
root: /checkpoint/ishmaelb/data/datasets/ILSVRC2012
equalize_partitions: True
batch_size: 256
seed: 42
slurm:
preset: 'distributed_8' # Key-value pairs below override presets.
time: 180
mem_per_gpu: '32G'
cpus_per_task: 5
partition: learnfair
array_parallelism: 512
constraint: volta32gb
experiment:
distributed: True
platform: slurm
## ----- Mutable on the worker during distributed/device setup.
seed: 42 # Workers seed
device: 'cuda:0'
rank: null
local_rank: null
world_size: null
dist_protocol: null
dist_url: null
num_workers: 5
# ------
"""
def parse_arguments():
parser = argparse.ArgumentParser(description='')
parser.add_argument('-s', '--sweep_dir', type=str, required=True)
parser.add_argument('-f', '--force', action='store_true')
parser.add_argument('-o', '--output', type=str, default='indomain_table.tex')
return parser.parse_args()
@utils.timeit
def load_datasets(root, name, clustering_path):
with open(Path(clustering_path), 'rb') as fp:
clustering = pickle.load(fp)
datasets = {}
for split in ['train', 'val']:
datasets[split] = utils.partition_dataset(name=name,
root=root,
split=split,
partitions=clustering['partitions'],
equalize_partitions=True)
return datasets
class ExtractRecords(object):
def __call__(self, model_path):
with open(model_path / 'predictive_records.pkl', 'rb') as fp:
record = pickle.load(fp)
return utils.apply_fun(utils.to_scalar, record)
@utils.timeit
def get_indomain_records(models_paths):
models_paths = list(models_paths)
#max_workers = max(2, tmp.cpu_count() - 2)
max_workers = 10
# test = ExtractRecords()(model_path=models_paths[0])
with concurrent.futures.ProcessPoolExecutor(max_workers=max_workers) as executor:
all_records = list(executor.map(ExtractRecords(), models_paths))
return sum(all_records, [])
def make_indomain_dataframe(evaluation_records: list):
    """
    Generates In-Domain table
    -------------------------
    Mean +/- std (with counts) of the predictive metrics, grouped by
    architecture, dataset, algorithm, spectral-norm flag, temperature mode
    and split.
    """
    records_df = pd.DataFrame.from_records(evaluation_records).round(4)
    group_keys = ['algorithm.arch', 'dataset.name', 'algorithm.name', 'algorithm.sn','temperature_mode', 'split']
    val_keys = ['ACC@1', 'ACC@5', 'NLL', 'ECE']
    grouped = records_df.groupby(group_keys)[val_keys]
    mean_df = grouped.mean().round(4)
    std_df = grouped.std().round(4)
    count_df = grouped.count().round(4)
    return utils.make_error_bars_df_with_count(mean_df, std_df, count_df)
def run_prediction(sweep_dir, force):
    """Dispatch one SLURM prediction job per trained model under `sweep_dir`.

    Models whose prediction is already done are skipped unless `force` is set.
    Returns the results of the jobs that finished.
    """
    sweep_path = Path(sweep_dir)
    clustering_path = sweep_path / 'clustering.pkl'
    # Only consider model directories whose training has completed.
    models_paths = filter(utils.is_model, sweep_path.iterdir())
    models_paths = list(filter(utils.train_done, models_paths))
    prediction_cfg = OmegaConf.create(PREDICTION_CFG)
    root = os.getenv('DATASETS_ROOT')
    name = 'ImageNat'
    datasets = load_datasets(root=root,
                             name=name,
                             clustering_path=clustering_path
                             )
    executor = utils.get_slurm_executor(copy.deepcopy(prediction_cfg.slurm),
                                        log_folder=str(sweep_path / 'logs' / 'run_prediction'))
    # Constructing jobs
    jobs, paths = [], []
    with executor.batch():
        # Constructing jobs
        for model_path in models_paths:
            if utils.prediction_done(model_path) and not force:
                print(f'{model_path} is done. Skipping.')
                continue
            # Training config filename differs between sweep versions.
            if (model_path / 'train_cfg.yaml').is_file():
                train_cfg = utils.load_cfg(model_path / 'train_cfg.yaml')
            elif (model_path / 'cfg_rank_0.yaml').is_file():
                train_cfg = utils.load_cfg(model_path / 'cfg_rank_0.yaml')
            else:
                err_msg = 'train config not found'
                raise ValueError(err_msg)
            Algorithm = utils.load_model_cls(train_cfg)
            worker_args = (
                prediction_cfg,
                train_cfg,
                Algorithm,
                datasets['train']['in'],
                datasets['val']['in'])
            worker = workers.Predictor()
            job = executor.submit(worker, *worker_args)
            jobs += [job]
            paths += [model_path]
            # Leave a trace file so a later run can detect pending work.
            utils.write_trace('prediction.pending', dir_=str(model_path))
    # Beholder monitors the submitted jobs and updates the trace files.
    beholder = utils.Beholder(list(zip(jobs, paths)), stem='prediction')
    beholder.start()
    finished_jobs, jobs = utils.handle_jobs(jobs)
    # Collecting results
    jobs_results = [job.results() for job in finished_jobs]
    return jobs_results
if __name__ == '__main__':
    args = parse_arguments()
    # NOTE(review): `args.output` is parsed but never used here, and `output`
    # (the finished jobs' results) is not consumed afterwards -- confirm intended.
    output = run_prediction(args.sweep_dir, force=args.force)
| StarcoderdataPython |
5011817 | <reponame>tschijnmo/FFOMP<gh_stars>0
"""
Models for molecular mechanics
==============================
In this package, models are provided to model some molecular and material
properties based on some force fields.
Most often force fields are going to model the energy and atomic forces based
on the atomic coordinates and their symbols (types). For these situations, the
convention is that the atomic symbol and coordinates needs to be in property
``atm_symbs`` and ``atm_coords``, both of which should be a list with one entry
of each of the atoms. Then based on this information, possibly together with
some bond connectivity information, the interaction energy of the atoms will be
modelled and put into a property with tag ``static_energy``; the forces will
likewise be modelled and put in the property ``atm_forces``. This convention is
followed by all the models in this package.
.. autosummary::
:toctree:
:template: module.rst
twobody
"""
| StarcoderdataPython |
8187784 | <reponame>elfosardo/coursera-dsa<filename>algorithms-on-graphs/Decomposition of Graphs 1/connected_components.py
# Uses python3
import sys
def number_of_components(adj):
    """Count the connected components of an undirected graph.

    Args:
        adj: adjacency list; adj[v] is the list of neighbours of vertex v.

    Returns:
        int: number of connected components.
    """
    visited = [False] * len(adj)
    components = 0
    for start in range(len(adj)):
        if visited[start]:
            continue
        components += 1
        # Iterative DFS: the original recursive version could exceed
        # Python's recursion limit on large or path-like graphs.
        visited[start] = True
        stack = [start]
        while stack:
            v = stack.pop()
            for w in adj[v]:
                if not visited[w]:
                    visited[w] = True
                    stack.append(w)
    return components
if __name__ == '__main__':
    # Read "n m" followed by m edges (1-based vertex pairs) from stdin.
    # Bug fix: the original bound `input = sys.stdin.read()`, shadowing the
    # builtin `input`.
    raw = sys.stdin.read()
    data = list(map(int, raw.split()))
    n, m = data[0:2]
    data = data[2:]
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    adj = [[] for _ in range(n)]
    for (a, b) in edges:
        # Convert to 0-based indices; store both directions (undirected).
        adj[a - 1].append(b - 1)
        adj[b - 1].append(a - 1)
    print(number_of_components(adj))
| StarcoderdataPython |
6453882 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# Notebook export: exploratory analysis of the country_vaccinations dataset,
# focused on Brazil and a Brazil vs. United States comparison.
import pandas as pd
import numpy as np
import plotly as pxe  # NOTE(review): imported but never used below
import plotly.express as px

# NOTE(review): absolute Windows path -- this only runs on the author's machine.
country = pd.read_csv(r'C:\Users\Master\Desktop\Jupyter\country_vaccinations.csv')
country.describe()

# In[2]:
country

# In[3]:
# Distinct countries and their row counts.
np.unique(country['country'],return_counts= True)

# In[4]:
Brazil = country.loc[country['country'] == 'Brazil']
Brazil

# In[5]:
country.isnull().sum()

# In[6]:
Brazil['daily_vaccinations'].max()

# In[7]:
# Day with the most people vaccinated (the max found above).
Brazil[Brazil['daily_vaccinations'] == 1520483.0]

# In[8]:
grafico1 = px.scatter(Brazil, x='date', y='total_vaccinations', title='Comparation between number of vacines aplicated')
grafico1

# In[10]:
grafico2 = px.scatter(Brazil, x='date', y='people_fully_vaccinated', title='Comparation between date and fully vaccination people')
grafico2

# In[11]:
# Percent of people fully vaccinated in Brazil.
# NOTE(review): figures are in millions (Brazil ~210M population, ~40M fully
# vaccinated) -- confirm these constants are up to date.
BrazilPopulation = 210
FullVaccination = 40
peooplevacc = FullVaccination/BrazilPopulation *100
peooplevacc

# In[12]:
grafico2 = px.scatter(Brazil, x='date', y='daily_vaccinations_per_million', title='Comparation between date and fully vaccination people per Million')
grafico2

# In[13]:
grafico3 = px.scatter(Brazil, x='date', y='people_fully_vaccinated_per_hundred', title='Comparation between date and fully vaccination people per Hundred')
grafico3

# In[14]:
EuaBrazil = country[country['country'].isin(['Brazil','United States'])]
EuaBrazil

# In[15]:
px.scatter(EuaBrazil,x='date',y='daily_vaccinations_per_million',title='Comparation between United States and Brazil',color='country')

# In[16]:
px.scatter(EuaBrazil, x='date', y='people_fully_vaccinated', title='Comparation between people fully vaccinated.EUA Population 328,2 milhões / Brazil Population 211 milhões',color='country')

# In[ ]:
| StarcoderdataPython |
3418544 | from setuptools import setup, find_packages
# Packaging metadata for the testspace client library.
setup(
    name='testspace-python',
    version='',  # NOTE(review): version is empty -- fill in before publishing
    packages=find_packages(include=['testspace', 'testspace.*']),
    url='',  # NOTE(review): project URL missing
    license="MIT license",
    author="<NAME>",
    author_email='<EMAIL>',
    description="Module for interacting with Testspace Server",
    install_requires=[
        'requests',
    ]
)
| StarcoderdataPython |
11297106 | <gh_stars>10-100
#!/usr/bin/env python
# coding=utf-8
"""
@function:
@version: 1.0
@author: <NAME>
@license: Apache Licence
@file: anna_writer.py
@time: 2017/7/4 下午2:27
"""
import time
import numpy as np
import tensorflow as tf
# Load the training corpus.
file_path = './data/anna.txt'
with open(file_path) as f:
    text = f.read()
# print('text', text)

# Build the character vocabulary.
# Deduplicate with a set, then sort by first occurrence so the order is
# deterministic within one run (set iteration order is not).
# NOTE(review): text.index is O(len(text)) per character -- fine for a small
# vocabulary, slow for large alphabets.
vocab = list(set(text))
vocab.sort(key=text.index)
# print('vocab\n', vocab)
print('len_vocab', len(vocab))

# Character <-> integer id mappings.
vocab_to_int = {char: i for i, char in enumerate(vocab)}
# print('vocab_to_int\n', vocab_to_int)
int_to_vocab = dict(enumerate(vocab))
# print('int_to_vocab\n', int_to_vocab)

# Encode the whole corpus as an int32 array of character ids.
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
# print('encoded\n', encoded)
# print('encoded_shape\n', np.shape(encoded))
def get_batch(raw_data, batch_size, seq_length):
    """
    Yield consecutive (input, target) batches for truncated BPTT.

    Fixed docstring: the previous version documented a nonexistent `array`
    parameter and no return value.

    Args:
        raw_data: 1-D sequence of encoded character ids.
        batch_size: number of parallel sequences per batch.
        seq_length: number of time steps per batch.

    Yields:
        (x, y): int arrays of shape [batch_size, seq_length]; y is x shifted
        one position ahead (next-character targets).
    """
    data = np.array(raw_data)
    data_length = data.shape[0]
    # One extra element is needed for the shifted targets, hence (length - 1).
    num_batches = (data_length - 1) // (batch_size * seq_length)
    assert num_batches > 0, "Not enough data, even for a single batch. Try using a smaller batch_size."
    rounded_data_len = num_batches * (batch_size * seq_length)
    xdata = np.reshape(data[0:rounded_data_len], [batch_size, num_batches * seq_length])
    # Targets are the inputs shifted by one character.
    ydata = np.reshape(data[1:rounded_data_len + 1], [batch_size, num_batches * seq_length])

    for batch in range(num_batches):
        x = xdata[:, batch * seq_length:(batch + 1) * seq_length]
        y = ydata[:, batch * seq_length:(batch + 1) * seq_length]
        yield x, y
class language_model:
    """
    Character-level text generation model: a stacked LSTM built with the
    TensorFlow 1.x graph API. In training mode batches are
    [batch_size, seq_length]; in sampling mode both collapse to 1.
    """
    def __init__(self, num_classes, batch_size=100, seq_length=50, learning_rate=0.01, num_layers=5, hidden_units=128,
                 keep_prob=0.8, grad_clip=5, is_training=True):
        # Reset the default graph; without this, building the model twice in
        # one process (training then sampling) raises variable-reuse errors.
        tf.reset_default_graph()
        self.learning_rate = learning_rate
        self.num_layers = num_layers
        self.hidden_units = hidden_units
        self.is_training = is_training
        self.keep_prob = keep_prob
        self.grad_clip = grad_clip
        self.num_classes = num_classes
        # Sampling feeds one character at a time.
        if self.is_training:
            self.batch_size = batch_size
            self.seq_length = seq_length
        else:
            self.batch_size = 1
            self.seq_length = 1
        with tf.name_scope('add_input_layer'):
            self.add_input_layer()
        with tf.variable_scope('lstm_cell'):
            self.add_multi_cells()
        with tf.name_scope('build_output'):
            self.build_output()
        with tf.name_scope('acc'):
            self.acc()
        with tf.name_scope('cost'):
            self.compute_cost()
        with tf.name_scope('optimizer'):
            self.optimizer()

    def add_input_layer(self):
        """Create input/target placeholders and their one-hot encodings."""
        self.x = tf.placeholder(tf.int32,
                                shape=(self.batch_size, self.seq_length), name='inputs')  # [batch_size, seq_length]
        self.y = tf.placeholder(tf.int32,
                                shape=(self.batch_size, self.seq_length), name='targets')  # [batch_size, seq_length]
        # One-hot encode inputs and targets.
        self.inputs = tf.one_hot(self.x, self.num_classes)  # [batch_size, seq_length, num_classes]
        # self.inputs = tf.reshape(self.y, [-1, self.num_classes])
        self.targets = tf.one_hot(self.y, self.num_classes)  # [batch_size, seq_length, num_classes]

    def lstm_cell(self):
        """Build one LSTM cell, wrapped with output dropout during training."""
        # Or GRUCell, LSTMCell(args.hiddenSize)
        with tf.variable_scope('lstm_cell'):
            cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_units,
                                                state_is_tuple=True)
        # with tf.variable_scope('lstm_cell'):
        #     cell = tf.contrib.rnn.BasicLSTMCell(self.hidden_units,
        #                                         state_is_tuple=True,
        #                                         reuse=tf.get_variable_scope().reuse)
        if self.is_training:
            cell = tf.contrib.rnn.DropoutWrapper(cell,
                                                 input_keep_prob=1.0,
                                                 output_keep_prob=self.keep_prob)
        return cell

    def add_multi_cells(self):
        """Stack `num_layers` LSTM cells and unroll them over the sequence."""
        stacked_cells = tf.contrib.rnn.MultiRNNCell([self.lstm_cell() for _ in range(self.num_layers)],
                                                    state_is_tuple=True)
        with tf.name_scope('initial_state'):
            # initial_state: [batch_size, hidden_units * num_layers]
            self.initial_state = stacked_cells.zero_state(self.batch_size, dtype=tf.float32)
        # cell_output: [batch_size, seq_length, hidden_units]
        # final_state: [batch_size, hidden_units * num_layers]
        self.cell_outputs, self.final_state = tf.nn.dynamic_rnn(cell=stacked_cells,
                                                                inputs=self.inputs,
                                                                initial_state=self.initial_state)

    def build_output(self):
        """Project LSTM outputs to per-character logits and softmax probabilities."""
        # seq_output = tf.concat(self.cell_outputs, axis=1)
        # y0 = tf.reshape(seq_output, [-1, self.hidden_units])  # y0: [batch_size * seq_length, hidden_units]
        y0 = tf.reshape(self.cell_outputs, [-1, self.hidden_units])  # y0: [batch_size * seq_length, hidden_units]
        with tf.name_scope('weights'):
            sofmax_w = tf.Variable(tf.truncated_normal([self.hidden_units, self.num_classes], stddev=0.1))
            softmax_b = tf.Variable(tf.zeros(self.num_classes))
        with tf.name_scope('wx_plus_b'):
            self.logits = tf.matmul(y0, sofmax_w) + softmax_b  # logits: [batch_size * seq_length, num_classes]
            self.prediction = tf.nn.softmax(logits=self.logits, name='prediction')
        return self.prediction, self.logits

    def acc(self):
        """Mean accuracy of the argmax prediction against the flat targets."""
        correct_prediction = tf.equal(tf.cast(tf.argmax(self.prediction, 1), tf.int32), tf.reshape(self.y, [-1]))
        self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        return self.accuracy

    def compute_cost(self):
        """Mean softmax cross-entropy over all positions in the batch."""
        y_reshaped = tf.reshape(self.targets,
                                self.logits.get_shape())  # y_reshaped: [batch_size * seq_length, num_classes]
        # Softmax cross entropy loss
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits,
                                                       labels=y_reshaped)  # loss: [batch_size, seq_length]
        self.loss = tf.reduce_mean(loss)
        # return self.loss

    def optimizer(self):
        """Adam optimizer with global-norm gradient clipping."""
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, tvars), self.grad_clip)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_optimizer = optimizer.apply_gradients(zip(grads, tvars))
        # return self.train_optimizer
class conf(object):
    """Hyper-parameters shared by training and sampling."""
    batch_size = 100         # Sequences per batch
    num_steps = 100          # Number of sequence steps per batch
    lstm_size = 512          # Size of hidden layers in LSTMs
    num_layers = 2           # Number of LSTM layers
    learning_rate = 0.001    # Learning rate
    keep_prob = 0.5          # Dropout keep probability
    grad_clip = 5            # Global-norm gradient clipping threshold
    num_classes = len(vocab) # Vocabulary size (depends on the loaded corpus)
    num_epochs = 10
    # Save a checkpoint every `save_every_n` training steps.
    save_every_n = 200
def train():
    """
    Train the language model, logging progress every 100 steps and saving a
    checkpoint every conf.save_every_n steps (and once more at the end).
    """
    # `new_state` is shared at module scope so the LSTM state survives
    # between epochs' feed_dicts.
    global new_state
    model = language_model(conf.num_classes, conf.batch_size, conf.num_steps, conf.learning_rate, conf.num_layers,
                           conf.lstm_size, conf.keep_prob, conf.grad_clip, is_training=True)
    saver = tf.train.Saver(max_to_keep=100)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        counter = 0
        for epoch in range(conf.num_epochs):
            for x_batch, y_batch in get_batch(encoded, conf.batch_size, conf.num_steps):
                counter += 1
                start = time.time()
                # First epoch starts from the zero state; later epochs carry
                # the state from the previous batch.
                if epoch == 0:
                    feed_dict = {
                        model.x: x_batch,
                        model.y: y_batch
                    }
                else:
                    feed_dict = {
                        model.x: x_batch,
                        model.y: y_batch,
                        model.initial_state: new_state
                    }
                _, batch_loss, new_state, acc, predict = sess.run([model.train_optimizer,
                                                                   model.loss,
                                                                   model.final_state,
                                                                   model.accuracy,
                                                                   model.prediction],
                                                                  feed_dict=feed_dict)
                end = time.time()
                # control the print lines
                if counter % 100 == 0:
                    print "pre: ", sess.run(tf.cast(tf.argmax(predict, 1), tf.int32))
                    print 'y_batch: ', sess.run(tf.reshape(y_batch, [-1]))
                    print 'train_acc: ', acc
                    print '轮数: {}/{}... '.decode('utf-8').format(epoch + 1, conf.num_epochs), \
                        '训练步数: {}... '.decode('utf-8').format(counter),\
                        '训练误差: {:.4f}... '.decode('utf-8').format(batch_loss),\
                        '{:.4f} sec/batch'.decode('utf-8').format((end - start))
                if counter % conf.save_every_n == 0:
                    saver.save(sess, 'checkpoints/i{}_l{}.ckpt'.decode('utf-8').format(counter, conf.lstm_size))
        saver.save(sess, 'checkpoints/i{}_l{}.ckpt'.decode('utf-8').format(counter, conf.lstm_size))
def pick_top_n(preds, vocab_size, top_n=5):
    """
    Sample one character index among the top_n most probable predictions.

    Args:
        preds: prediction probabilities, shape [1, vocab_size] (or [vocab_size]).
        vocab_size: number of characters to sample from.
        top_n: how many highest-probability entries to keep.

    Returns:
        int: sampled character index.
    """
    # Copy before zeroing: np.squeeze returns a *view* when possible, so
    # writing into it would silently clobber the caller's prediction array.
    p = np.squeeze(preds).copy()
    # Zero out everything except the top_n entries.
    p[np.argsort(p)[:-top_n]] = 0
    # Renormalize to a probability distribution.
    p = p / np.sum(p)
    # Sample one index according to the remaining probabilities.
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def generate_samples(checkpoint, num_samples, prime='The '):
    """Restore `checkpoint` and sample `num_samples` characters, seeded with
    the `prime` string. Returns prime + generated text."""
    samples = [char for char in prime]
    model = language_model(conf.num_classes, conf.batch_size, conf.num_steps, conf.learning_rate, conf.num_layers,
                           conf.lstm_size, conf.keep_prob, conf.grad_clip, is_training=False)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        # Feed the priming text one character at a time.
        # NOTE(review): the LSTM state is neither fed nor updated during this
        # priming loop, so only the last prime character influences the first
        # sampled prediction -- confirm this is intended.
        for c in prime:
            x = np.zeros((1, 1))
            x[0, 0] = vocab_to_int[c]
            feed_dict = {model.x: x}
            predicts = sess.run(model.prediction, feed_dict=feed_dict)
        c = pick_top_n(predicts, len(vocab))
        samples.append(int_to_vocab[c])
        # Autoregressive sampling: feed the previous character and the carried
        # LSTM state, sample the next character from the top-5 predictions.
        for i in range(num_samples):
            x[0, 0] = c
            feed_dict = {model.x: x,
                         model.initial_state: new_state}
            preds, new_state = sess.run([model.prediction,
                                         model.final_state],
                                        feed_dict=feed_dict)
            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])
    return ''.join(samples)
if __name__ == '__main__':
    train()
    # NOTE(review): the result of this call is discarded (duplicated below).
    tf.train.latest_checkpoint('checkpoints')
    # Use the most recent checkpoint to generate text.
    checkpoint = tf.train.latest_checkpoint('checkpoints')
    samp = generate_samples(checkpoint, 20000, prime="The ")
    print(samp)
# Known issue: vocab_to_int changes between runs (set ordering), so training
# and prediction cannot be run as separate processes.
# Workaround: pickle vocab_to_int during training and reload it for sampling.
| StarcoderdataPython |
93514 | <gh_stars>10-100
from numpy import zeros, matrix, array, random
from middleware import CodeRedLib
from time import time
from math import floor
def experiment(n, k, samples):
    """Average the sorted basis-length profiles of six reduction pipelines
    over `samples` trials on one random [k, n] binary code."""
    profiles = zeros((6, k))
    G = random.randint(0, 2, size=(k, n), dtype="bool")
    red = CodeRedLib(G)
    scale = 1. * samples

    def record(row):
        # Fold the current (descending) length profile into the running mean.
        profiles[row] += array(sorted(list(red.l), reverse=True)) / scale

    for _ in range(samples):
        red.Randomize()
        red.LLL()
        record(0)

        red.Randomize()
        red.Systematize()
        record(1)
        red.LLL()
        record(2)

        red.Randomize()
        red.Systematize()
        red.EpiSort()
        record(3)
        red.LLL()
        record(4)
        red.SizeRedBasis()
        red.KillTwos()
        record(5)

    return profiles
# Driver: average profiles for a rate-1/2 code of length 1280 over 100 trials.
n = 1280
k = int(n/2)
samples = 100

M = experiment(n, k, samples)
C = M.transpose()

# Print the first 25 basis indices, one column per reduction pipeline.
print("index, pLLL_raw, pSys, pLLL_Sys, pSort, pLLL_Sort, pLLL_Sort_K2")
for i in range(25):
    print(i+1, ", \t %.2f,\t %.2f,\t %.2f,\t %.2f,\t %.2f, \t %.2f \t"%tuple(C[i]))
| StarcoderdataPython |
3437295 | import numpy as np
from numpy import ndarray
from dataclasses import dataclass
from scipy.spatial.transform import Rotation
from config import DEBUG
from cross_matrix import get_cross_matrix
@dataclass
class RotationQuaterion:
    """Class representing a rotation quaternion (norm = 1). Has some useful
    methods for converting between rotation representations.

    Convention: the book uses wxyz ordering while scipy's Rotation uses xyzw;
    the conversion helpers below handle the reordering.

    Args:
        real_part (float): eta (n) in the book, w in scipy notation
        vec_part (ndarray[3]): epsilon in the book, (x,y,z) in scipy notation
    """
    real_part: float
    vec_part: 'ndarray[3]'

    def __post_init__(self):
        # Normalize to unit norm and canonicalize the sign (real part >= 0):
        # q and -q encode the same rotation, so this makes equal rotations
        # compare equal.
        if DEBUG:
            assert len(self.vec_part) == 3

        norm = np.sqrt(self.real_part**2 + sum(self.vec_part**2))
        if not np.allclose(norm, 1):
            self.real_part /= norm
            self.vec_part /= norm

        if self.real_part < 0:
            self.real_part *= -1
            self.vec_part *= -1

    def multiply(self, other: 'RotationQuaterion') -> 'RotationQuaterion':
        """Multiply two rotation quaternions

        Hint: see (10.33)
        As __matmul__ is implemented for this class, you can use:
        q1@q2 which is equivalent to q1.multiply(q2)

        Args:
            other (RotationQuaternion): the other quaternion
        Returns:
            quaternion_product (RotationQuaternion): the product
        """
        # Hamilton product: eta = eta1*eta2 - eps1.eps2,
        # eps = eta2*eps1 + eta1*eps2 + eps1 x eps2 (via the skew matrix).
        real_part = self.real_part*other.real_part - self.vec_part@other.vec_part
        vec_part = self.vec_part*other.real_part + (self.real_part*np.eye(3) + get_cross_matrix(self.vec_part))@other.vec_part
        quaternion_product = RotationQuaterion(real_part, vec_part)

        return quaternion_product

    def conjugate(self) -> 'RotationQuaterion':
        """Get the conjugate of the RotationQuaternion"""
        conj = RotationQuaterion(self.real_part, -self.vec_part)
        return conj

    def as_rotmat(self) -> 'ndarray[3,3]':
        """Get the rotation matrix representation of self

        Returns:
            R (ndarray[3,3]): rotation matrix
        """
        # scipy expects scalar-last (xyzw) ordering.
        scalar_last_quat = np.append(self.vec_part, self.real_part)
        R = Rotation.from_quat(scalar_last_quat).as_matrix()
        return R

    @property
    def R(self) -> 'ndarray[3,3]':
        return self.as_rotmat()

    def as_euler(self) -> 'ndarray[3]':
        """Get the euler angle representation of self

        Returns:
            euler (ndarray[3]): extrinsic xyz euler angles (roll, pitch, yaw)
        """
        scalar_last_quat = np.append(self.vec_part, self.real_part)
        euler = Rotation.from_quat(scalar_last_quat).as_euler('xyz', degrees=False)
        return euler

    def as_avec(self) -> 'ndarray[3]':
        """Get the rotation-vector (axis-angle) representation of self

        Returns:
            avec (ndarray[3]): rotation vector (unit axis scaled by the
                rotation angle in radians)
        """
        scalar_last_quat = np.append(self.vec_part, self.real_part)
        avec = Rotation.from_quat(scalar_last_quat).as_rotvec()
        return avec

    @staticmethod
    def from_euler(euler: 'ndarray[3]') -> 'RotationQuaterion':
        """Get a rotation quaternion from euler angles

        usage: rquat = RotationQuaterion.from_euler(euler)

        Args:
            euler (ndarray[3]): extrinsic xyz euler angles (roll, pitch, yaw)

        Returns:
            rquat (RotationQuaternion): the rotation quaternion
        """
        scipy_quat = Rotation.from_euler('xyz', euler).as_quat()
        rquat = RotationQuaterion(scipy_quat[3], scipy_quat[:3])
        return rquat

    def _as_scipy_quat(self):
        """If you're using scipys Rotation class, this can be handy"""
        return np.append(self.vec_part, self.real_part)

    def __iter__(self):
        return iter([self.real_part, self.vec_part])

    def __matmul__(self, other) -> 'RotationQuaterion':
        """Lets u use the @ operator, q1@q2 == q1.multiply(q2)"""
        return self.multiply(other)
| StarcoderdataPython |
1685304 | <filename>examples/thermoelectric_fridge.py<gh_stars>0
#!/usr/bin/env python
"""
Create a thermoelectric fridge controller to control motor and peltier from a 12 folt source.
"""
from simple_skidl_parts.analog.power import *
from simple_skidl_parts.analog.vdiv import *
from simple_skidl_parts.units.linear import *
from simple_skidl_parts.analog.led import led_simple, LedSingleColors
from simple_skidl_parts.parts_wrapper import create_bom
from simple_skidl_parts.analog.resistors import small_resistor as R
from skidl import *
# SMD resistor template.
# NOTE(review): `_R` appears unused below (`R` from simple_skidl_parts is used
# instead) -- confirm whether it can be removed.
_R = Part("Device", "R", footprint='Resistor_SMD:R_0805_2012Metric', dest=TEMPLATE)


def main():
    """Wire the thermoelectric fridge controller: 12V input, 5V LDO rail,
    ATtiny85 MCU, motor/TEC low-side drivers, 5V fans, status LEDs and
    screw-terminal connectors; then run ERC and emit the BOM and netlist."""
    v12 = Net("12V")
    v12.drive = POWER
    gnd = Net("GND")
    gnd.drive = POWER
    v5 = Net("5V")
    v5.drive = POWER

    # Create a 5v net
    low_dropout_power(v12, v5, gnd, 16, 5, 0.5, True)

    mcu = Part("MCU_Microchip_ATtiny", "ATtiny85-20PU", footprint="DIP-8_W7.62mm_LongPads")
    mcu[4] += gnd
    mcu[8] += v5

    # Reminder:
    # Analog in: A1[7], A2[2], A3[3]
    # Wire: PB3[2]
    # PWM: [5], [6]

    # We'll use PB3 for the 1-wire communications and the A3 for voltage.
    divided = Net("VDIV")
    vdiv(v12, divided, gnd, ratio=4, rtot=10*K)
    mcu[3] += divided

    to_motor = Net("MOTOR")
    to_tec = Net("THEC")
    to_bldc_motor = Net("MOTOR-5V")
    wire1 = Net("WIRE")

    dc_motor_on_off(mcu[1], to_motor, gnd)
    dc_motor_on_off(mcu[6], to_tec, gnd)

    # 4.7k pull-up/series resistor for the 1-wire bus.
    wire_resistor = R(4700)
    mcu[2] | wire_resistor[1]
    wire_resistor[2] | wire1

    connect = Part("Connector", "Screw_Terminal_01x02", footprint="TerminalBlock_MetzConnect_Type055_RT01502HDWU_1x02_P5.00mm_Horizontal", dest=TEMPLATE)
    connect_motor, connect_tec, connect_pow, connect_wire_pow, connect_wire_data = connect(5)
    for c, n in zip([connect_motor, connect_tec, connect_pow, connect_wire_pow, connect_wire_data],
                    ["MOTOR", "TEC", "PWR", "5V-PWR", "WIRE"]):
        c.ref = n

    connect_fan_1, connect_fan_2 = [Part("Connector", "Conn_01x02_Male", footprint="PinHeader_1x02_P2.54mm_Vertical") for _ in range(2)]

    # 5V fan driver on the second PWM pin.
    dc_motor_on_off(mcu[5], to_bldc_motor, gnd, v_signal_min=5, motor_current_max=3)
    for c in [connect_fan_1, connect_fan_2]:
        c.ref = to_bldc_motor.name
        c[1] += v5
        c[2] += to_bldc_motor

    led = led_simple(sig_voltage=5.0, color=LedSingleColors.RED, size=1.6)
    led.signal += v5
    led.gnd += to_bldc_motor

    # Add LEDs
    for m in [to_motor, to_tec]:
        led = led_simple(sig_voltage=15.0, color=LedSingleColors.RED, size=1.6)
        led.signal += v12
        led.gnd += m

    led = led_simple(sig_voltage=5.0, color=LedSingleColors.YELLOW, size=2.0)
    led.signal += v5
    led.gnd += gnd

    led = led_simple(sig_voltage=5.0, color=LedSingleColors.BLUE, size=2.0)
    led.signal += mcu[5]
    led.gnd += gnd

    connect_motor[1] += to_motor
    connect_motor[2] += v12

    # Transient-voltage-suppression diode across the motor terminal.
    tvs = Part("Device", "D_TVS_ALT", value="17V", footprint="D_DO-15_P3.81mm_Vertical_KathodeUp")
    for p in range(2):
        tvs[p+1] += connect_motor[p+1]

    connect_tec[1] += to_tec
    connect_tec[2] += v12

    connect_pow[1] += v12
    connect_pow[2] += gnd

    connect_wire_pow[1] += v5
    connect_wire_pow[2] += gnd
    connect_wire_data[1] += wire1
    connect_wire_data[2] += wire1

    ERC()
    create_bom("JLCPCB", "/tmp/bom.csv", default_circuit)
    generate_netlist(file_=open("/tmp/netlist.net", "w"))
if __name__ == "__main__":
main() | StarcoderdataPython |
3407700 | <reponame>KelOdgSmile/ml-cvnets<filename>utils/common_utils.py
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
import random
import torch
import numpy as np
from utils import logger
import os
from utils.ddp_utils import is_master
from cvnets.layers import norm_layers_tuple
def check_compatibility():
    """Log an error if the installed PyTorch is older than 1.7.0.

    Bug fix: the minor version was previously read from ver[0] (same as the
    major), and `major < 1 and minor < 7` could never trigger for any 1.x
    release; the check now compares (major, minor) tuples.
    """
    ver = torch.__version__.split('.')
    major_version = int(ver[0])
    minor_version = int(ver[1]) if len(ver) > 1 else 0
    if (major_version, minor_version) < (1, 7):
        logger.error('Min pytorch version required is 1.7.0. Got: {}'.format('.'.join(ver)))
def check_frozen_norm_layer(model: torch.nn.Module) -> (bool, int):
    """Inspect the normalization layers of a model.

    Returns:
        (frozen_state, count_norm): `frozen_state` is the requires_grad flag
        of the last norm layer's weight (False if the model has no norm
        layers); `count_norm` is how many normalization layers were found.
        Bug fix: `count_norm` was previously never incremented and always
        returned 0.
    """
    if hasattr(model, 'module'):
        # Unwrap DataParallel / DistributedDataParallel containers.
        model = model.module

    count_norm = 0
    frozen_state = False
    for m in model.modules():
        if isinstance(m, norm_layers_tuple):
            count_norm += 1  # bug fix: counter was never incremented
            frozen_state = m.weight.requires_grad

    # NOTE(review): `frozen_state` reflects only the *last* norm layer, and is
    # True when the weight is trainable -- confirm callers expect exactly this.
    return frozen_state, count_norm
def device_setup(opts):
    """Seed all RNGs, detect GPUs/cudnn, and record device info on `opts`.

    Sets `dev.device` and `dev.num_gpus` on `opts` and returns it.
    Logging happens only on the master node.
    """
    random_seed = getattr(opts, "common.seed", 0)
    random.seed(random_seed)
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)

    is_master_node = is_master(opts)
    if is_master_node:
        logger.log('Random seeds are set to {}'.format(random_seed))
        logger.log('Using PyTorch version {}'.format(torch.__version__))

    n_gpus = torch.cuda.device_count()
    if n_gpus == 0:
        if is_master_node:
            logger.warning('No GPUs available. Using CPU')
        device = torch.device('cpu')
        n_gpus = 0
    else:
        if is_master_node:
            logger.log('Available GPUs: {}'.format(n_gpus))
        device = torch.device('cuda')

        if torch.backends.cudnn.is_available():
            import torch.backends.cudnn as cudnn
            torch.backends.cudnn.enabled = True
            # Favor reproducibility over speed: fixed deterministic kernels.
            cudnn.benchmark = False
            cudnn.deterministic = True
            if is_master_node:
                logger.log('CUDNN is enabled')

    setattr(opts, "dev.device", device)
    setattr(opts, "dev.num_gpus", n_gpus)
    return opts
def create_directories(dir_path: str, is_master_node: bool) -> None:
    """Create `dir_path` (including parents) if it does not already exist.

    Logs the outcome on the master node only.
    """
    if not os.path.isdir(dir_path):
        # exist_ok=True closes the race where another process (e.g. a sibling
        # DDP worker) creates the directory between the check and the call.
        os.makedirs(dir_path, exist_ok=True)
        if is_master_node:
            logger.log('Directory created at: {}'.format(dir_path))
    else:
        if is_master_node:
            logger.log('Directory exists at: {}'.format(dir_path))
| StarcoderdataPython |
1675911 | """
Copyright (c) 2016-2020 <NAME> http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import urllib.parse
import os
from programy.services.base import ServiceQuery
from programy.services.rest.base import RESTService
from programy.services.rest.base import RESTServiceException
from programy.utils.logging.ylogger import YLogger
class MirosoftSearchQuery(ServiceQuery):
    """Service query that extracts the search term from a matched pattern and
    renders Bing results as an HTML list of snippets.

    NOTE(review): the class name is missing a 'c' ("Mirosoft"); it is kept
    because MicrosoftSearchService.PATTERNS references it by this name.
    """

    def __init__(self, service):
        ServiceQuery.__init__(self, service)
        self._query = None

    @staticmethod
    def create(service):
        return MirosoftSearchQuery(service)

    def parse_matched(self, matched):
        # Capture group 0 of the SEARCH pattern holds the query text.
        self._query = ServiceQuery._get_matched_var(matched, 0, "query")

    def execute(self):
        return self._service.search(self._query)

    def aiml_response(self, response):
        snippets = [entry['snippet'] for entry in response['response']['payload']['webPages']['value']]
        items = "\n".join("<li>{0}</li>".format(snippet) for snippet in snippets)
        return "<ul>\n" + items + "</ul>"
class MicrosoftSearchServiceException(RESTServiceException):
    """Raised for failures specific to the Microsoft search service."""

    def __init__(self, msg):
        super().__init__(msg)
class MicrosoftSearchService(RESTService):
    """
    Bing web search via Azure Cognitive Services.
    https://portal.azure.com/
    Requires a BING_SEARCH_KEY license key; queries match "SEARCH <text>".
    """
    PATTERNS = [
        [r"SEARCH\s(.+)", MirosoftSearchQuery]
    ]

    # Default endpoint, overridable via service configuration `url`.
    # NOTE(review): this subdomain looks subscription-specific -- confirm it
    # is a sensible default.
    BASE_SEARCH_URL="https://chatilly.cognitiveservices.azure.com/bing/v7.0"

    def __init__(self, configuration):
        RESTService.__init__(self, configuration)
        self._key = None

    def initialise(self, client):
        # Missing key is logged but not fatal; search calls will then fail.
        self._key = client.license_keys.get_key('BING_SEARCH_KEY')
        if self._key is None:
            YLogger.error(self, "BING_SEARCH_KEY missing from license.keys, service will not function correctly!")

    def patterns(self) -> list:
        return MicrosoftSearchService.PATTERNS

    def get_default_aiml_file(self):
        return os.path.dirname(__file__) + os.sep + "search.aiml"

    @staticmethod
    def get_default_conf_file():
        return os.path.dirname(__file__) + os.sep + "search.conf"

    def _build_search_url(self, query):
        """Build the Bing search URL, URL-encoding the query text."""
        if self.configuration.url is not None:
            url = self.configuration.url
        else:
            url = MicrosoftSearchService.BASE_SEARCH_URL
        url += "/search?q={0}".format(urllib.parse.quote(query))
        return url

    def _build_search_headers(self):
        # Azure authenticates via the subscription-key header.
        return {"Ocp-Apim-Subscription-Key": self._key}

    def search(self, query):
        """Run a Bing web search and return the service response."""
        url = self._build_search_url(query)
        headers = self._build_search_headers()
        response = self.query('search', url, headers=headers)
        return response

    def _response_to_json(self, api, response):
        return response.json()
| StarcoderdataPython |
3556824 | import json
from http import HTTPStatus
# Static sample user directory served by the handler below.
USERS = {
    'railgun': {
        'name': '<NAME>',
        'age': 14,
        'city': 'Gakuen-toshi',
        'country': 'Japan'
    },
    'imagine-breaker': {
        'name': '<NAME>',
        'age': 15,
        'city': 'Gakuen-toshi',
        'country': 'Japan'
    },
    'index': {
        'name': 'Index-Librorum-Prohibitorum',
        'age': 14,
        'city': 'London?',
        'country': 'United Kingdom'
    }
}


def lambda_handler(event: dict, context):
    """AWS Lambda handler for a sample users API behind API Gateway.

    Supports GET /hello-world (all users), GET /hello-world/{id} (one user,
    404 when unknown) and POST /hello-world (echoes the parsed body, 201).
    Bug fix: unsupported methods/resources previously fell through with
    statusCode None (an invalid response); they now return 405.
    """
    http_method = event['httpMethod']
    resource = event['resource']

    response_status = HTTPStatus.METHOD_NOT_ALLOWED  # fallback, was None
    response_body = {}
    if http_method == 'GET':
        response_body = {
            'method': 'get information',
            'data': None
        }
        if resource == '/hello-world':
            response_body['data'] = USERS
            response_status = HTTPStatus.OK
        elif resource == '/hello-world/{id}':
            response_body['data'] = USERS.get(event['pathParameters']['id'])
            response_status = HTTPStatus.OK if response_body['data'] else HTTPStatus.NOT_FOUND
    elif http_method == 'POST':
        response_body = {
            'method': 'insert information',
            'data': json.loads(event['body']) if event['body'] else None
        }
        # Do nothing with the data (it's a sample); just acknowledge creation.
        response_status = HTTPStatus.CREATED

    return {
        'statusCode': response_status,
        'body': json.dumps(response_body)
    }
| StarcoderdataPython |
3245566 | <filename>python/AULAS/aula016.py
# lanche = ('Hambúrguer', 'Suco', 'Pizza', 'Pudim')
# for comida in lanche:
# print(lanche)
# for cont in range(0, len(lanche)):
# print(f'{cont + 1}° eu vou comer {lanche[cont]}')
# for pos, cont in enumerate(lanche):
# print(f'{pos + 1}° eu vou comer {cont}')
# print(sorted(lanche))
# print(lanche)
# a = (2, 5, 4)
# b = (5, 8, 1, 2)
# c = a + b
# d = b + a
# print(a, end='a ')
# print(b, end='b ')
# print(c, end='c ')
# print(d, end='d ')
# pessoa = ('Gustavo', 39, 'M', 99.88)
# del(pessoa)
# print(pessoa)
| StarcoderdataPython |
8039470 | <reponame>1696012928/RoomAI
#!/bin/python
#coding:utf-8
import roomai.common
import copy
import logging
import random
import sys
from functools import cmp_to_key
from roomai.fivecardstud import FiveCardStudPokerCard
from roomai.fivecardstud import FiveCardStudPublicState
from roomai.fivecardstud import FiveCardStudPersonState
from roomai.fivecardstud import FiveCardStudPrivateState
from roomai.fivecardstud import FiveCardStudAction
class FiveCardStudEnv(roomai.common.AbstractEnv):
'''
    FiveCardStud game environment
'''
#@override
    def init(self, params = dict()):
        '''
        Initialize the FiveCardStud game environment with the params. The params are as follows:
        1) num_normal_players denotes how many players join in this game, default 3
        2) chips denotes the initialization chips of players, default [500,500,500]
        3) floor_bet denotes the minimal bet, default 10
        4) backward_enable denotes whether the environment stores all history information. If you need to call the backward function, please set it to True. default False
        An example of params is {"num_normal_players":3,"chips":[500,500,500]}

        :param params: initialization params
        :return: infos, public_state, person_states, private_state
        '''
        self.logger = roomai.get_logger()

        # NOTE(review): `params = dict()` is a mutable default argument; it is
        # safe here because `params` is only read, never mutated.
        self.__params__ = dict()
        if "num_normal_players" in params:
            self.__params__["num_normal_players"] = params["num_normal_players"]
        else:
            self.__params__["num_normal_players"] = 3

        if "chips" in params:
            self.__params__["chips"] = params["chips"]
        else:
            self.__params__["chips"] = [500 for i in range(self.__params__["num_normal_players"])]

        if "floor_bet" in params:
            self.__params__["floor_bet"] = params["floor_bet"]
        else:
            self.__params__["floor_bet"] = 10

        if "backward_enable" in params:
            self.__params__["backward_enable"] = params["backward_enable"]
        else:
            self.__params__["backward_enable"] = False

        # Build and shuffle the full 52-card deck.
        allcards = []
        for i in range(13):
            for j in range(4):
                allcards.append(FiveCardStudPokerCard(i, j))
        random.shuffle(allcards)

        FiveCardStudEnv.__valid_initialization_params__(self)

        self.public_state = FiveCardStudPublicState()
        self.private_state = FiveCardStudPrivateState()
        self.person_states = [FiveCardStudPersonState() for i in range(self.__params__["num_normal_players"]+1)]

        self.public_state_history = []
        self.private_state_history = []
        self.person_states_history = []

        ## private_state
        # Reserve five cards per normal player off the top of the deck.
        self.private_state.all_hand_cards = allcards[0: 5 * self.__params__["num_normal_players"]]

        ## public_state
        self.public_state.num_normal_players = self.__params__["num_normal_players"]
        self.public_state.chips = self.__params__["chips"]
        # The second card of each player (cards num..2*num-1) is public.
        self.public_state.second_hand_cards = self.private_state.all_hand_cards[1*self.__params__["num_normal_players"]: 2 * self.__params__["num_normal_players"]]
        self.public_state.floor_bet = self.__params__["floor_bet"]
        # Nobody can bet more than the smallest stack.
        self.public_state.upper_bet = min(self.public_state.chips)
        #print "public_state.upper_bet", self.public_state.upper_bet,"chips", self.public_state.chips

        # Every player posts the floor bet up front.
        self.public_state.bets = [self.public_state.floor_bet for i in range(self.__params__["num_normal_players"])]
        self.public_state.chips = [self.public_state.chips[i] - self.public_state.floor_bet for i in range(self.__params__["num_normal_players"])]
        self.public_state.max_bet_sofar = self.public_state.floor_bet
        self.public_state.is_quit = [False for i in range(self.__params__["num_normal_players"])]
        self.public_state.num_quit = 0
        self.public_state.is_needed_to_action = [True for i in range(self.__params__["num_normal_players"])]
        self.public_state.num_needed_to_action = self.__params__["num_normal_players"]
        self.public_state.is_raise = [False for i in range(self.__params__["num_normal_players"])]
        self.public_state.num_raise = 0
        self.public_state.round = 1
        self.public_state.__turn__ = FiveCardStudEnv.__choose_player_at_begining_of_round__(self.public_state)
        self.public_state.__is_terminal__ = False
        self.public_state.__scores__ = None

        ## person_state
        # The extra entry at index num_normal_players appears to be the
        # chance/dealer player -- confirm against the roomai framework.
        for i in range(self.__params__["num_normal_players"]):
            self.person_states[i].__id__ = i
            self.person_states[i].first_hand_card = self.private_state.all_hand_cards[i]
            self.person_states[i].second_hand_card = self.private_state.all_hand_cards[self.__params__["num_normal_players"]+i]
        self.person_states[self.__params__["num_normal_players"]] .__id__= self.__params__["num_normal_players"]

        turn = self.public_state.turn
        self.person_states[turn].__available_actions__ = FiveCardStudEnv.available_actions(self.public_state, self.person_states[turn])

        self.__gen_state_history_list__()
        infos = self.__gen_infos__()

        return infos, self.public_state, self.person_states, self.private_state
#@override
def forward(self, action):
    '''
    The environment steps forward with the given action.

    :param action: the action taken by the current player; it must be one
        of this player's currently available actions
    :return: infos, public_state, person_states, private_state
    :raises ValueError: if the action is not valid for the current player
    '''
    turn = self.public_state.turn
    if not FiveCardStudEnv.is_action_valid(action, self.public_state, self.person_states[turn]):
        self.logger.critical("action=%s is invalid" % (action.key()))
        raise ValueError("action=%s is invalid" % (action.key()))

    pu = self.public_state
    pe = self.person_states
    pr = self.private_state

    # Record the action once.  (Bug fix: the original appended the same
    # (turn, action) pair a second time after dispatching below, so every
    # action appeared twice in the action history.)
    pu.__action_history__.append((pu.turn, action))
    pe[pu.turn].__available_actions__ = dict()

    # Dispatch on the action type.
    if action.option == FiveCardStudAction.Fold:
        self.action_fold(action)
    elif action.option == FiveCardStudAction.Check:
        self.action_check(action)
    elif action.option == FiveCardStudAction.Call:
        self.action_call(action)
    elif action.option == FiveCardStudAction.Raise:
        self.action_raise(action)
    elif action.option == FiveCardStudAction.Showhand:
        self.action_showhand(action)
    elif action.option == FiveCardStudAction.Bet:
        self.action_bet(action)
    else:
        raise Exception("action.option(%s) not in [Fold, Check_, Call, Raise, Showhand, Bet]" % (action.option))

    pu.previous_round = pu.round

    # The hand is over: reveal all hand cards and compute the final scores.
    if FiveCardStudEnv.__is_compute_scores__(self.public_state):
        num_normal_players = pu.num_normal_players
        pu.hand_cards = []
        pu.first_hand_cards = pr.all_hand_cards[0: 1 * num_normal_players]
        pu.second_hand_cards = pr.all_hand_cards[1 * num_normal_players: 2 * num_normal_players]
        pu.third_hand_cards = pr.all_hand_cards[2 * num_normal_players: 3 * num_normal_players]
        pu.fourth_hand_cards = pr.all_hand_cards[3 * num_normal_players: 4 * num_normal_players]
        pu.fifth_hand_cards = pr.all_hand_cards[4 * num_normal_players: 5 * num_normal_players]

        pu.round = 4
        pu.__is_terminal__ = True
        pu.__scores__ = self.__compute_scores__(pu)
        for i in range(num_normal_players):
            pu.chips[i] += pu.bets[i] + pu.scores[i]
        for i in range(num_normal_players):
            pe[i].first_hand_card = pr.all_hand_cards[0 * num_normal_players + i]
            pe[i].second_hand_card = pr.all_hand_cards[1 * num_normal_players + i]
            pe[i].third_hand_card = pr.all_hand_cards[2 * num_normal_players + i]
            pe[i].fourth_hand_card = pr.all_hand_cards[3 * num_normal_players + i]
            pe[i].fifth_hand_card = pr.all_hand_cards[4 * num_normal_players + i]
        pu.__turn__ = 0

    # Enter the next betting round: deal the next card to every player.
    elif FiveCardStudEnv.is_nextround(self.public_state):
        num_normal_players = self.public_state.num_normal_players
        if pu.round == 1:
            pu.third_hand_cards = pr.all_hand_cards[2 * num_normal_players: 3 * num_normal_players]
            for i in range(num_normal_players):
                pe[i].third_hand_card = pr.all_hand_cards[2 * num_normal_players + i]
        if pu.round == 2:
            pu.fourth_hand_cards = pr.all_hand_cards[3 * num_normal_players: 4 * num_normal_players]
            for i in range(num_normal_players):
                pe[i].fourth_hand_card = pr.all_hand_cards[3 * num_normal_players + i]
        if pu.round == 3:
            pu.fifth_hand_cards = pr.all_hand_cards[4 * num_normal_players: 5 * num_normal_players]
            for i in range(num_normal_players):
                pe[i].fifth_hand_card = pr.all_hand_cards[4 * num_normal_players + i]

        pu.round = pu.round + 1
        pu.__turn__ = FiveCardStudEnv.__choose_player_at_begining_of_round__(pu)

        # Reset the per-round "needs to act" flags for players still in the
        # hand who have not yet reached the all-in cap.
        pu.num_needed_to_action = 0
        for i in range(self.__params__["num_normal_players"]):
            if pu.is_quit[i] == False and pu.bets[i] < pu.upper_bet:
                pu.is_needed_to_action[i] = True
                pu.num_needed_to_action += 1
            pu.is_raise[i] = False
        pu.num_raise = 0
        pe[pu.turn].__available_actions__ = FiveCardStudEnv.available_actions(pu, pe[pu.turn])

    # Same round: pass the turn to the next player who still has to act.
    else:
        pu.__turn__ = self.__next_player__(pu)
        pe[pu.turn].__available_actions__ = FiveCardStudEnv.available_actions(pu, pe[pu.turn])

    self.__gen_state_history_list__()
    infos = self.__gen_infos__()
    return infos, self.public_state, self.person_states, self.private_state
#@override
@classmethod
def compete(cls, env, players):
    '''
    Run 1000 complete games and report the normal players' average scores.

    :param env: the fivecardstud game environment
    :param players: the list of players. The first n-1 players are AI bots and the last player is the chance player
    :return: the average score of each normal player, in units of the floor bet
    '''
    num_normal_players = len(players) - 1
    total_scores = [0 for _ in range(num_normal_players)]
    total_count = 1000

    for count in range(total_count):
        # Random starting stacks between 100 and 599 chips.
        chips = [100 + int(random.random() * 500) for _ in range(num_normal_players)]
        floor_bet = 10  # was computed but unused in the original; now passed below
        infos, public, persons, private = env.init({"num_normal_players": num_normal_players,
                                                    "chips": chips,
                                                    "floor_bet": floor_bet})
        for i in range(len(players)):
            players[i].receive_info(infos[i])
        # Play the hand to completion.
        while public.is_terminal == False:
            turn = public.turn
            action = players[turn].take_action()
            infos, public, persons, private = env.forward(action)
            for i in range(len(players)):
                players[i].receive_info(infos[i])
        for i in range(num_normal_players):
            players[i].reset()
            total_scores[i] += public.scores[i]

        # Progress log every 500 games.
        if (count + 1) % 500 == 0:
            tmp_scores = [total_scores[i] / (count + 1) for i in range(len(total_scores))]
            roomai.get_logger().info("FiveCardStud completes %d competitions, scores=%s" % (count + 1, ",".join([str(i) for i in tmp_scores])))

    for i in range(num_normal_players):
        total_scores[i] /= total_count * 1.0
    return total_scores
def action_fold(self, action):
    """Apply a Fold: the current player quits the hand and no longer acts."""
    state = self.public_state
    seat = state.turn
    state.is_quit[seat] = True
    state.num_quit += 1
    state.is_needed_to_action[seat] = False
    state.num_needed_to_action -= 1
def action_check(self, action):
    """Apply a Check: the current player stays in without adding chips."""
    state = self.public_state
    seat = state.turn
    state.is_needed_to_action[seat] = False
    state.num_needed_to_action -= 1
def action_call(self, action):
    """Apply a Call: pay action.price chips to match the current high bet."""
    state = self.public_state
    seat = state.turn
    state.chips[seat] -= action.price
    state.bets[seat] += action.price
    state.is_needed_to_action[seat] = False
    state.num_needed_to_action -= 1
def action_bet(self, action):
    """
    Apply a Bet: the current player puts action.price more chips in, which
    sets a new high bet, so every other live player who can still add
    chips has to act again.
    """
    state = self.public_state
    seat = state.turn
    state.chips[seat] -= action.price
    state.bets[seat] += action.price
    state.max_bet_sofar = state.bets[seat]
    state.is_needed_to_action[seat] = False
    state.num_needed_to_action -= 1

    # Re-open the action for the other players still in the hand that
    # have not reached the all-in cap.
    other = (seat + 1) % state.num_normal_players
    while other != seat:
        if state.is_quit[other] == False and state.is_needed_to_action[other] == False \
                and state.bets[other] < state.upper_bet:
            state.num_needed_to_action += 1
            state.is_needed_to_action[other] = True
        other = (other + 1) % state.num_normal_players
def action_raise(self, action):
    """
    Apply a Raise: the current player adds action.price chips, setting a
    new high bet, and is flagged as having raised this round; the other
    live players who can still add chips must act again.
    """
    state = self.public_state
    seat = state.turn
    state.chips[seat] -= action.price
    state.bets[seat] += action.price
    state.max_bet_sofar = state.bets[seat]
    state.is_needed_to_action[seat] = False
    state.num_needed_to_action -= 1
    state.is_raise[seat] = True
    state.num_raise += 1

    # Re-open the action for the other players still in the hand that
    # have not reached the all-in cap.
    other = (seat + 1) % state.num_normal_players
    while other != seat:
        if state.is_quit[other] == False and state.is_needed_to_action[other] == False \
                and state.bets[other] < state.upper_bet:
            state.num_needed_to_action += 1
            state.is_needed_to_action[other] = True
        other = (other + 1) % state.num_normal_players
def action_showhand(self, action):
    """Apply a Showhand (all-in): the player pushes all remaining chips
    into the pot (action.price is the player's whole remaining stack)."""
    pu = self.public_state
    pu.bets[pu.turn] += action.price
    pu.chips[pu.turn] = 0
    pu.is_needed_to_action[pu.turn] = False
    pu.num_needed_to_action -= 1

    # If the all-in raises above the current high bet, re-open the action
    # for the other live players who can still add chips.
    if pu.bets[pu.turn] > pu.max_bet_sofar:
        p = (pu.turn + 1) % pu.num_normal_players
        while p != pu.turn:
            if pu.is_quit[p] == False and pu.is_needed_to_action[p] == False and pu.bets[p] < pu.upper_bet:
                pu.num_needed_to_action += 1
                pu.is_needed_to_action[p] = True
            p = (p + 1) % pu.num_normal_players
        pu.is_raise[pu.turn] = True
        pu.max_bet_sofar = pu.bets[pu.turn]
        # NOTE(review): action_raise does ``pu.num_raise += 1`` but here the
        # counter is set to the boolean False (== 0).  Looks like either a
        # deliberate reset or a typo -- confirm the intended semantics before
        # relying on num_raise after an all-in.
        pu.num_raise = False
############################################# Utils Function ######################################################
@classmethod
def __valid_initialization_params__(cls, env):
    """Validate the initialization params stored on *env*.

    Raises ValueError if the chip list length does not match the player
    count, or if there are too many players for a 52-card deck; returns
    True otherwise.
    """
    chips = env.__params__["chips"]
    num_players = env.__params__["num_normal_players"]
    if len(chips) != num_players:
        raise ValueError("len(env.chips)%d != env.num_normal_players%d" % (len(chips), num_players))
    if num_players * 5 > 52:
        raise ValueError("env.num_normal_players * 5 must be less than 51, now env.num_normal_players = %d" % (num_players))
    return True
@classmethod
def __is_compute_scores__(cls, public_state):
    '''
    Decide whether the hand has ended and final scores must be computed.

    :param public_state: the public state of the game
    :return: True when (1) all but one player has folded, (2) the final
        betting round is complete, or (3) nobody still has to act and the
        high bet has reached the all-in cap; otherwise False.
    '''
    pu = public_state
    all_but_one_folded = (pu.num_quit == pu.num_normal_players - 1)
    final_round_done = (pu.round == 4 and pu.num_needed_to_action == 0)
    capped_out = (pu.num_needed_to_action == 0 and pu.max_bet_sofar == pu.upper_bet)
    return all_but_one_folded or final_round_done or capped_out
@classmethod
def __compute_scores__(cls, public_state):
    '''
    Compute each normal player's final score for the hand.

    :param public_state: terminal public state of the game
    :return: a list with one score per normal player, normalized by the
        floor bet (winner gets the pot minus own bet, losers lose their bets)
    '''
    # Case 1: everyone but one player folded -- the survivor takes the pot.
    if public_state.num_quit + 1 == public_state.num_normal_players:
        player_id = 0
        for i in range(public_state.num_normal_players):
            if public_state.is_quit[i] == False:
                player_id = i
        scores = [0 for k in range(public_state.num_normal_players)]
        for p in range(public_state.num_normal_players):
            if p == player_id:
                scores[p] = sum(public_state.bets) - public_state.bets[p]
            else:
                scores[p] = -public_state.bets[p]
        # Normalize to floor-bet units (* 1.0 forces true division in py2 style).
        for p in range(public_state.num_normal_players):
            scores[p] /= public_state.floor_bet * 1.0
        return scores
        # NOTE(review): unreachable -- directly follows a return; kept as found.
        raise ValueError("__compute_scores__ error, is_quit = ", public_state.is_quit, "num_quit=", public_state.num_quit)

    # Case 2: showdown -- compare the full five-card hands; best hand wins.
    max_cards = [public_state.first_hand_cards[0],
                 public_state.second_hand_cards[0], public_state.third_hand_cards[0],
                 public_state.fourth_hand_cards[0], public_state.fifth_hand_cards[0]]
    max_id = 0
    for i in range(1, public_state.num_normal_players):
        tmp = [public_state.first_hand_cards[i],
               public_state.second_hand_cards[i], public_state.third_hand_cards[i],
               public_state.fourth_hand_cards[i], public_state.fifth_hand_cards[i]]
        if FiveCardStudEnv.compare_cards(max_cards, tmp) < 0:
            max_cards = tmp
            max_id = i

    scores = [0 for i in range(public_state.num_normal_players)]
    for i in range(public_state.num_normal_players):
        if i == max_id:
            scores[i] = sum(public_state.bets) - public_state.bets[i]
        else:
            scores[i] = -public_state.bets[i]
    # Normalize to floor-bet units.
    for i in range(public_state.num_normal_players):
        scores[i] /= public_state.floor_bet * 1.0
    return scores
@classmethod
def __choose_player_at_begining_of_round__(cls, public_state):
    '''
    Pick the player who acts first at the start of the current round:
    the player showing the strongest face-up card(s) so far.

    :param public_state: the public state of the game
    :return: the id of the player who acts first
    :raises ValueError: if public_state.round is not in [1, 2, 3, 4]
    '''
    round = public_state.round
    if round in [1, 2, 3]:
        # In rounds 1-3 the newest face-up card decides who opens.
        public_cards = None
        if round == 1:
            public_cards = public_state.second_hand_cards
        elif round == 2:
            public_cards = public_state.third_hand_cards
        elif round == 3:
            public_cards = public_state.fourth_hand_cards

        # Start from the first player still in the hand.
        max_id = 0
        for i in range(public_state.num_normal_players):
            if public_state.is_quit[i] == False:
                max_id = i
                break
        max_card = public_cards[max_id]
        for i in range(1, public_state.num_normal_players):
            if FiveCardStudPokerCard.compare(max_card, public_cards[i]) < 0 and public_state.is_quit[i] == False:
                max_card = public_cards[i]
                max_id = i
        return max_id

    elif round == 4:
        # Final round: compare the four visible cards of each player.
        # NOTE(review): unlike rounds 1-3, folded players are not skipped
        # here -- confirm whether that is intended.
        max_cards = [public_state.second_hand_cards[0], public_state.third_hand_cards[0],
                     public_state.fourth_hand_cards[0], public_state.fifth_hand_cards[0]]
        max_id = 0
        for i in range(1, public_state.num_normal_players):
            tmp = [public_state.second_hand_cards[i], public_state.third_hand_cards[i],
                   public_state.fourth_hand_cards[i], public_state.fifth_hand_cards[i]]
            if FiveCardStudEnv.compare_cards(max_cards, tmp) < 0:
                max_cards = tmp
                max_id = i
        return max_id
    else:
        # Bug fix: the original formatted public_state.turn into a message
        # about public_state.round.
        raise ValueError("public_state.round(%d) not in [1,2,3,4]" % (public_state.round))
@classmethod
def __next_player__(cls, pu):
    """
    Return the id of the next player (after the current turn) who still
    has to act, or -1 if nobody has to act.

    (Bug fix: the first parameter of this classmethod was named ``self``;
    renamed to ``cls``.)
    """
    i = pu.turn
    if pu.num_needed_to_action == 0:
        return -1
    p = (i + 1) % pu.num_normal_players
    while pu.is_needed_to_action[p] == False:
        p = (p + 1) % pu.num_normal_players
    return p
@classmethod
def is_action_valid(cls, action, public_state, person_state):
    '''
    Check whether *action* is one of the current player's available actions.

    :param action: the candidate action
    :param public_state: the public state of the game (not consulted here)
    :param person_state: the person state of the acting player
    :return: True if the action key is available, otherwise False
    '''
    return action.key in person_state.available_actions
@classmethod
def available_actions(cls, public_state, person_state):
    '''
    Compute all actions available to the current player.

    :param public_state: the public state of the game
    :param person_state: the person state corresponding to the current player
    :return: a dict mapping action keys (e.g. "Call_30") to FiveCardStudAction objects
    :raises ValueError: if public_state.round is not in [1, 2, 3, 4]
    '''
    pu = public_state
    round = pu.round
    turn = pu.turn
    # Chips needed to go all-in, and chips needed to call the high bet.
    Showhand_count = pu.upper_bet - pu.bets[turn]
    Call_count = pu.max_bet_sofar - pu.bets[turn]

    actions = dict()
    if round == 1 or round == 2 or round == 3:
        if pu.previous_round is None or pu.previous_round == round - 1:
            # First action of the round: the player may open the betting.
            ## bet
            for i in range(int(Call_count + 1), int(pu.upper_bet - pu.bets[turn])):
                actions["Bet_%d" % i] = FiveCardStudAction.lookup("Bet_%d" % i)
            ## fold
            actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
            ## showhand (all-in)
            if Showhand_count > 0:
                actions["Showhand_%d" % (Showhand_count)] = FiveCardStudAction.lookup("Showhand_%d" % Showhand_count)
            ## check
            actions["Check_0"] = FiveCardStudAction.lookup("Check_0")
        else:
            ## fold
            actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
            ## showhand (all-in)
            if Showhand_count > 0:
                actions["Showhand_%d" % Showhand_count] = FiveCardStudAction.lookup("Showhand_%d" % (Showhand_count))
            ## call, or check when there is nothing to call
            if Call_count < Showhand_count:
                if Call_count == 0:
                    actions["Check_0"] = FiveCardStudAction.lookup("Check_0")
                else:
                    actions["Call_%d" % (Call_count)] = FiveCardStudAction.lookup("Call_%d" % (Call_count))
            ## raise (only once per round per player)
            if pu.is_raise[turn] == False:
                for i in range(int(Call_count + 1), int(Showhand_count)):
                    actions["Raise_%d" % (i)] = FiveCardStudAction.lookup("Raise_%d" % i)
    elif round == 4:
        if pu.previous_round == round - 1:
            ## showhand (all-in)
            if Showhand_count > 0:
                actions["Showhand_%d" % (Showhand_count)] = FiveCardStudAction.lookup("Showhand_%d" % (Showhand_count))
            ## bet (int() on the lower bound added for consistency with rounds 1-3)
            for i in range(int(Call_count + 1), int(pu.upper_bet) - int(pu.bets[turn])):
                actions["Bet_%d" % i] = FiveCardStudAction.lookup("Bet_%d" % i)
            ## fold
            actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
        else:
            ## fold
            # Bug fix: the original built this single action with the
            # FiveCardStudAction constructor instead of lookup(), unlike
            # every other action in this method.
            actions["Fold_0"] = FiveCardStudAction.lookup("Fold_0")
            ## call / check / showhand when the call equals the all-in amount
            if Call_count == Showhand_count and Showhand_count > 0:
                actions["Showhand_%d" % (Call_count)] = FiveCardStudAction.lookup("Showhand_%d" % (Call_count))
            elif Call_count == 0:
                actions["Check_0"] = FiveCardStudAction.lookup("Check_0")
            else:
                actions["Call_%d" % (Call_count)] = FiveCardStudAction.lookup("Call_%d" % (Call_count))
    else:
        # Bug fix: the error message formatted public_state.turn as the round.
        raise ValueError("public_state.round(%d) not in [1,2,3,4]" % (public_state.round))
    return actions
@classmethod
def is_nextround(cls, public_state):
    '''
    :param public_state: the public state of the game
    :return: A boolean indicating whether it is time to enter the next
        round (true once nobody still has to act in the current round).

    (Bug fix: the first parameter of this classmethod was named ``self``;
    renamed to ``cls``.)
    '''
    return public_state.num_needed_to_action == 0
@classmethod
def compare_cards(cls, cards1, cards2):
    """
    Compare two hands of equal size (4 or 5 cards).

    Args:
        cards1: a list of 4 or 5 FiveCardStudPokerCard objects
        cards2: a list of the same length as cards1
    Returns:
        a negative number if cards1 ranks below cards2, 0 if equal,
        a positive number if cards1 ranks above cards2
    Raises:
        ValueError: if the hands differ in length or are not 4/5 cards
    Note:
        both input lists are sorted in place as a side effect.
        pattern[5] is assumed to be the pattern's numeric rank (see
        FiveCardStudAllCardsPattern) -- TODO confirm.
    """
    if len(cards1) == len(cards2) and len(cards1) == 4:
        pattern1 = cls.fourcards2pattern(cards1)
        pattern2 = cls.fourcards2pattern(cards2)
        if pattern1[5] != pattern2[5]:
            return pattern1[5] - pattern2[5]
        else:
            # Same pattern rank: the highest single card breaks the tie.
            cards1.sort(key=cmp_to_key(FiveCardStudPokerCard.compare))
            cards2.sort(key=cmp_to_key(FiveCardStudPokerCard.compare))
            return FiveCardStudPokerCard.compare(cards1[-1], cards2[-1])
    elif len(cards1) == len(cards2) and len(cards1) == 5:
        pattern1 = cls.cards2pattern(cards1)
        pattern2 = cls.cards2pattern(cards2)
        if pattern1[5] != pattern2[5]:
            return pattern1[5] - pattern2[5]
        else:
            # Same pattern rank: the highest single card breaks the tie.
            cards1.sort(key=cmp_to_key(FiveCardStudPokerCard.compare))
            cards2.sort(key=cmp_to_key(FiveCardStudPokerCard.compare))
            return FiveCardStudPokerCard.compare(cards1[-1], cards2[-1])
    else:
        raise ValueError("len(cards1)%d, and len(cards2)%d are same and are 4 or 5 " % (len(cards1), len(cards2)))
@classmethod
def cards2pattern(cls, cards):
    """
    Classify a five-card hand into a FiveCardStud hand pattern.

    Args:
        cards: a list of five FiveCardStudPokerCard objects
    Returns:
        the matching entry of roomai.fivecardstud.FiveCardStudAllCardsPattern
    """
    # Group the cards by point rank, each group sorted.
    pointrank2cards = dict()
    for c in cards:
        if c.point_rank in pointrank2cards:
            pointrank2cards[c.point_rank].append(c)
        else:
            pointrank2cards[c.point_rank] = [c]
    for p in pointrank2cards:
        pointrank2cards[p].sort(key=cmp_to_key(FiveCardStudPokerCard.compare))

    # Group the cards by suit rank, each group sorted.
    suitrank2cards = dict()
    for c in cards:
        if c.suit_rank in suitrank2cards:
            suitrank2cards[c.suit_rank].append(c)
        else:
            suitrank2cards[c.suit_rank] = [c]
    for s in suitrank2cards:
        suitrank2cards[s].sort(key=cmp_to_key(FiveCardStudPokerCard.compare))

    # num2pointrank[n] lists the point ranks that occur exactly n times.
    num2pointrank = [[], [], [], [], []]
    for p in pointrank2cards:
        num2pointrank[len(pointrank2cards[p])].append(p)
    for i in range(5):
        # Bug fix: the original sorted num2pointrank[num] (a stale index
        # left over from the previous loop) instead of each bucket.
        num2pointrank[i].sort()

    # All distinct point ranks in ascending order.
    sorted_pointrank = sorted(pointrank2cards)

    ## Straight_SameSuit (straight flush)
    for s in suitrank2cards:
        if len(suitrank2cards[s]) >= 5:
            numStraight = 1
            for i in range(len(suitrank2cards[s]) - 2, -1, -1):
                if suitrank2cards[s][i].point_rank == suitrank2cards[s][i + 1].point_rank - 1:
                    numStraight += 1
                else:
                    numStraight = 1
            if numStraight == 5:
                return roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_SameSuit"]
    ## 4_1 (four of a kind)
    if len(num2pointrank[4]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["4_1"]
    ## 3_2 (full house)
    if len(num2pointrank[3]) == 1 and len(num2pointrank[2]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["3_2"]
    ## SameSuit (flush)
    for s in suitrank2cards:
        if len(suitrank2cards[s]) >= 5:
            return roomai.fivecardstud.FiveCardStudAllCardsPattern["SameSuit"]
    ## Straight_DiffSuit (straight)
    numStraight = 1
    for idx in range(len(sorted_pointrank) - 2, -1, -1):
        # Bug fix: the original compared sorted_pointrank[idx] with itself
        # (always false), so a mixed-suit straight was never detected.
        if sorted_pointrank[idx] + 1 == sorted_pointrank[idx + 1]:
            numStraight += 1
        else:
            numStraight = 1
    if numStraight == 5:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_DiffSuit"]
    ## 3_1_1 (three of a kind)
    if len(num2pointrank[3]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["3_1_1"]
    ## 2_2_1 (two pair)
    if len(num2pointrank[2]) >= 2:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["2_2_1"]
    ## 2_1_1_1 (one pair)
    if len(num2pointrank[2]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["2_1_1_1"]
    ## 1_1_1_1_1 (high card)
    return roomai.fivecardstud.FiveCardStudAllCardsPattern["1_1_1_1_1"]
@classmethod
def fourcards2pattern(cls, cards):
    """
    Classify a partial four-card hand into the best pattern it shows.

    Args:
        cards: a list of four FiveCardStudPokerCard objects
    Returns:
        the matching entry of roomai.fivecardstud.FiveCardStudAllCardsPattern
    """
    # Group the cards by point rank, each group sorted.
    pointrank2cards = dict()
    for c in cards:
        if c.point_rank in pointrank2cards:
            pointrank2cards[c.point_rank].append(c)
        else:
            pointrank2cards[c.point_rank] = [c]
    for p in pointrank2cards:
        pointrank2cards[p].sort(key=cmp_to_key(FiveCardStudPokerCard.compare))

    # Group the cards by suit rank, each group sorted.
    suitrank2cards = dict()
    for c in cards:
        if c.suit_rank in suitrank2cards:
            suitrank2cards[c.suit_rank].append(c)
        else:
            suitrank2cards[c.suit_rank] = [c]
    for s in suitrank2cards:
        suitrank2cards[s].sort(key=cmp_to_key(FiveCardStudPokerCard.compare))

    # num2pointrank[n] lists the point ranks that occur exactly n times.
    num2pointrank = [[], [], [], [], []]
    for p in pointrank2cards:
        num2pointrank[len(pointrank2cards[p])].append(p)
    for i in range(5):
        # Bug fix: the original sorted num2pointrank[num] (a stale index
        # left over from the previous loop) instead of each bucket.
        num2pointrank[i].sort()

    # All distinct point ranks in ascending order.
    sorted_pointrank = sorted(pointrank2cards)

    ## candidate Straight_SameSuit (four to a straight flush)
    for s in suitrank2cards:
        if len(suitrank2cards[s]) >= 4:
            numStraight = 1
            for i in range(len(suitrank2cards[s]) - 2, -1, -1):
                if suitrank2cards[s][i].point_rank == suitrank2cards[s][i + 1].point_rank - 1:
                    numStraight += 1
                else:
                    numStraight = 1
            if numStraight == 4:
                return roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_SameSuit"]
    ## 4_1 (four of a kind)
    if len(num2pointrank[4]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["4_1"]
    ## 3_2 -- impossible with four cards, kept for symmetry with cards2pattern
    if len(num2pointrank[3]) == 1 and len(num2pointrank[2]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["3_2"]
    ## SameSuit (four to a flush)
    for s in suitrank2cards:
        if len(suitrank2cards[s]) >= 4:
            return roomai.fivecardstud.FiveCardStudAllCardsPattern["SameSuit"]
    ## Straight_DiffSuit (four to a straight)
    numStraight = 1
    for idx in range(len(sorted_pointrank) - 2, -1, -1):
        # Bug fix: the original compared sorted_pointrank[idx] with itself
        # (always false), so a mixed-suit straight was never detected.
        if sorted_pointrank[idx] + 1 == sorted_pointrank[idx + 1]:
            numStraight += 1
        else:
            numStraight = 1
    if numStraight == 4:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["Straight_DiffSuit"]
    ## 3_1_1 (three of a kind)
    if len(num2pointrank[3]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["3_1_1"]
    ## 2_2_1 (two pair)
    if len(num2pointrank[2]) >= 2:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["2_2_1"]
    ## 2_1_1_1 (one pair)
    if len(num2pointrank[2]) == 1:
        return roomai.fivecardstud.FiveCardStudAllCardsPattern["2_1_1_1"]
    ## 1_1_1_1_1 (high card)
    return roomai.fivecardstud.FiveCardStudAllCardsPattern["1_1_1_1_1"]
def __deepcopy__(self, memodict=None, newinstance=None):
    """
    Deep-copy this environment via the shared base-class machinery.

    :param memodict: memo dict from the copy protocol (unused here; the
        original declared a mutable ``{}`` default, replaced by None)
    :param newinstance: optional pre-built instance to copy into
    :return: the copied FiveCardStudEnv
    """
    if newinstance is None:
        newinstance = FiveCardStudEnv()
    return super(FiveCardStudEnv, self).__deepcopy__(newinstance=newinstance)
336223 | <gh_stars>0
from collections import OrderedDict
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from mkt.api.base import CORSMixin
from mkt.constants.features import APP_FEATURES, FeatureProfile
class AppFeaturesList(CORSMixin, APIView):
    """
    Read-only API endpoint listing every known app feature.

    If a feature-profile signature is supplied via the ``pro`` query
    parameter, each feature is additionally annotated with whether it is
    present in that profile.
    """
    # Public, unauthenticated endpoint; only GET is allowed (incl. CORS).
    authentication_classes = permission_classes = []
    cors_allowed_methods = ['get']

    def _feature(self, i, slug):
        """Build the (key, payload) pair for a single feature.

        :param i: zero-based position of the feature in APP_FEATURES
        :param slug: feature slug (upper-cased to index APP_FEATURES)
        :return: (lowercased slug, payload dict) tuple
        """
        feature = APP_FEATURES[slug.upper()]
        data = {
            'apis': feature['apis'],
            'name': feature['name'],
            'description': feature['description'],
            'position': i + 1,  # 1-based position for the client
        }
        if self.profile:
            # self.profile is set by get() before _feature() runs.
            data['present'] = self.profile.get(slug.lower(), False)
        return (slug.lower(), data)

    def get(self, request, *args, **kwargs):
        """Return the ordered feature dict, optionally matched against the
        profile signature given in the ``pro`` query parameter."""
        if 'pro' in request.GET:
            self.profile = FeatureProfile.from_signature(request.GET['pro'])
        else:
            self.profile = None
        features = OrderedDict(self._feature(i, slug) for i, slug in
                               enumerate(APP_FEATURES.keys()))
        return Response(features, status=status.HTTP_200_OK)
| StarcoderdataPython |
3277777 | <reponame>DeliciousLlama/MCturtle
from mcpi.minecraft import Minecraft  # MCPI is required by MCpen
import time  # used for the pauses between pen moves
from MCpen.mcturtle import MCTurtle, direction  # the MCpen turtle library
# NOTE: with [pip install MCpen] the package prefix is just `mcturtle`; the
# `MCpen.` prefix here is for those who cannot use the pip install method.
import random  # imported but not used below

# ---- set-up ---------------------------------------------------------------
mc = Minecraft.create("localhost")  # connect to a Minecraft server (here: a local one)
playerId = mc.getPlayerEntityId("GnarlyLlama")  # entity id of the player to follow
# Note: replace the user name above with your own user name if needed.
pos = mc.entity.getPos(playerId)  # current position of that player
px = pos.x  # player X coordinate
py = pos.y  # player Y coordinate
pz = pos.z  # player Z coordinate
t = MCTurtle(mc, px, py-1, pz)  # create the turtle one block under the player's feet
time.sleep(3)  # give us time to switch over to the game window

# ---- drawing --------------------------------------------------------------
t.updateStroke(159)  # stroke block id 159 (hardened clay): drawn wherever the pen moves
t.forward(15)  # move the pen forward 15 blocks
time.sleep(1)
t.turn(direction.UP)  # point the pen upwards
time.sleep(1)
t.forward(10)  # forward now means up, because the pen is pointing up
time.sleep(1)
t.turn(direction.DOWN)  # point the pen back down
time.sleep(1)
t.home()  # return the turtle to the place where it was created
| StarcoderdataPython |
# -*- coding: UTF-8 -*-
from __future__ import print_function, division, absolute_import
from io import StringIO
from numba.annotate.annotate import (Source, Annotation, Intermediate, Program,
A_type, render_text, Renderer)
# ______________________________________________________________________
# Sample annotated program: linemap holds the Python source lines, and
# annotations marks line 2 with inferred types for `a` and `b`.
py_source = Source(
    linemap={ 1: u'def foo(a, b):',
              2: u' print a * b',
              3: u' a / b',
              4: u' return a - b' },
    annotations={ 2: [Annotation(A_type, (u'a', u'double')),
                      Annotation(A_type, (u'b', u'double'))] }
)
class LLVMRenderer(Renderer):
    """Toy renderer producing a hand-written LLVM-IR view of py_source."""
    # This renderer only knows how to render source text.
    capabilities = frozenset(["source"])

    def render(self, capability):
        """Return (linenomap, Source) for the LLVM view.

        linenomap maps each Python line number to the list of LLVM line
        numbers generated from it; the Source carries the LLVM lines plus
        type annotations on LLVM line 3.
        """
        linenomap = { 1: [0], 2: [1, 2, 3], 4: [5], }
        llvm_linemap = {
            0: u'call @printf(%a, %b)',
            1: u'%0 = load a',
            2: u'%1 = load b',
            3: u'%2 = fadd %0 %1',
            4: u'%3 = fdiv %a %b',
            5: u'ret something',
        }
        annotations = {
            3: [Annotation(A_type, (u'%0', u'double')),
                Annotation(A_type, (u'%1', u'double'))],
        }
        return linenomap, Source(llvm_linemap, annotations)
# Wrap the renderer as a named intermediate representation and build the
# program under test: Python source plus its LLVM view.
llvm_intermediate = Intermediate("llvm", LLVMRenderer())
p = Program(py_source, [llvm_intermediate])
# ______________________________________________________________________
def run_render_text():
    """Render the sample program as plain text and sanity-check the output."""
    buf = StringIO()
    render_text(p, emit=buf.write)
    rendered = buf.getvalue()
    for expected in ('def foo(a, b):', 'print a * b', 'return a - b', 'double'):
        assert expected in rendered
def run_render_text_inline():
    """Render with the llvm intermediate inlined and check its markers appear."""
    buf = StringIO()
    render_text(p, emit=buf.write, intermediate_names=["llvm"])
    rendered = buf.getvalue()
    for expected in ('def foo(a, b):', '____llvm____', '%0 = load a'):
        assert expected in rendered
def run_render_text_outline():
    """Render with the llvm intermediate as a separate section and check it."""
    buf = StringIO()
    render_text(p, emit=buf.write, inline=False, intermediate_names=["llvm"])
    rendered = buf.getvalue()
    for expected in ('def foo(a, b):', '====llvm====', '%0 = load a'):
        assert expected in rendered
# Execute the smoke tests at import time.
run_render_text()
run_render_text_inline()
run_render_text_outline()
from django.db import models
from django.contrib.auth.models import User
class Cryptographer(models.Model):
    """Per-user profile tracking a running points total."""
    # Exactly one profile per auth user; deleting the user deletes the profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    points = models.PositiveIntegerField(default=0)

    def __str__(self):
        # Display the profile as its owning user.
        return str(self.user)
263947 | <reponame>scopatz/leyline
"""Tools for handling events in the documents."""
from leyline.ast import indent
from leyline.context_visitor import ContextVisitor
class EventsVisitor(ContextVisitor):
    """AST visitor that flattens a document into a list of events.

    Ordinary nodes are appended to the current (last) event; Event nodes
    switch the current event via their render() hooks.
    """

    def __init__(self, *, initial_event=None, **kwargs):
        super().__init__(**kwargs)
        # events is never empty: there is always a current event to append to.
        self.events = []
        if initial_event is None:
            initial_event = Event()
        self.events.append(initial_event)

    def __str__(self):
        s = 'Events:\n' + indent('\n'.join(map(str, self.events)), ' ')
        return s

    @property
    def current_event(self):
        """The event currently being filled (always the last one)."""
        return self.events[-1]

    @current_event.setter
    def current_event(self, val):
        # Setting the current event pushes it onto the list.
        self.events.append(val)

    def visit_node(self, node):
        """generic visitor just adds node to current event body."""
        self.current_event.append(node)

    def _bodied_visit(self, node):
        """Visits each subnode in the body of the given node."""
        for n in node.body:
            self.visit(n)

    # All of these node kinds are visited purely through their bodies.
    visit_document = _bodied_visit
    visit_textblock = _bodied_visit
    visit_corporealmacro = _bodied_visit
class Event:
    """A generic event base class that contains ASTs to render for
    a given duration beginning at a start time.
    """
    type = 'event'
    attrs = ()

    def __init__(self, *, body=None, start=None, duration=None, **kwargs):
        self.body = body if body is not None else []
        self.start = start
        self.duration = duration

    def render_latex(self, visitor):
        return '\\phantom{}'

    def render_notes(self, visitor):
        return '\\phantom{}'

    def render(self, target, visitor):
        # Only visitors that track events care; make this the current event.
        if not hasattr(visitor, 'events'):
            return ''
        visitor.current_event = self
        return ''

    def append(self, node):
        """Add an AST node to this event's body."""
        self.body.append(node)

    def __str__(self):
        pieces = [self.__class__.__name__ + '(\n']
        pieces.append(' start=' + repr(self.start) + ',\n')
        pieces.append(' duration=' + repr(self.duration) + ',\n')
        for attr in self.attrs:
            pieces.append(' {0}={1},\n'.format(attr, getattr(self, attr)))
        pieces.append(' body=[\n')
        pieces.append(' ' + indent(',\n'.join(map(str, self.body)), ' '))
        pieces.append(' ]\n)')
        return ''.join(pieces)

    def __repr__(self):
        return str(self)

    def __eq__(self, other):
        if self.type != other.type:
            return False
        if self.start != other.start:
            return False
        if self.duration != other.duration:
            return False
        if self.body != other.body:
            return False
        # Subclass-specific attributes must match as well.
        return all(getattr(self, a) == getattr(other, a) for a in self.attrs)
class Slide(Event):
    """A transition event representing moving to a new slide.
    Each slide consists of a number of sub-slides: body/start/duration are
    per-sub-slide lists, and ``idx`` selects which sub-slide new nodes are
    appended to.
    """
    type = 'slide'
    attrs = ('title',)

    def __init__(self, title='', body=None, start=None, duration=None, **kwargs):
        super().__init__(**kwargs)
        self.title = title
        # One entry per sub-slide (overrides the scalars set by Event.__init__).
        self.body = [[]] if body is None else body
        self.start = [None] if start is None else start
        self.duration = [None] if duration is None else duration
        self.idx = 0  # which subslide index new nodes should be applied to

    def append(self, node):
        # Grow the per-sub-slide lists on demand up to the current index.
        while len(self.body) < self.idx + 1:
            self.body.append([])
            self.start.append(None)
            self.duration.append(None)
        self.body[self.idx].append(node)

    def render_slides(self, visitor):
        """Close the previous beamer frame and open a new titled one."""
        title = self.title or ''
        return '\\end{frame}\n\\begin{frame}\n\\frametitle{' + title + '}\n'
class Subslide(Event):
    """Marker event that advances (or sets) the sub-slide index of the most
    recent Slide; it carries no content of its own."""
    type = 'subslide'
    attrs = ('idx',)

    def __init__(self, idx=None, **kwargs):
        # Does not call super().__init__(): body/start/duration are
        # explicitly nulled out below since a subslide holds no content.
        self.idx = idx  # None means "advance the slide's index by one"
        self.body = self.start = self.duration = None

    def render(self, target, visitor):
        # this event should not add itself to the visitor
        if not hasattr(visitor, 'events'):
            return ''
        # Find the most recent Slide and bump (or set) its sub-slide index.
        for event in reversed(visitor.events):
            if isinstance(event, Slide):
                break
        else:
            raise ValueError('subslide before slide')
        idx = event.idx + 1 if self.idx is None else self.idx
        event.idx = idx
        return ''

    def append(self, node):
        # Subslides hold no content; nodes are dropped.
        pass
class Sleep(Event):
    """An event representing pausing for the provided number of seconds."""
    type = 'sleep'

    def __init__(self, duration=0.0, **kwargs):
        # The pause length is just the event duration.
        super().__init__(duration=duration, **kwargs)
# Registry mapping event type name -> Event subclass, built from every Event
# class defined in this module.
EVENTS_CTX = {_.type: _ for _ in globals().values() if isinstance(_, type) and issubclass(_, Event)}
| StarcoderdataPython |
from keras_peleenet import peleenet_model
from PIL import Image
import numpy as np
import torchvision.transforms as transforms
def softmax(x):
    """Return the softmax of array *x*, summing over axis 0.

    Bug fix: the original exponentiated ``x`` directly, which overflows to
    inf/NaN for large logits.  Subtracting the global maximum first is the
    standard numerically stable form and does not change the result, since
    the constant factor exp(-max) cancels in the ratio.
    """
    shifted = np.exp(x - np.max(x))
    return shifted / np.sum(shifted, axis=0)
# Build PeleeNet for 224x224 RGB input and load the pretrained weights.
model = peleenet_model(input_shape=(224, 224, 3))
model.load_weights('peleenet_keras_weights.h5')

# Load the class-id -> label mapping; each line looks like "id: label".
file_name = 'synset_words.txt'
classes = {}
for line in open(file_name):
    line = line.rstrip().split(':')
    classes[int(line[0])] = line[1]
print(classes)

# Standard ImageNet preprocessing: resize + per-channel normalization.
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    normalize,
])

img = 'images/pig.jpeg'
img = Image.open(img)
np_img = np.asarray(img)  # NOTE(review): unused below
img = transform(img)
img.unsqueeze_(dim=0)  # add the batch dimension
print(img.shape)

# Torch tensors are NCHW; Keras expects NHWC, hence the transpose.
img = img.cpu().numpy()
img = img.transpose((0, 2, 3, 1))

output = model.predict(img)[0]
print(output)
output = softmax(output)
print(classes[np.argmax(output)])  # print the top-1 class label
# Generated by Django 3.0.7 on 2021-06-25 17:45
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Redmine_issues table,
    # which mirrors issue records fetched from a Redmine instance.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Redmine_issues',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('assigned_to_name', models.CharField(max_length=50, null=True)),
                ('author_name', models.CharField(max_length=50, null=True)),
                # issue_id is Redmine's own identifier, distinct from the local pk.
                ('issue_id', models.IntegerField()),
                ('subject', models.CharField(max_length=200, null=True)),
                ('description', models.TextField(null=True)),
                ('status_name', models.CharField(max_length=20, null=True)),
                ('tracker_name', models.CharField(max_length=20, null=True)),
                ('priority_name', models.CharField(max_length=20, null=True)),
                ('project_name', models.CharField(max_length=20, null=True)),
                ('category_name', models.CharField(max_length=20, null=True)),
                ('custom_field1_value', models.CharField(max_length=100, null=True)),
                ('custom_field2_value', models.CharField(max_length=100, null=True)),
                ('custom_field3_value', models.TextField(null=True)),
                ('done_ratio', models.IntegerField(null=True)),
                ('estimated_hours', models.DecimalField(decimal_places=2, max_digits=10, null=True)),
                ('start_date', models.DateTimeField(null=True)),
                ('due_date', models.DateTimeField(null=True)),
                ('created_on', models.DateTimeField(null=True)),
                ('updated_on', models.DateTimeField(null=True)),
                ('closed_on', models.DateTimeField(null=True)),
                # Local bookkeeping timestamps, maintained by Django.
                ('date_created', models.DateTimeField(auto_now_add=True)),
                ('date_modified', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
| StarcoderdataPython |
3435605 | import argparse
import pickle
import sys

import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data

import vgg
from HDF5Dataset import HDF5Dataset
# define the command line parser.
parser = argparse.ArgumentParser(description="""Script to train the GalaxyZoo VGG network on the
GalaxyZoo dataset.""")
parser.add_argument('--batch-size', '-b', type=int, default=16, help='batch size for training.')
parser.add_argument('--epochs', '-e', type=int, default=100, help='epochs for training.')
parser.add_argument('--dataset-path', type=str, default='./data', help='path to the galaxy zoo dataset.')
parser.add_argument('--optimizer', type=str.lower, default='sgd', help='optimizer for training.')
parser.add_argument('--learning-rate', type=float, default=0.01, help='learning rate for training.')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum for training.')
parser.add_argument('--network', type=str, default='vgg11', help='CNN network to train.')
parser.add_argument('--batch-norm', default=False, action='store_true')
parser.add_argument('--data-path', type=str, default='/home/idies/workspace/Temporary/adi/scratch/galaxyzoo-cnn-aas2021/data/', help='Path to the galaxyzoo dataset.')
args = parser.parse_args()

# check args values.
if args.optimizer != 'sgd' and args.optimizer != 'adam':
    raise ValueError("--optimizer must be 'sgd' or 'adam'. Got '{}' instead.".format(args.optimizer))

# select the batch-normalized network variant if requested.
if args.batch_norm:
    args.network += '_bn'
vgg_network = getattr(vgg, args.network)()

# Bug fix: the original only created an optimizer for 'sgd', so running with
# --optimizer adam crashed with NameError at optimizer.zero_grad().
if args.optimizer == 'sgd':
    optimizer = optim.SGD(vgg_network.parameters(), lr=args.learning_rate, momentum=args.momentum)
else:
    optimizer = optim.Adam(vgg_network.parameters(), lr=args.learning_rate)

print('{}\n'.format(vars(args)))
print('{}\n'.format(vgg_network))
print('{}\n'.format(optimizer))

train_data = HDF5Dataset(args.data_path, min_pixel_dims=0, max_pixel_dims=sys.maxsize, label_keys=['p_el', 'p_cw', 'p_acw', 'p_edge', 'p_dk', 'p_mg'])
data_loader = data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
vgg_network.to(device)

for epoch in range(args.epochs):  # bug fix: honor --epochs (was hard-coded to 100)
    running_loss = 0.
    train_acc = 0.
    # Renamed the loop variable: the original `data` shadowed the
    # torch.utils.data module imported above.
    for batch in data_loader:
        inputs, labels = batch[0].float().to(device), batch[1].float().to(device)
        optimizer.zero_grad()
        outputs = vgg_network(inputs)  # bug fix: `net` was never defined
        outputs_logsftmx = F.log_softmax(outputs, dim=-1)
        pred = torch.argmax(outputs_logsftmx, dim=1)
        ground_truth = torch.argmax(labels, dim=1)
        train_acc += torch.sum(pred == ground_truth).item()
        # Cross-entropy against soft labels: mean over batch of -sum(p * log q).
        loss = torch.mean(torch.sum(- labels * outputs_logsftmx, 1))
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    # Bug fixes: '%.2%%' is an invalid printf spec ('%.2f%%' intended) and the
    # raw correct-count must be normalized by the dataset size to be a percentage.
    print('[%d] loss: %.5f, training accuracy: %.2f%%' % (epoch + 1, running_loss, 100 * train_acc / len(train_data)))
#for epoch in range(args.epochs):
#    for i, data in enumerate(data_loader, 0):
| StarcoderdataPython |
1656325 | from django.contrib.auth import get_user_model
from django.test import SimpleTestCase, TestCase
from django.urls import resolve, reverse
from .views import AboutPageView, HomePageView
class HomePageTests(SimpleTestCase):
    """Tests for the home page: URL, named route, template, and view resolution."""

    def test_home_page_status_code(self):
        response = self.client.get("/")
        self.assertEqual(response.status_code, 200)

    def test_view_url_by_name(self):
        response = self.client.get(reverse("home"))
        self.assertEqual(response.status_code, 200)

    def test_view_uses_correct_template(self):
        response = self.client.get(reverse("home"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "home.html")

    def test_homepage_url_resolves_homepageview(self):
        # Bug fixes: the method name was misspelled "tesst_..." so the runner
        # never collected it, and resolve() takes a URL *path* (cf. the
        # AboutPageTests below), not a URL name.
        view = resolve("/")
        self.assertEqual(view.func.__name__, HomePageView.as_view().__name__)
class AboutPageTests(SimpleTestCase):
    """Tests for the static about page."""

    def setUp(self):
        # Fetch the page once; every test inspects the same response.
        self.response = self.client.get(reverse("about"))

    def test_aboutpage_status_code(self):
        self.assertEqual(self.response.status_code, 200)

    def test_aboutpage_template(self):
        self.assertTemplateUsed(self.response, "about.html")

    def test_aboutpage_contains_correct_html(self):
        self.assertContains(self.response, "About Page")

    def test_aboutpage_does_not_contain_incorrect_html(self):
        self.assertNotContains(self.response, "I should not be here.")

    def test_aboutpage_url_resolves_aboutpageview(self):
        resolved = resolve("/about/")
        self.assertEqual(resolved.func.__name__, AboutPageView.as_view().__name__)
class SignUpPageTests(TestCase):
    # Uses TestCase (not SimpleTestCase) because test_signup_form hits the database.
    username = "newuser"
    email = "<EMAIL>"
    def test_signup_page_status_code(self):
        # The signup page must be reachable by its hard-coded path.
        response = self.client.get("/accounts/signup/")
        self.assertEqual(response.status_code, 200)
    def test_view_url_by_name(self):
        response = self.client.get(reverse("account_signup"))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse("account_signup"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "account/signup.html")
    def test_signup_form(self):
        # Creating a user directly should leave exactly one user in the DB
        # with the expected username and email.
        new_user = get_user_model().objects.create_user(  # noqa: F841
            self.username, self.email
        )
        self.assertEqual(get_user_model().objects.all().count(), 1)
        self.assertEqual(
            get_user_model().objects.all()[0].username, self.username
        )
        self.assertEqual(get_user_model().objects.all()[0].email, self.email)
| StarcoderdataPython |
3556465 | import sys
from typing import Dict, Optional
import click
from cognite.client.exceptions import CogniteAPIError, CogniteNotFoundError
from cognite.transformations_cli.clients import get_client
from cognite.transformations_cli.commands.utils import (
exit_with_cognite_api_error,
get_transformation,
is_id_exclusive,
print_jobs,
print_metrics,
print_notifications,
print_sql,
print_transformations,
)
@click.command(help="Show details of a transformation")
@click.option("--id", help="The id of the transformation to show. Either this or --external-id can be specified.")
@click.option(
    "--external-id", help="The external_id of the transformation to show. Either this or --id can be specified."
)
@click.option("--job-id", help="The id of the job to show. Include this to show job details.")
@click.pass_obj
def show(obj: Dict, id: Optional[int], external_id: Optional[str], job_id: Optional[int]) -> None:
    """Print details for a single transformation and/or one of its jobs.

    Exits with status 1 when no identifier at all is provided, and via
    exit_with_cognite_api_error() on Cognite API failures.
    Fixes vs. original: user-facing help typo ("detalis"), redundant
    double int() conversion of job_id.
    """
    client = get_client(obj)
    is_id_exclusive(id, external_id)
    if not (id or external_id or job_id):
        click.echo("Please provide id, external_id or job_id")
        sys.exit(1)
    try:
        tr = None
        job = None
        if id or external_id:
            # TODO Investigate why id requires type casting as it doesn't in "jobs command"
            id = int(id) if id else None
            tr = get_transformation(client=client, id=id, external_id=external_id)
            click.echo("Transformation details:")
            click.echo(print_transformations([tr]))
            notifications = client.transformations.notifications.list(
                transformation_id=id, transformation_external_id=external_id, limit=-1
            )
            if tr.query:
                click.echo("SQL Query:")
                click.echo(print_sql(tr.query))
            if notifications:
                click.echo("Notifications:")
                click.echo(print_notifications(notifications))
        if job_id:
            click.echo()
            job_id = int(job_id)
            job = client.transformations.jobs.retrieve(id=job_id)
            # Internal bookkeeping metrics are filtered out of the display.
            metrics = [
                m
                for m in client.transformations.jobs.list_metrics(id=job_id)
                if m.name != "requestsWithoutRetries" and m.name != "requests"
            ]
            click.echo("Job details:")
            click.echo(print_jobs([job]))
            click.echo("SQL Query:")
            click.echo(print_sql(job.query))
            if job.status == "Failed":
                click.echo(f"Error Details: {job.error}")
            if metrics:
                click.echo("Progress:")
                click.echo(print_metrics(metrics))
    except (CogniteNotFoundError, CogniteAPIError) as e:
        exit_with_cognite_api_error(e)
4866800 | BOOST_VERSION = "1.70.0"
def new_boost_library(name, deps = []):
    """Declare both the cc_library and the genrule that builds a Boost sub-library."""
    boost_library(name, deps)
    boost_build_rule(name)
def boost_library(name, deps = []):
    """Declare a cc_library wrapping the prebuilt static/shared Boost libs.

    The srcs select() picks the platform-appropriate shared-library suffix
    (.dylib on osx, versioned .so elsewhere); headers are globbed from the
    matching boost/<name> tree.
    """
    native.cc_library(
        name = name,
        srcs =
        select({
            "osx": [
                "libboost_{}.a".format(name),
                "libboost_{}.dylib".format(name)
            ],
            "clang": [
                "libboost_{}.a".format(name),
                "libboost_{}.so.{}".format(name, BOOST_VERSION),
            ],
            "//conditions:default": [
                "libboost_{}.a".format(name),
                "libboost_{}.so.{}".format(name, BOOST_VERSION),
            ],
        }),
        hdrs = native.glob(
            [
                "boost/{}.hpp".format(name),
                "boost/{}/**/*.h".format(name),
                "boost/{}/**/*.hpp".format(name),
            ],
        ),
        visibility = ["//visibility:public"],
        deps = [
            ":all",
        ] + deps,
    )
def boost_build_rule(name):
    """Declare a genrule that compiles one Boost sub-library with b2.

    The cmd select() runs Boost.Build (b2) from the source tree and copies the
    resulting static and shared artifacts to the declared outs; the toolset
    flag differs per platform configuration.
    """
    native.genrule(
        name = "build_boost_{}".format(name),
        srcs = native.glob(
            [
                "Jamroot",
                "**/Jamfile*",
                "**/*.jam",
                "**/*.cpp",
                "**/*.c",
                "**/*.S",
                "**/*.h",
                "**/*.hpp",
                "**/*.ipp",
                "project-config.jam",
            ],
            exclude = [
                "bazel-*",
                "libs/wave/test/**/*",
            ],
        ) + [
            "project-config.jam",
        ],
        outs = [
            "libboost_{}.a".format(name),
            # Until we can figure out how to correctly build dylib files and dynamically link the test binaries
            # on MacOS, when you are running on MacOS you can comment out the "libboost_{}.so.{}" line and uncomment
            # the "libboost_{}.dylib" line to compile the project and run the tests.
            # Unfortunately, select statements are not allowed for genrule `outs` :(
            "libboost_{}.so.{}".format(name, BOOST_VERSION),
            # "libboost_{}.dylib".format(name), # Use this on MacOS instead of the .so line above.
        ],
        cmd =
        select({
            "osx": """
            ROOT=$$(dirname $(location Jamroot))
            cp $(location project-config.jam) $$ROOT
            pushd $$ROOT
            ../../$(location b2) libboost_{name}.a libboost_{name}.dylib
            popd
            cp $$ROOT/stage/lib/libboost_{name}.a $(location libboost_{name}.a)
            cp $$ROOT/stage/lib/libboost_{name}.dylib $(location libboost_{name}.dylib)
            """.format(name = name),
            "clang": """
            ROOT=$$(dirname $(location Jamroot))
            cp $(location project-config.jam) $$ROOT
            pushd $$ROOT
            ../../$(location b2) toolset=clang libboost_{name}.a libboost_{name}.so.{version}
            popd
            cp $$ROOT/stage/lib/libboost_{name}.a $(location libboost_{name}.a)
            cp $$ROOT/stage/lib/libboost_{name}.so.{version} $(location libboost_{name}.so.{version})
            """.format(name = name, version = BOOST_VERSION),
            "//conditions:default": """
            ROOT=$$(dirname $(location Jamroot))
            cp $(location project-config.jam) $$ROOT
            pushd $$ROOT
            ../../$(location b2) libboost_{name}.a libboost_{name}.so.{version}
            popd
            cp $$ROOT/stage/lib/libboost_{name}.a $(location libboost_{name}.a)
            cp $$ROOT/stage/lib/libboost_{name}.so.{version} $(location libboost_{name}.so.{version})
            """.format(name = name, version = BOOST_VERSION),
        }),
        tools = ["b2"],
    )
| StarcoderdataPython |
11377386 | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from collections import defaultdict
from decimal import Decimal
from itertools import groupby
from typing import Dict, Optional, Tuple
from django.core.exceptions import ValidationError
from django.core.validators import MinValueValidator
from django.db import models
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from django_scopes import ScopedManager
from pretix.base.decimal import round_decimal
from pretix.base.models import fields
from pretix.base.models.base import LoggedModel
class Discount(LoggedModel):
    """An automatic discount rule attached to an event.

    Each rule consists of a *condition* (which cart positions match: product
    scope, add-on handling, minimum count or minimum gross value) and a
    *benefit* (a percentage discount applied to all, or only the cheapest N,
    of the matching positions). ``apply()`` evaluates the rule against a cart
    snapshot and returns the new prices of consumed positions.
    """
    SUBEVENT_MODE_MIXED = 'mixed'
    SUBEVENT_MODE_SAME = 'same'
    SUBEVENT_MODE_DISTINCT = 'distinct'
    SUBEVENT_MODE_CHOICES = (
        (SUBEVENT_MODE_MIXED, pgettext_lazy('subevent', 'Dates can be mixed without limitation')),
        (SUBEVENT_MODE_SAME, pgettext_lazy('subevent', 'All matching products must be for the same date')),
        (SUBEVENT_MODE_DISTINCT, pgettext_lazy('subevent', 'Each matching product must be for a different date')),
    )
    event = models.ForeignKey(
        'Event',
        on_delete=models.CASCADE,
        related_name='discounts',
    )
    active = models.BooleanField(
        verbose_name=_("Active"),
        default=True,
    )
    internal_name = models.CharField(
        verbose_name=_("Internal name"),
        max_length=255
    )
    position = models.PositiveIntegerField(
        default=0,
        verbose_name=_("Position")
    )
    sales_channels = fields.MultiStringField(
        verbose_name=_('Sales channels'),
        default=['web'],
        blank=False,
    )
    available_from = models.DateTimeField(
        verbose_name=_("Available from"),
        null=True,
        blank=True,
    )
    available_until = models.DateTimeField(
        verbose_name=_("Available until"),
        null=True,
        blank=True,
    )
    subevent_mode = models.CharField(
        verbose_name=_('Event series handling'),
        max_length=50,
        default=SUBEVENT_MODE_MIXED,
        choices=SUBEVENT_MODE_CHOICES,
    )
    condition_all_products = models.BooleanField(
        default=True,
        verbose_name=_("Apply to all products (including newly created ones)")
    )
    condition_limit_products = models.ManyToManyField(
        'Item',
        verbose_name=_("Apply to specific products"),
        blank=True
    )
    condition_apply_to_addons = models.BooleanField(
        default=True,
        verbose_name=_("Apply to add-on products"),
        help_text=_("Discounts never apply to bundled products"),
    )
    condition_ignore_voucher_discounted = models.BooleanField(
        default=False,
        verbose_name=_("Ignore products discounted by a voucher"),
        help_text=_("If this option is checked, products that already received a discount through a voucher will not "
                    "be considered for this discount. However, products that use a voucher only to e.g. unlock a "
                    "hidden product or gain access to sold-out quota will still receive the discount."),
    )
    condition_min_count = models.PositiveIntegerField(
        verbose_name=_('Minimum number of matching products'),
        default=0,
    )
    condition_min_value = models.DecimalField(
        verbose_name=_('Minimum gross value of matching products'),
        decimal_places=2,
        max_digits=10,
        default=Decimal('0.00'),
    )
    benefit_discount_matching_percent = models.DecimalField(
        verbose_name=_('Percentual discount on matching products'),
        decimal_places=2,
        max_digits=10,
        default=Decimal('0.00'),
        validators=[MinValueValidator(Decimal('0.00'))],
    )
    benefit_only_apply_to_cheapest_n_matches = models.PositiveIntegerField(
        verbose_name=_('Apply discount only to this number of matching products'),
        help_text=_(
            'This option allows you to create discounts of the type "buy X get Y reduced/for free". For example, if '
            'you set "Minimum number of matching products" to four and this value to two, the customer\'s cart will be '
            'split into groups of four tickets and the cheapest two tickets within every group will be discounted. If '
            'you want to grant the discount on all matching products, keep this field empty.'
        ),
        null=True,
        blank=True,
        validators=[MinValueValidator(1)],
    )
    # more feature ideas:
    # - max_usages_per_order
    # - promote_to_user_if_almost_satisfied
    # - require_customer_account
    objects = ScopedManager(organizer='event__organizer')
    class Meta:
        ordering = ('position', 'id')
    def __str__(self):
        return self.internal_name
    @property
    def sortkey(self):
        """Sort key: user-defined position first, database id as tie-breaker."""
        return self.position, self.id
    def __lt__(self, other) -> bool:
        return self.sortkey < other.sortkey
    @classmethod
    def validate_config(cls, data):
        """Reject combinations of condition/benefit settings that are not supported.

        :param data: dict with the relevant field values (as from a form's cleaned_data)
        :raises ValidationError: if the combination is forbidden
        """
        # We forbid a few combinations of settings, because we don't think they are neccessary and at the same
        # time they introduce edge cases, in which it becomes almost impossible to compute the discount optimally
        # and also very hard to understand for the user what is going on.
        if data.get('condition_min_count') and data.get('condition_min_value'):
            raise ValidationError(
                _('You can either set a minimum number of matching products or a minimum value, not both.')
            )
        if not data.get('condition_min_count') and not data.get('condition_min_value'):
            raise ValidationError(
                _('You need to either set a minimum number of matching products or a minimum value.')
            )
        if data.get('condition_min_value') and data.get('benefit_only_apply_to_cheapest_n_matches'):
            raise ValidationError(
                _('You cannot apply the discount only to some of the matched products if you are matching '
                  'on a minimum value.')
            )
        if data.get('subevent_mode') == cls.SUBEVENT_MODE_DISTINCT and data.get('condition_min_value'):
            raise ValidationError(
                _('You cannot apply the discount only to bookings of different dates if you are matching '
                  'on a minimum value.')
            )
    def allow_delete(self):
        """A discount may only be deleted while no order position references it."""
        return not self.orderposition_set.exists()
    def clean(self):
        # Model-level validation delegates to validate_config so forms and
        # direct saves enforce the same invariants.
        super().clean()
        Discount.validate_config({
            'condition_min_count': self.condition_min_count,
            'condition_min_value': self.condition_min_value,
            'benefit_only_apply_to_cheapest_n_matches': self.benefit_only_apply_to_cheapest_n_matches,
            'subevent_mode': self.subevent_mode,
        })
    def _apply_min_value(self, positions, idx_group, result):
        """Discount every position in ``idx_group`` if their combined gross
        value reaches ``condition_min_value``; writes new prices into ``result``.
        Only valid for min-value rules (asserts the validation invariant)."""
        if self.condition_min_value and sum(positions[idx][2] for idx in idx_group) < self.condition_min_value:
            return
        if self.condition_min_count or self.benefit_only_apply_to_cheapest_n_matches:
            raise ValueError('Validation invariant violated.')
        for idx in idx_group:
            previous_price = positions[idx][2]
            new_price = round_decimal(
                previous_price * (Decimal('100.00') - self.benefit_discount_matching_percent) / Decimal('100.00'),
                self.event.currency,
            )
            result[idx] = new_price
    def _apply_min_count(self, positions, idx_group, result):
        """Discount positions in ``idx_group`` if at least ``condition_min_count``
        of them match; writes new prices into ``result``. When
        ``benefit_only_apply_to_cheapest_n_matches`` is set, only the cheapest
        N per full group of ``condition_min_count`` get the reduced price,
        while the rest of each full group is marked as consumed at its old price."""
        if len(idx_group) < self.condition_min_count:
            return
        if not self.condition_min_count or self.condition_min_value:
            raise ValueError('Validation invariant violated.')
        if self.benefit_only_apply_to_cheapest_n_matches:
            if not self.condition_min_count:
                raise ValueError('Validation invariant violated.')
            idx_group = sorted(idx_group, key=lambda idx: (positions[idx][2], -idx))  # sort by line_price
            # Prevent over-consuming of items, i.e. if our discount is "buy 2, get 1 free", we only
            # want to match multiples of 3
            consume_idx = idx_group[:len(idx_group) // self.condition_min_count * self.condition_min_count]
            benefit_idx = idx_group[:len(idx_group) // self.condition_min_count * self.benefit_only_apply_to_cheapest_n_matches]
        else:
            consume_idx = idx_group
            benefit_idx = idx_group
        for idx in benefit_idx:
            previous_price = positions[idx][2]
            new_price = round_decimal(
                previous_price * (Decimal('100.00') - self.benefit_discount_matching_percent) / Decimal('100.00'),
                self.event.currency,
            )
            result[idx] = new_price
        for idx in consume_idx:
            # Consumed-but-not-discounted positions keep their old price.
            result.setdefault(idx, positions[idx][2])
    def apply(self, positions: Dict[int, Tuple[int, Optional[int], Decimal, bool, Decimal]]) -> Dict[int, Decimal]:
        """
        Tries to apply this discount to a cart
        :param positions: Dictionary mapping IDs to tuples of the form
                          ``(item_id, subevent_id, line_price_gross, is_addon_to, voucher_discount)``.
                          Bundled positions may not be included.
        :return: A dictionary mapping keys from the input dictionary to new prices. All positions
                 contained in this dictionary are considered "consumed" and should not be considered
                 by other discounts.
        """
        result = {}
        if not self.active:
            return result
        limit_products = set()
        if not self.condition_all_products:
            limit_products = {p.pk for p in self.condition_limit_products.all()}
        # First, filter out everything not even covered by our product scope
        initial_candidates = [
            idx
            for idx, (item_id, subevent_id, line_price_gross, is_addon_to, voucher_discount) in positions.items()
            if (
                (self.condition_all_products or item_id in limit_products) and
                (self.condition_apply_to_addons or not is_addon_to) and
                (not self.condition_ignore_voucher_discounted or voucher_discount is None or voucher_discount == Decimal('0.00'))
            )
        ]
        if self.subevent_mode == self.SUBEVENT_MODE_MIXED:  # also applies to non-series events
            if self.condition_min_count:
                self._apply_min_count(positions, initial_candidates, result)
            else:
                self._apply_min_value(positions, initial_candidates, result)
        elif self.subevent_mode == self.SUBEVENT_MODE_SAME:
            def key(idx):
                return positions[idx][1]  # subevent_id
            # Build groups of candidates with the same subevent, then apply our regular algorithm
            # to each group
            _groups = groupby(sorted(initial_candidates, key=key), key=key)
            candidate_groups = [list(g) for k, g in _groups]
            for g in candidate_groups:
                if self.condition_min_count:
                    self._apply_min_count(positions, g, result)
                else:
                    self._apply_min_value(positions, g, result)
        elif self.subevent_mode == self.SUBEVENT_MODE_DISTINCT:
            if self.condition_min_value:
                raise ValueError('Validation invariant violated.')
            # Build optimal groups of candidates with distinct subevents, then apply our regular algorithm
            # to each group. Optimal, in this case, means:
            # - First try to build as many groups of size condition_min_count as possible while trying to
            #   balance out the cheapest products so that they are not all in the same group
            # - Then add remaining positions to existing groups if possible
            candidate_groups = []
            # Build a list of subevent IDs in descending order of frequency
            subevent_to_idx = defaultdict(list)
            for idx, p in positions.items():
                subevent_to_idx[p[1]].append(idx)
            for v in subevent_to_idx.values():
                v.sort(key=lambda idx: positions[idx][2])
            subevent_order = sorted(list(subevent_to_idx.keys()), key=lambda s: len(subevent_to_idx[s]), reverse=True)
            # Build groups of exactly condition_min_count distinct subevents
            current_group = []
            while True:
                # Build a list of candidates, which is a list of all positions belonging to a subevent of the
                # maximum cardinality, where the cardinality of a subevent is defined as the number of tickets
                # for that subevent that are not yet part of any group
                candidates = []
                cardinality = None
                for se, l in subevent_to_idx.items():
                    l = [ll for ll in l if ll not in current_group]
                    if cardinality and len(l) != cardinality:
                        continue
                    if se not in {positions[idx][1] for idx in current_group}:
                        candidates += l
                        cardinality = len(l)
                if not candidates:
                    break
                # Sort the list by prices, then pick one. For "buy 2 get 1 free" we apply a "pick 1 from the start
                # and 2 from the end" scheme to optimize price distribution among groups
                candidates = sorted(candidates, key=lambda idx: positions[idx][2])
                if len(current_group) < (self.benefit_only_apply_to_cheapest_n_matches or 0):
                    candidate = candidates[0]
                else:
                    candidate = candidates[-1]
                current_group.append(candidate)
                # Only add full groups to the list of groups
                if len(current_group) >= max(self.condition_min_count, 1):
                    candidate_groups.append(current_group)
                    for c in current_group:
                        subevent_to_idx[positions[c][1]].remove(c)
                    current_group = []
            # Distribute "leftovers"
            for se in subevent_order:
                if subevent_to_idx[se]:
                    for group in candidate_groups:
                        if se not in {positions[idx][1] for idx in group}:
                            group.append(subevent_to_idx[se].pop())
                            if not subevent_to_idx[se]:
                                break
            for g in candidate_groups:
                self._apply_min_count(positions, g, result)
        return result
| StarcoderdataPython |
361202 | import aioprocessing
from dataclasses import dataclass
from decimal import Decimal
import os
from pathlib import Path
from typing import Optional, Any, Dict, AsyncIterable, List
from hummingbot.core.event.events import TradeType
from hummingbot.core.utils import detect_available_port
# Cached GatewayPaths instance, computed once per process (see get_gateway_paths()).
_default_paths: Optional["GatewayPaths"] = None
# Pipe to the process handling docker calls, set via set_hummingbot_pipe().
_hummingbot_pipe: Optional[aioprocessing.AioConnection] = None
# Docker image used for the gateway container; ARM hosts get an ARM-specific tag.
GATEWAY_DOCKER_REPO: str = "coinalpha/gateway-v2-dev"
GATEWAY_DOCKER_TAG: str = "20220401-arm" if os.uname().machine in {"arm64", "aarch64"} else "20220329"
S_DECIMAL_0: Decimal = Decimal(0)
def is_inside_docker() -> bool:
    """
    Checks whether this Hummingbot instance is running inside a container.
    :return: True if running inside container, False otherwise.
    """
    if os.name != "posix":
        return False
    try:
        # PID 1's command line carries the "quickstart" marker when containerized.
        cmdline = Path("/proc/1/cmdline").read_bytes()
    except Exception:
        # Unreadable /proc (or no /proc at all) counts as "not in a container".
        return False
    return b"quickstart" in cmdline
def get_gateway_container_name() -> str:
    """
    Calculates the name for the gateway container, for this Hummingbot instance.
    :return: Gateway container name
    """
    from hummingbot.client.config.global_config_map import global_config_map
    # Only the first 8 chars of the instance id are used, to keep the name short
    # while still distinguishing instances.
    instance_id_suffix: str = global_config_map["instance_id"].value[:8]
    return f"hummingbot-gateway-{instance_id_suffix}"
@dataclass
class GatewayPaths:
    """
    Represents the local paths and Docker mount paths for a gateway container's conf, certs and logs directories.
    Local paths represent where Hummingbot client sees the paths from the perspective of its local environment. If
    Hummingbot is being run from source, then the local environment is the same as the host environment. However, if
    Hummingbot is being run as a container, then the local environment is the container's environment.
    Mount paths represent where the gateway container's paths are located on the host environment. If Hummingbot is
    being run from source, then these should be the same as the local paths. However, if Hummingbot is being run as a
    container - then these must be fed to it from external sources (e.g. environment variables), since containers
    generally only have very restricted access to the host filesystem.
    """
    # Paths as seen from Hummingbot's own environment.
    local_conf_path: Path
    local_certs_path: Path
    local_logs_path: Path
    # Host-side paths mounted into the gateway container.
    mount_conf_path: Path
    mount_certs_path: Path
    mount_logs_path: Path
    def __post_init__(self):
        """
        Ensure the local paths are created when a GatewayPaths object is created.
        """
        for path in [self.local_conf_path, self.local_certs_path, self.local_logs_path]:
            path.mkdir(mode=0o755, parents=True, exist_ok=True)
def get_gateway_paths() -> GatewayPaths:
    """
    Calculates the default paths for a gateway container.
    For Hummingbot running from source, the gateway files are to be stored in ~/.hummingbot-gateway/<container name>/
    For Hummingbot running inside container, the gateway files are to be stored in ~/.hummingbot-gateway/ locally;
    and inside the paths pointed to be CERTS_FOLDER, GATEWAY_CONF_FOLDER, GATEWAY_LOGS_FOLDER environment variables
    on the host system.

    :raises EnvironmentError: when running containerized without the three mount env vars set
    """
    # Memoized: the paths are computed once per process.
    global _default_paths
    if _default_paths is not None:
        return _default_paths
    inside_docker: bool = is_inside_docker()
    gateway_container_name: str = get_gateway_container_name()
    # `os.getenv(...) and Path(...)` yields None/'' when the env var is unset.
    external_certs_path: Optional[Path] = os.getenv("CERTS_FOLDER") and Path(os.getenv("CERTS_FOLDER"))
    external_conf_path: Optional[Path] = os.getenv("GATEWAY_CONF_FOLDER") and Path(os.getenv("GATEWAY_CONF_FOLDER"))
    external_logs_path: Optional[Path] = os.getenv("GATEWAY_LOGS_FOLDER") and Path(os.getenv("GATEWAY_LOGS_FOLDER"))
    if inside_docker and not (external_certs_path and external_conf_path and external_logs_path):
        raise EnvironmentError("CERTS_FOLDER, GATEWAY_CONF_FOLDER and GATEWAY_LOGS_FOLDER must be defined when "
                               "running as container.")
    base_path: Path = (
        Path.home().joinpath(".hummingbot-gateway")
        if inside_docker
        else Path.home().joinpath(f".hummingbot-gateway/{gateway_container_name}")
    )
    local_certs_path: Path = base_path.joinpath("certs")
    local_conf_path: Path = base_path.joinpath("conf")
    local_logs_path: Path = base_path.joinpath("logs")
    # External paths (when provided) take precedence as the host-side mounts.
    mount_certs_path: Path = external_certs_path or local_certs_path
    mount_conf_path: Path = external_conf_path or local_conf_path
    mount_logs_path: Path = external_logs_path or local_logs_path
    _default_paths = GatewayPaths(
        local_conf_path=local_conf_path,
        local_certs_path=local_certs_path,
        local_logs_path=local_logs_path,
        mount_conf_path=mount_conf_path,
        mount_certs_path=mount_certs_path,
        mount_logs_path=mount_logs_path
    )
    return _default_paths
def get_default_gateway_port() -> int:
    """Derive a per-instance base port from the instance id, then return the first free port at or above it."""
    from hummingbot.client.config.global_config_map import global_config_map
    # First 4 hex chars of the instance id map to a base port in [16000, 31999].
    return detect_available_port(16000 + int(global_config_map.get("instance_id").value[:4], 16) % 16000)
def set_hummingbot_pipe(conn: aioprocessing.AioConnection):
    """Store the pipe used by docker_ipc()/docker_ipc_with_generator() to reach the docker-handling process."""
    global _hummingbot_pipe
    _hummingbot_pipe = conn
async def detect_existing_gateway_container() -> Optional[Dict[str, Any]]:
    """Return the Docker record for this instance's gateway container, or None."""
    try:
        matches: List[Dict[str, Any]] = await docker_ipc(
            "containers",
            all=True,
            filters={
                "name": get_gateway_container_name(),
            })
    except Exception:
        # Any docker communication failure is treated as "no container found".
        return None
    return matches[0] if matches else None
async def start_existing_gateway_container():
    """Start this instance's gateway container if it exists and is not already running."""
    container_info: Optional[Dict[str, Any]] = await detect_existing_gateway_container()
    if container_info is not None and container_info["State"] != "running":
        await docker_ipc("start", get_gateway_container_name())
async def docker_ipc(method_name: str, *args, **kwargs) -> Any:
    """
    Forward a single docker method call over the pipe set by set_hummingbot_pipe()
    and await its one reply.

    :raises RuntimeError: if the pipe has not been set up
    :raises Exception: any exception object received back over the pipe
    """
    from hummingbot.client.hummingbot_application import HummingbotApplication
    global _hummingbot_pipe
    if _hummingbot_pipe is None:
        raise RuntimeError("Not in the main process, or hummingbot wasn't started via `fork_and_start()`.")
    try:
        _hummingbot_pipe.send((method_name, args, kwargs))
        data = await _hummingbot_pipe.coro_recv()
        # An Exception instance as payload signals a failure on the remote side.
        if isinstance(data, Exception):
            HummingbotApplication.main_application().notify(
                "\nError: Unable to communicate with docker socket. "
                "\nEnsure dockerd is running and /var/run/docker.sock exists, then restart Hummingbot.")
            raise data
        return data
    # NOTE(review): the `raise data` above is re-caught here, so the notice can
    # be shown twice for remote errors — confirm that is intended.
    except Exception as e:  # unable to communicate with docker socket
        HummingbotApplication.main_application().notify(
            "\nError: Unable to communicate with docker socket. "
            "\nEnsure dockerd is running and /var/run/docker.sock exists, then restart Hummingbot.")
        raise e
async def docker_ipc_with_generator(method_name: str, *args, **kwargs) -> AsyncIterable[str]:
    """
    Forward a docker method call over the pipe and yield its streamed replies
    until a None sentinel is received.

    :raises RuntimeError: if the pipe has not been set up
    :raises Exception: any exception object received over the pipe
    """
    from hummingbot.client.hummingbot_application import HummingbotApplication
    global _hummingbot_pipe
    if _hummingbot_pipe is None:
        raise RuntimeError("Not in the main process, or hummingbot wasn't started via `fork_and_start()`.")
    try:
        _hummingbot_pipe.send((method_name, args, kwargs))
        while True:
            data = await _hummingbot_pipe.coro_recv()
            # None marks the end of the remote stream.
            if data is None:
                break
            # An Exception instance as payload signals a remote failure.
            if isinstance(data, Exception):
                HummingbotApplication.main_application().notify(
                    "\nError: Unable to communicate with docker socket. "
                    "\nEnsure dockerd is running and /var/run/docker.sock exists, then restart Hummingbot.")
                raise data
            yield data
    except Exception as e:  # unable to communicate with docker socket
        HummingbotApplication.main_application().notify(
            "\nError: Unable to communicate with docker socket. "
            "\nEnsure dockerd is running and /var/run/docker.sock exists, then restart Hummingbot.")
        raise e
def check_transaction_exceptions(
        allowances: Dict[str, Decimal],
        balances: Dict[str, Decimal],
        base_asset: str,
        quote_asset: str,
        amount: Decimal,
        side: TradeType,
        gas_limit: int,
        gas_cost: Decimal,
        gas_asset: str,
        swaps_count: int
) -> List[str]:
    """
    Check trade data for Ethereum decentralized exchanges.

    Returns a list of human-readable warnings; an empty list means the trade
    passed all checks (gas balance, gas limit, token allowance).
    """
    exception_list = []
    swaps_message: str = f"Total swaps: {swaps_count}"
    gas_asset_balance: Decimal = balances.get(gas_asset, S_DECIMAL_0)

    # check for sufficient gas
    if gas_asset_balance < gas_cost:
        exception_list.append(f"Insufficient {gas_asset} balance to cover gas:"
                              f" Balance: {gas_asset_balance}. Est. gas cost: {gas_cost}. {swaps_message}")

    # The asset being spent is the quote asset on a BUY, the base asset on a SELL.
    asset_out: str = quote_asset if side is TradeType.BUY else base_asset
    asset_out_allowance: Decimal = allowances.get(asset_out, S_DECIMAL_0)

    # check for gas limit set too low
    gas_limit_threshold: int = 21000
    if gas_limit < gas_limit_threshold:
        exception_list.append(f"Gas limit {gas_limit} below recommended {gas_limit_threshold} threshold.")

    # check for insufficient token allowance
    # Bug fix: use the defaulted lookup (asset_out_allowance) instead of
    # allowances[asset_out], which raised KeyError when the asset had no
    # allowance entry at all.
    if asset_out_allowance < amount:
        exception_list.append(f"Insufficient {asset_out} allowance {asset_out_allowance}. Amount to trade: {amount}")

    return exception_list
async def start_gateway():
    """Start the gateway docker container unless it is already running."""
    from hummingbot.client.hummingbot_application import HummingbotApplication
    try:
        # Look the gateway container up by name, including stopped ones.
        candidates = await docker_ipc(
            "containers",
            all=True,
            filters={"name": get_gateway_container_name()}
        )
        if not candidates:
            raise ValueError(f"Gateway container {get_gateway_container_name()} not found. ")
        gateway_info = candidates[0]
        gateway_id = gateway_info["Id"]
        if gateway_info["State"] == "running":
            HummingbotApplication.main_application().notify(f"Gateway container {gateway_id} already running.")
            return
        await docker_ipc("start", container=gateway_id)
        HummingbotApplication.main_application().notify(f"Gateway container {gateway_id} has started.")
    except Exception as e:
        HummingbotApplication.main_application().notify(f"Error occurred starting Gateway container. {e}")
async def stop_gateway():
    """Stop the gateway docker container if it is currently running."""
    from hummingbot.client.hummingbot_application import HummingbotApplication
    try:
        # Look the gateway container up by name, including stopped ones.
        candidates = await docker_ipc(
            "containers",
            all=True,
            filters={"name": get_gateway_container_name()}
        )
        if not candidates:
            raise ValueError(f"Gateway container {get_gateway_container_name()} not found.")
        gateway_info = candidates[0]
        gateway_id = gateway_info["Id"]
        if gateway_info["State"] != "running":
            HummingbotApplication.main_application().notify(f"Gateway container {gateway_id} not running.")
            return
        await docker_ipc("stop", container=gateway_id)
        HummingbotApplication.main_application().notify(f"Gateway container {gateway_id} successfully stopped.")
    except Exception as e:
        HummingbotApplication.main_application().notify(f"Error occurred stopping Gateway container. {e}")
async def restart_gateway():
    """Restart the gateway docker container by stopping and then starting it.

    Errors inside stop/start are handled (and notified) by those helpers, so
    this coroutine itself does not raise on docker failures.
    """
    from hummingbot.client.hummingbot_application import HummingbotApplication
    await stop_gateway()
    await start_gateway()
    HummingbotApplication.main_application().notify("Gateway will be ready momentarily.")
| StarcoderdataPython |
5031274 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from torch import nn
from configs.constants import Constants
class ConvolutionTransposeBlock(nn.Module):
    """2-D transposed-convolution block.

    Applies ConvTranspose2d with a (width, height) kernel, optionally followed
    by batch normalization and a ReLU activation, as configured by the
    constructor flags and the global U-Net parameters.
    """

    def __init__(self, width, height, in_channels, out_channels, stride, padding, output_padding, batch_on, relu_on):
        super().__init__()
        basic_parameters = Constants.get_basic_parameters()
        unet_parameters = basic_parameters["unet"]
        self.width = width
        self.height = height
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.relu_on = relu_on
        # Bug fix: batch normalization is effective only when BOTH the
        # constructor flag and the config switch are enabled.  Previously
        # self.batch_on could be True while self.bnorm was never created
        # (config switch off), making forward() raise AttributeError.
        self.batch_on = batch_on and unet_parameters["batch_normalization"]
        self.weight_scale_init_method = unet_parameters["weight_scale_init_method"]
        self.convt = nn.ConvTranspose2d(in_channels, out_channels, (width, height),
                                        stride=stride, bias=False, padding=padding, output_padding=output_padding)
        if self.batch_on:
            self.bnorm = nn.BatchNorm2d(out_channels)
        if self.relu_on:
            self.relu = nn.ReLU()

    def forward(self, x):
        """Run the transposed convolution, then optional batch norm and ReLU."""
        x = self.convt(x)
        if self.batch_on:
            x = self.bnorm(x)
        if self.relu_on:
            x = self.relu(x)
        return x
| StarcoderdataPython |
3287751 | <filename>src/Services/Phonebook/Root.py
from flask.views import MethodView
from flask import jsonify
from Helpers.EndpointList import EndpointList
class Root(MethodView):
    """Service root endpoint that lists the available sub-endpoints."""

    def get(self, serviceName):
        # Respond with the endpoint catalogue for the requested service.
        return jsonify(Endpoints=self.getEndpoints(serviceName))

    def getEndpoints(self, serviceName):
        # Build the catalogue; currently only the 'android' endpoint exists.
        catalogue = EndpointList(serviceName)
        catalogue.newEndpoint('android')
        return catalogue.getEndpointHrefs()
| StarcoderdataPython |
1625163 | #!/usr/bin/env python
#'''
#<NAME>
#'''
import argparse, urllib, os
def parse_downloadFiles_args(argv=None):
    """Parse command-line arguments for the downloader.

    :param argv: optional list of argument strings; when None (the default,
        preserving the original behavior) the arguments are taken from
        ``sys.argv[1:]``.  Accepting an explicit list makes the parser
        testable and reusable from other code.
    :return: argparse.Namespace with a single ``file`` attribute.
    """
    parser = argparse.ArgumentParser(description="Take in a file where the first column holds the url of a file to be downloaded, will overwrite current files if they exist")
    parser.add_argument('-f', '--file', type=str, required=True)
    return parser.parse_args(argv)
def download_files(urlsFile):
    """Download every URL found in the first column of *urlsFile*.

    Each file is saved under its basename in the current directory,
    overwriting any existing file of the same name.
    """
    with open(urlsFile, "r") as handle:
        for raw_line in handle:
            url = raw_line.split()[0]
            target = os.path.basename(url)
            print ("Downloading {url} to {file}".format(url=url, file=target))
            urllib.urlretrieve(url, target)
if __name__ == "__main__":
    # Parse the CLI arguments and download every URL listed in the given file.
    args = parse_downloadFiles_args()
    download_files(args.file)
| StarcoderdataPython |
62639 | <reponame>scionrep/scioncc_new<filename>src/ion/data/persist/test/test_hdf5_persist.py
#!/usr/bin/env python
__author__ = '<NAME>'
from nose.plugins.attrib import attr
import gevent
import yaml
import os
import random
from pyon.util.int_test import IonIntegrationTestCase
from pyon.public import BadRequest, NotFound, IonObject, RT, PRED, OT, CFG, StreamSubscriber, log
from pyon.ion.identifier import create_simple_unique_id
from ion.data.packet.packet_builder import DataPacketBuilder
from ion.data.persist.hdf5_dataset import DS_BASE_PATH, DS_FILE_PREFIX, DatasetHDF5Persistence, DS_TIMEIDX_PATH, DS_TIMEINGEST_PATH
from ion.data.schema.schema import DataSchemaParser
from ion.util.hdf_utils import HDFLockingFile
from ion.util.ntp_time import NTP4Time
from interface.objects import DataPacket
@attr('INT', group='data')
class TestHDF5Persist(IonIntegrationTestCase):
    """Test for HDF5 persistence
    """

    def setUp(self):
        # Skip the whole test class when the optional h5py dependency is missing.
        from ion.data.persist.hdf5_dataset import h5py
        if h5py is None:
            self.skipTest("No h5py available to test")
        self._start_container()
        #self.container.start_rel_from_url('res/deploy/basic.yml')
        self.rr = self.container.resource_registry
        self.system_actor_id = None

    def tearDown(self):
        pass

    def _get_data_packet(self, index, num_rows=1):
        """ Return a data packet with number of samples.
        The index indicates the offset from the starting timestamp, 10 sec per sample."""
        base_ts = 1000000000
        index_ts = base_ts + 10 * index
        # Core samples as provided by agent.acquire_samples
        sample_list = []
        for i in xrange(num_rows):
            ts = index_ts + i * 10
            # Row layout matches "cols" below: NTP64 timestamp, deterministic
            # value (== global row index) and a random value.
            sample = [NTP4Time(ts).to_ntp64(),
                      float(index + i),
                      random.random()*100]
            sample_list.append(sample)
        sample_desc = dict(cols=["time", "var1", "random1"],
                           data=sample_list)
        packet = DataPacketBuilder.build_packet_from_samples(sample_desc,
                                                            resource_id="ds_id", stream_name="basic_streams")
        return packet

    # Test row interval algorithm
    # Test packed sample format
    # Test other time formats
    # Test out of order timestamps
    # Test with large files (index extend etc)

    def test_hdf5_persist(self):
        # Test HDF5 writing, time indexing, array extension etc
        ds_schema_str = """
type: scion_data_schema_1
description: Schema for test datasets
attributes:
  basic_shape: 1d_timeseries
  time_variable: time
persistence:
  format: hdf5
  layout: vars_individual
  row_increment: 1000
  time_index_step: 1000
variables:
  - name: time
    base_type: ntp_time
    storage_dtype: i8
    unit: ""
    description: NTPv4 timestamp
  - name: var1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Sample value
  - name: random1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Random values
"""
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))
        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()
        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 100 values in packets of 10
        for i in xrange(10):
            packet = self._get_data_packet(i*10, 10)
            self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res), 3)
        self.assertEqual(len(data_res["time"]), 100)
        self.assertEqual(len(data_res["var1"]), 100)
        self.assertEqual(len(data_res["random1"]), 100)
        self.assertEqual(data_res["var1"][1], 1.0)

        with HDFLockingFile(ds_filename, "r") as hdff:
            ds_time = hdff["vars/time"]
            cur_idx = ds_time.attrs["cur_row"]
            self.assertEqual(cur_idx, 100)
            # Arrays are pre-allocated in row_increment (1000) chunks.
            self.assertEqual(len(ds_time), 1000)

            ds_tidx = hdff[DS_TIMEIDX_PATH]
            cur_tidx = ds_tidx.attrs["cur_row"]
            self.assertEqual(cur_tidx, 1)
            self.assertEqual(len(ds_tidx), 1000)

        # Add 1000 values in packets of 10
        for i in xrange(100):
            packet = self._get_data_packet(100 + i*10, 10)
            self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 1100)

        with HDFLockingFile(ds_filename, "r") as hdff:
            ds_time = hdff["vars/time"]
            cur_idx = ds_time.attrs["cur_row"]
            self.assertEqual(cur_idx, 1100)
            self.assertEqual(len(ds_time), 2000)

            ds_tidx = hdff[DS_TIMEIDX_PATH]
            cur_tidx = ds_tidx.attrs["cur_row"]
            self.assertEqual(cur_tidx, 2)
            self.assertEqual(len(ds_tidx), 1000)
            # Each time-index entry points at every time_index_step-th sample.
            self.assertEqual(ds_time[0], ds_tidx[0][0])
            self.assertEqual(ds_time[1000], ds_tidx[1][0])

        info_res = self.hdf5_persist.get_data_info()
        self.assertEqual(info_res["ds_rows"], 1100)
        self.assertEqual(info_res["ts_first"], 1000000000.0)
        self.assertEqual(info_res["ts_last"], 1000010990.0)

    def test_hdf5_persist_prune(self):
        # Test auto-pruning
        ds_schema_str = """
type: scion_data_schema_1
description: Schema for test datasets
attributes:
  basic_shape: 1d_timeseries
  time_variable: time
persistence:
  format: hdf5
  layout: vars_individual
  row_increment: 1000
  time_index_step: 1000
pruning:
  trigger_mode: on_ingest
  prune_mode: max_age_rel
  prune_action: rewrite
  trigger_age: 1000.0
  retain_age: 500.0
variables:
  - name: time
    base_type: ntp_time
    storage_dtype: i8
    unit: ""
    description: NTPv4 timestamp
  - name: var1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Sample value
  - name: random1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Random values
"""
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))
        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()
        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 100 values in packets of 10 (right up to the prune trigger)
        for i in xrange(10):
            packet = self._get_data_packet(i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 100)
        self.assertEqual(len(data_res["var1"]), 100)
        self.assertEqual(len(data_res["random1"]), 100)
        self.assertEqual(data_res["var1"][1], 1.0)

        log.info("*** STEP 2: First prune")
        # Add 2 values (stepping across the prune trigger - inclusive boundary)
        packet = self._get_data_packet(100, 2)
        self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        # After the prune only retain_age (500 s == 51 samples) remains.
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(len(data_res["var1"]), 51)
        self.assertEqual(len(data_res["random1"]), 51)
        self.assertEqual(data_res["var1"][0], 51.0)
        self.assertEqual(data_res["var1"][50], 101.0)

        log.info("*** STEP 3: Additional data")
        # Add 100 values in packets of 10 (right up to the prune trigger)
        packet = self._get_data_packet(102, 8)
        self.hdf5_persist.extend_dataset(packet)
        for i in xrange(4):
            packet = self._get_data_packet(110 + i * 10, 10)
            self.hdf5_persist.extend_dataset(packet)
        packet = self._get_data_packet(150, 2)
        self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 101)
        self.assertEqual(data_res["var1"][0], 51.0)
        self.assertEqual(data_res["var1"][100], 151.0)

        log.info("*** STEP 4: Second prune")
        packet = self._get_data_packet(152, 1)
        self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(data_res["var1"][0], 102.0)
        self.assertEqual(data_res["var1"][50], 152.0)

        log.info("*** STEP 5: Third prune")
        # A single packet larger than the retain window triggers another prune.
        packet = self._get_data_packet(153, 100)
        self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res["time"]), 51)
        self.assertEqual(data_res["var1"][0], 202.0)
        self.assertEqual(data_res["var1"][50], 252.0)

    def test_hdf5_persist_decimate(self):
        # Test HDF5 writing, time indexing, array extension etc
        ds_schema_str = """
type: scion_data_schema_1
description: Schema for test datasets
attributes:
  basic_shape: 1d_timeseries
  time_variable: time
persistence:
  format: hdf5
  layout: vars_individual
  row_increment: 1000
  time_index_step: 1000
variables:
  - name: time
    base_type: ntp_time
    storage_dtype: i8
    unit: ""
    description: NTPv4 timestamp
  - name: var1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Sample value
  - name: random1
    base_type: float
    storage_dtype: f8
    unit: ""
    description: Random values
"""
        ds_schema = yaml.load(ds_schema_str)
        ds_id = create_simple_unique_id()
        ds_filename = self.container.file_system.get("%s/%s%s.hdf5" % (DS_BASE_PATH, DS_FILE_PREFIX, ds_id))
        self.hdf5_persist = DatasetHDF5Persistence.get_persistence(ds_id, ds_schema, "hdf5")
        self.hdf5_persist.require_dataset()
        self.assertTrue(os.path.exists(ds_filename))
        self.addCleanup(os.remove, ds_filename)

        # Add 10000 values in packets of 100
        for i in xrange(100):
            packet = self._get_data_packet(i * 100, 100)
            self.hdf5_persist.extend_dataset(packet)
        data_res = self.hdf5_persist.get_data()
        self.assertEqual(len(data_res), 3)
        self.assertEqual(len(data_res["time"]), 10000)

        # A decimated read must respect the max_rows bound (minmax method).
        data_res = self.hdf5_persist.get_data(dict(max_rows=999, decimate=True, decimate_method="minmax"))
        self.assertEqual(len(data_res), 3)
        self.assertLessEqual(len(data_res["time"]), 1000)
| StarcoderdataPython |
from lms.lmstests.sandbox.config import celery as celery_config
from lms.lmstests.sandbox.linters import tasks as flake8_tasks

# Re-export the shared Celery application so workers can discover it here.
celery_app = celery_config.app

# Public API of this module: the linter task collection and the Celery app.
__all__ = ('flake8_tasks', 'celery_app')
| StarcoderdataPython |
4869423 | from GUI import GUI
from HAL import HAL
import math
import numpy as np
# Enter sequential code!
# Tuning constants for the histogram-based obstacle-avoidance controller.
threshold_angle = 0.01
threshold_distance = 0.25
threshold = 0.01
kp = 1.0
while True:
    # Enter iterative code!
    # creating Objects
    currentTarget = GUI.map.getNextTarget()
    laser_data = HAL.getLaserData ()
    # defining Variables
    pos = [HAL.getPose3d().x, HAL.getPose3d().y]
    # Vector from the target back towards the robot (note the -1 factors).
    dist = [-1*(currentTarget.getPose().x - pos[0]), -1*(currentTarget.getPose().y - pos[1])]
    mod_dist = math.sqrt(dist[0]**2 + dist[1]**2)
    GUI.map.targetx = currentTarget.getPose().x
    GUI.map.targety = currentTarget.getPose().y
    if (dist[1] <= 0.001):
        currentTarget.setReached(True)
    # Attractive ("car") vector shown in the GUI, normalized and scaled.
    GUI.map.carx = 1.5*(dist[0] / mod_dist)
    GUI.map.cary = 1.5*(dist[1] / mod_dist)
    GUI.map.obsx = 0.0
    GUI.map.obsy = 0.0
    # Creating the Histogram
    # One bin per close laser ray; larger value means a closer obstacle.
    hist = []
    for a in range(len(laser_data.values)):
        d = laser_data.values[a]
        if d < threshold_distance:
            hist.append(threshold_distance - d)
    # Smoothening the histogram
    for i, j in enumerate(hist):
        if(i>0 and i<len(hist)-1):
            # 3-point moving average (interior bins only).
            hist[i] = (hist[i-1] + hist[i] + hist[i+1])/3.0
        if hist[i] < threshold:
            hist[i] = 0.0
    # Scan for the widest run of zero bins, i.e. the largest free gap.
    h = 0
    l_min = -1
    l_max = -1
    g_min = -1
    g_max = -1
    global_count = 0
    while h < len(hist):
        count = 0
        if hist[h] == 0 :
            l_min = h
            while h < len(hist) and hist[h] == 0:
                count += 1
                h += 1
            l_max = h
            if count > global_count:
                # New widest gap found; remember its bounds.
                global_count = count
                g_min = l_min
                g_max = l_max
        h += 1
    # NOTE(review): obstacle_vect is allocated (2,2); the scalar assignments
    # below broadcast across whole rows — a length-2 vector was probably
    # intended. Verify before relying on obstacle_vect[1].
    obstacle_vect = np.zeros((2,2), dtype=float)
    dir = 0
    if g_min != -1 and g_max!= -1:
        # Steer towards the middle of the widest free gap.
        dir = int((g_max + g_min)/2.0)
        obstacle_vect[0] = math.cos(math.radians(dir))
        obstacle_vect[1] = math.sin(math.radians(dir))
    else:
        # No free gap found: stop the robot and leave the control loop.
        HAL.motors.sendV(0.0)
        HAL.motors.sendW(0.0)
        break
    GUI.map.obsx = obstacle_vect[0]
    # Combined (target + obstacle) direction, for GUI display only.
    avgx = dist[0]/mod_dist + obstacle_vect[0]
    avgy = dist[1]/mod_dist + obstacle_vect[1]
    GUI.map.avgx = avgx
    GUI.map.avgy = avgy
    # angle = 0
    # if(avgy != 0):
    #     angle = -1*math.atan(avgx/ avgy)
    # else:
    #     if (avgx > 0):
    #         angle = -1.57
    #     else:
    #         angle = 1.57
    angle = -1 * math.radians(dir)
    if(abs(angle) < threshold_angle):
        # Deadband: ignore tiny corrections to avoid jitter.
        angle = 0
    HAL.motors.sendW(kp*angle)
    #HAL.motors.sendW(-0.1)
    HAL.motors.sendV(2.0)
    console.print(str(angle) + " " + str(obstacle_vect[0]))
1791333 | <reponame>sdch10/Storm-time-TEC
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 11:02:28 2019
### STORM STUDY FOR SMALLER SPATIO-TEMPORAL AVERAGING ####
@author: sdch10
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
import matplotlib.mlab as mlab
from matplotlib import cm
from mpl_toolkits.basemap import Basemap
from matplotlib.colors import BoundaryNorm
import h5py as hf
import datetime as dt
import glob
import pandas as pd
from madrigalWeb import madrigalWeb as md
import matplotlib.colors as colors
import matplotlib
from scipy.stats import spearmanr as spear
from scipy.stats import pearsonr as pearson
import pyIGRF as igrf
from dask import delayed as delayed
import dask.dataframe as dskdf
import time as timeclock
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import stats, linalg
from astral import Astral
def partial_corr(C):
    """
    Returns the sample linear partial correlation coefficients between pairs of variables in C, controlling
    for the remaining variables in C.

    Parameters
    ----------
    C : array-like, shape (n, p)
        Array with the different variables. Each column of C is taken as a variable

    Returns
    -------
    P : array-like, shape (p, p)
        P[i, j] contains the partial correlation of C[:, i] and C[:, j] controlling
        for the remaining variables in C.
    """
    C = np.asarray(C)
    p = C.shape[1]
    # Bug fix: np.float / np.bool were deprecated aliases removed in
    # NumPy 1.24; use the builtin float / bool dtypes instead.
    P_corr = np.zeros((p, p), dtype=float)
    for i in range(p):
        P_corr[i, i] = 1
        for j in range(i+1, p):
            # Boolean mask selecting the controlling variables (all but i, j).
            idx = np.ones(p, dtype=bool)
            idx[i] = False
            idx[j] = False
            # Regress columns i and j on the controlling variables
            # (least squares, no intercept term) ...
            beta_i = linalg.lstsq(C[:, idx], C[:, j])[0]
            beta_j = linalg.lstsq(C[:, idx], C[:, i])[0]
            res_j = C[:, j] - C[:, idx].dot(beta_i)
            res_i = C[:, i] - C[:, idx].dot(beta_j)
            # ... and correlate the residuals; that is the partial correlation.
            corr = stats.pearsonr(res_i, res_j)[0]
            P_corr[i, j] = corr
            P_corr[j, i] = corr
    return P_corr
class MidpointNormalize(colors.Normalize):
    """Colormap normalisation centred on a chosen midpoint value."""

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        colors.Normalize.__init__(self, vmin, vmax, clip)
        self.midpoint = midpoint

    def __call__(self, value, clip=None):
        # Piecewise-linear map: vmin -> 0, midpoint -> 0.5, vmax -> 1,
        # masking out NaN entries.
        anchors_x = [self.vmin, self.midpoint, self.vmax]
        anchors_y = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors_x, anchors_y), np.isnan(value))
def makeTECgrid(latitudearr, longitudearr, TECarray, UTstringarr, timechoice):
    ### gives timesnapshot of: 1) meshgrid of longitude 2) meshgrid of latitude, 3)2-D mesh of TEC
    ### inputs: 1-D latitude array, 1-D longitude array, 1-D timearray (sequential) and the time snapshot desired
    lat_levels = np.unique(latitudearr)
    lon_levels = np.unique(longitudearr)
    lon_mesh, lat_mesh = np.meshgrid(lon_levels, lat_levels)
    tec_mesh = np.zeros([np.size(lat_levels), np.size(lon_levels)])
    # Fill only the cells whose timestamp matches the requested snapshot;
    # unmatched cells stay zero.
    for lat, lon, tec, tstamp in zip(latitudearr, longitudearr, TECarray, UTstringarr):
        if tstamp != timechoice:
            continue
        row = np.where(lat_levels == lat)[0][0]
        col = np.where(lon_levels == lon)[0][0]
        tec_mesh[row, col] = tec
    return lon_mesh, lat_mesh, tec_mesh
def getfilename(date):
    """Return the path of the monthly SYM-H HDF5 file containing *date*.

    The data directory stores one file per month named YYYYMM*.h5; raises
    IndexError when no such file exists.
    """
    yymm = "{0}{1:02d}".format(date.year, date.month)
    hdf5_base = "/home/sdch10/DATA/geomag/symh/hdf5/%d*.h5"
    return glob.glob(hdf5_base % int(yymm))[0]
def new_check_symh(yy,mm,dd):
    """Inspect SYM-H around a candidate storm day (yy-mm-dd).

    Loads SYM-H for a window from 3 days before to 4 days after the given day,
    prints onset and minimum-SYM-H timing information, and shows a plot with
    the minimum (blue) and the maximum positive excursion (green) marked.
    """
    thisdate=dt.datetime(yy,mm,dd,0,0,0)
    day_start_inspect=thisdate+dt.timedelta(days=-3)
    day_end_inspect=thisdate+dt.timedelta(days=4)
    ##### READ SYM-H FILES ##############
    hdf5_base="/home/sdch10/DATA/geomag/symh/hdf5/%d*.h5"
    start_month=day_start_inspect.month
    end_month=day_end_inspect.month
    storm_month=mm
    start_yymonth_str=str(yy)+'{:02d}'.format(start_month)
    end_yymonth_str=str(yy)+'{:02d}'.format(end_month)
    storm_yymonth_str=str(yy)+'{:02d}'.format(storm_month)
    # label encodes which monthly files the window spans:
    # 1 -> spills into the next month, 2 -> starts in the previous month,
    # 3 -> entirely within one month.
    if start_month==storm_month and not storm_month==end_month:
        label=1
    elif storm_month==end_month and not start_month==storm_month:
        label=2
    else:
        label=3
    print(label)
    if label==1:
        fname1=glob.glob(hdf5_base%int(start_yymonth_str))[0]
        o1=pd.read_hdf(fname1, mode="r", key="df")
        fname2=glob.glob(hdf5_base%int(end_yymonth_str))[0]
        o2=pd.read_hdf(fname2, mode="r", key="df")
        o=pd.concat([o1,o2])
    elif label==2:
        fname1=glob.glob(hdf5_base%int(start_yymonth_str))[0]
        o1=pd.read_hdf(fname1, mode="r", key="df")
        fname2=glob.glob(hdf5_base%int(storm_yymonth_str))[0]
        o2=pd.read_hdf(fname2, mode="r", key="df")
        o=pd.concat([o1,o2])
    else:
        fname=glob.glob(hdf5_base%int(start_yymonth_str))[0]
        o=pd.read_hdf(fname, mode="r", key="df")
    ###################################
    mask = (o['DATE'] > day_start_inspect) & (o['DATE'] <= day_end_inspect)
    data=o.loc[mask]
    ######### ONSET ################
    # Positive SYM-H excursions are taken as candidate sudden-commencement
    # onsets; the largest positive value is reported as "MAX ONSET".
    onset=data.loc[data['SYM-H']>0]
    onsetdates=onset.DATE.apply(lambda x: x.strftime("%d %H:%M"))
    if np.size(onset.to_numpy()):
        onsetmaxpos=onset.loc[onset['SYM-H'].idxmax()]
        onsetmaxtime=str(onsetmaxpos.DATE.day)+' '+str(onsetmaxpos.DATE.hour)+':'+str(onsetmaxpos.DATE.minute)
        onsetispos=True
    else:
        onsetispos=False
        print "NO POSITIVE SYM-H TO BE SHOWN AS ONSET"
    #####
    minind=data.loc[data['SYM-H'].idxmin()]
    stringminind=str(minind.DATE.day)+' '+str(minind.DATE.hour)+':'+str(minind.DATE.minute)
    if onsetispos:
        print ("Onset Time: %s"%onsetdates.to_list()[0])
        print ("MAX ONSET time: %s"%onsetmaxtime)
    print ("Min Sym-H Time: %s"%stringminind)
    print ("Min Sym-H =",minind['SYM-H'])
    ############ PLOT ###############
    f=plt.figure()
    ax=f.add_subplot(111)
    ax.plot(data['DATE'],data['SYM-H'],lw=2.0,c='r')
    ax.axhline(y=0,c='k',lw=2.0)
    ax.axvline(x=minind['DATE'],c='b',lw=3.0)
    if onsetispos:
        ax.axvline(x=onsetmaxpos['DATE'],c='g',lw=3.0)
    # ax.axvline(x=onset[onset.index==onset.index[0]]['DATE'],c='r',lw=3.0)
    plt.show()
    # print(data[(data['DATE']>dt.datetime(yy,mm,dd,4,0,0))&(data['DATE']<dt.datetime(yy,mm,dd,5,0,0))])
def get_onset_time_new_set():
    """Derive storm timing for every storm in new_set_stormdays().

    For each storm day, loads SYM-H for a +/-1 day window and returns two
    parallel lists of datetimes: the last non-negative SYM-H sample before the
    minimum (used as a proxy for storm onset) and the time of minimum SYM-H.
    """
    dates=new_set_stormdays()
    hdf5_base="/home/sdch10/DATA/geomag/symh/hdf5/%d*.h5"
    proxy_onset_times=[]
    min_symh_times=[]
    for date in dates:
        yy=date.year
        mm=date.month
        dd=date.day
        thisdate=dt.datetime(yy,mm,dd,0,0,0)
        day_start_inspect=thisdate+dt.timedelta(days=-1)
        day_end_inspect=thisdate+dt.timedelta(days=1)
        ##### READ SYM-H FILES ##############
        start_month=day_start_inspect.month
        end_month=day_end_inspect.month
        storm_month=mm
        start_yymonth_str=str(yy)+'{:02d}'.format(start_month)
        end_yymonth_str=str(yy)+'{:02d}'.format(end_month)
        storm_yymonth_str=str(yy)+'{:02d}'.format(storm_month)
        ########
        # label encodes which monthly files the 3-day window spans:
        # 1 -> spills into the next month, 2 -> starts in the previous month,
        # 3 -> entirely within the storm month.
        if start_month==storm_month and not storm_month==end_month:
            label=1
        elif storm_month==end_month and not start_month==storm_month:
            label=2
        else:
            label=3
        ########
        if label==1:
            fname1=glob.glob(hdf5_base%int(start_yymonth_str))[0]
            o1=pd.read_hdf(fname1, mode="r", key="df")
            fname2=glob.glob(hdf5_base%int(end_yymonth_str))[0]
            o2=pd.read_hdf(fname2, mode="r", key="df")
            o=pd.concat([o1,o2])
            o.reset_index(drop=True,inplace=True)
        elif label==2:
            fname1=glob.glob(hdf5_base%int(start_yymonth_str))[0]
            o1=pd.read_hdf(fname1, mode="r", key="df")
            fname2=glob.glob(hdf5_base%int(storm_yymonth_str))[0]
            o2=pd.read_hdf(fname2, mode="r", key="df")
            o=pd.concat([o1,o2])
            o.reset_index(drop=True,inplace=True)
        else:
            fname=glob.glob(hdf5_base%int(start_yymonth_str))[0]
            o=pd.read_hdf(fname, mode="r", key="df")
        ########
        mask = (o['DATE'] >= day_start_inspect) & (o['DATE'] <= day_end_inspect)
        data=o.loc[mask]
        minloc=data['SYM-H'].idxmin()
        isthiszeroloc=minloc
        # Walk backwards from the minimum until SYM-H is no longer negative;
        # that sample is taken as the zero crossing / proxy onset.
        while data.loc[isthiszeroloc]['SYM-H']<0:
            isthiszeroloc=isthiszeroloc-1
        zero=data.loc[isthiszeroloc]
        zero_crossing_dt=dt.datetime(zero.DATE.year,zero.DATE.month,zero.DATE.day,zero.DATE.hour,zero.DATE.minute,zero.DATE.second)
        mini=data.loc[minloc]
        min_symh_dt=dt.datetime(mini.DATE.year,mini.DATE.month,mini.DATE.day,mini.DATE.hour,mini.DATE.minute,mini.DATE.second)
        proxy_onset_times.append(zero_crossing_dt)
        min_symh_times.append(min_symh_dt)
    return proxy_onset_times,min_symh_times
def new_set_stormdays():
    ### gives days corresponding to observed min sym-h for the storms
    storm_days = [
        (2000, 2, 12), (2000, 4, 7), (2000, 9, 18), (2000, 10, 29),
        (2001, 3, 20), (2001, 3, 31), (2001, 4, 11), (2001, 4, 18),
        (2001, 8, 17), (2001, 9, 26), (2001, 10, 21), (2001, 10, 28),
        (2002, 3, 24), (2002, 4, 18), (2002, 8, 2), (2002, 9, 4),
        (2003, 8, 18),
        (2004, 2, 11), (2004, 3, 10), (2004, 4, 4), (2004, 8, 30),
        (2005, 8, 24), (2005, 8, 31), (2005, 9, 11),
        (2006, 4, 14),
        (2011, 8, 6), (2011, 9, 26), (2011, 10, 25),
        (2012, 3, 9), (2012, 4, 24), (2012, 10, 1),
        (2013, 3, 17),
        (2014, 2, 19), (2014, 2, 27),
        (2015, 3, 17),
        (2016, 10, 13),
        (2018, 8, 26),
    ]
    # Midnight datetimes, in the same order the storms are studied.
    return [dt.datetime(year, month, day) for (year, month, day) in storm_days]
def quiet_days_new_set():
    """Return, for each storm in new_set_stormdays() (same ordering), the
    five quiet days-of-month used as the quiet-time baseline."""
    quiet_day_sets = [
        [18, 19, 4, 17, 20],
        [26, 14, 22, 18, 25],
        [10, 14, 11, 9, 22],
        [20, 8, 21, 9, 6],
        [15, 16, 26, 11, 17],
        [15, 16, 26, 11, 17],
        [30, 27, 24, 19, 25],
        [30, 27, 24, 19, 25],
        [16, 24, 15, 11, 29],
        [10, 7, 9, 1, 6],
        [24, 18, 7, 17, 26],
        [24, 18, 7, 17, 26],
        [17, 28, 14, 16, 27],
        [8, 9, 26, 5, 25],
        [6, 24, 7, 5, 25],
        [23, 25, 24, 29, 20],
        [31, 5, 16, 27, 4],
        [26, 17, 8, 20, 10],
        [24, 7, 6, 8, 25],
        [2, 12, 22, 20, 29],
        [4, 8, 24, 3, 25],
        [11, 20, 30, 12, 28],
        [11, 20, 30, 12, 28],
        [24, 21, 20, 25, 8],
        [30, 12, 1, 3, 2],
        [31, 18, 19, 3, 21],
        [23, 19, 1, 16, 8],
        [14, 22, 23, 28, 29],
        [26, 29, 25, 31, 20],
        [30, 6, 9, 8, 16],
        [20, 4, 29, 30, 22],
        [8, 7, 26, 25, 13],
        [13, 26, 14, 25, 2],
        [13, 26, 14, 25, 2],
        [10, 30, 5, 14, 9],
        [21, 11, 20, 9, 22],
        [6, 14, 10, 13, 23],
    ]
    return quiet_day_sets
def gen_average_storm():
    """Build superposed-epoch SYM-H profiles for all storms and derive
    per-storm features.

    Returns a tuple (onsethour, fallofftime, falloffslope, minsymh,
    rangesymh, symprofile) where symprofile stacks the SYM-H time series of
    every storm over a window from 24 h before to 48 h after onset.
    NOTE(review): assumes every storm window yields the same number of
    samples so symprofile stacks into a rectangular array — verify.
    """
    ### Average Storm
    ##
    # onsets,minsyms=gen_storm_arrays()
    onsets,minsyms=get_onset_time_new_set()
    delh=[]
    symprofile=[]
    #############################
    ### THE FEATURES
    ### #########################
    minsymh=[]
    onsethour=[]
    falloffslope=[]
    fallofftime=[]
    rangesymh=[]
    for starttime,mintime in zip(onsets,minsyms):
        ###
        ###go back 24 hours
        start=np.datetime64(starttime)
        windowstart=starttime+dt.timedelta(days=-1)
        ### fast forward 48 hours
        windowend=starttime+dt.timedelta(days=2)
        ##############################################################
        ### GET DATA for window start time ###########################
        ##############################################################
        if windowstart.month==windowend.month:
            fname=getfilename(windowstart)
            o=pd.read_hdf(fname, mode="r", key="df")
            ######
            mask = (o['DATE'] > windowstart) & (o['DATE'] <= windowend)
            data=o.loc[mask]
            symh=data['SYM-H'].to_numpy()
            timestamps=data['DATE'].to_numpy()
            # Hours relative to onset for each sample in the window.
            timeoffsets=np.array([(tstamp-start)/np.timedelta64(1,'h') for tstamp in timestamps ])
        else:
            # Window spans two calendar months: read and concatenate both files.
            fname1=getfilename(windowstart)
            fname2=getfilename(windowend)
            o1=pd.read_hdf(fname1, mode="r", key="df")
            o2=pd.read_hdf(fname2, mode="r", key="df")
            o=pd.concat([o1,o2])
            #####
            mask = (o['DATE'] > windowstart) & (o['DATE'] <= windowend)
            data=o.loc[mask]
            symh=data['SYM-H'].to_numpy()
            timestamps=data['DATE'].to_numpy()
            timeoffsets=np.array([(tstamp-start)/np.timedelta64(1,'h') for tstamp in timestamps ])
        # Per-storm features: hours from onset to minimum, depth and slope.
        falloff=(mintime-starttime).total_seconds()/3600.
        minsymh.append(np.min(symh))
        onsethour.append(starttime.hour+starttime.minute/60.)
        fallofftime.append(falloff)
        rangesymh.append(0.-np.min(symh))
        falloffslope.append((0.-np.min(symh))/falloff)
        #### RECORD DATA
        delh.append(timeoffsets)
        symprofile.append(symh)
    delh=np.array(delh)
    symprofile=np.array(symprofile)
    # Superposed-epoch statistics across storms (per time offset).
    avgstorm=symprofile.mean(0)
    medstorm=np.median(symprofile,axis=0)
    upperlim=np.max(symprofile,axis=0)
    lowerlim=np.min(symprofile,axis=0)
    # f=plt.figure()
    # ax=f.add_subplot(111)
    # ax.plot(delh[0],medstorm,'r-o')
    # ax.fill_between(delh[0],lowerlim,upperlim,color='c',alpha=0.5)
    # ax.set_xlabel("Hours after onset", fontsize=35)
    # ax.set_ylabel("SYM-H (nT)",fontsize=35)
    # ax.set_xlim([-24,48])
    # for tick in ax.xaxis.get_major_ticks():
    #     tick.label.set_fontsize(25)
    # for tick in ax.yaxis.get_major_ticks():
    #     tick.label.set_fontsize(25)
    # plt.show()
    return onsethour,fallofftime,falloffslope,minsymh,rangesymh,symprofile
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''''''' DOWNLOAD STORM TIME TEC FILES '''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def download_TEC_storm():
    """Download world-wide GNSS TEC files from Madrigal for every storm
    onset day (plus the following day) and save them as
    Storm_<year>_<month>_<day>.hdf5 under the Storm_Days directory."""
    #constants
    madrigalUrl = 'http://cedar.openmadrigal.org'
    # madrigalUrl = 'http://millstonehill.haystack.mit.edu'
    # instrument = 'World-wide GPS Receiver Network'
    instrument = 'World-wide GNSS Receiver Network'
    user_fullname = '<NAME>'
    user_email = '<EMAIL>'
    user_affiliation = 'Virginia Tech'
    # create the main object to get all needed info from Madrigal
    madrigalObj = md.MadrigalData(madrigalUrl)
    # these next few lines convert instrument name to code
    code = None
    instList = madrigalObj.getAllInstruments()
    for inst in instList:
        if inst.name.lower() == instrument.lower():
            code = inst.code
            print "Found instrument!"
            print code
            break
    if code == None:
        raise ValueError, 'Unknown instrument %s' % (instrument)
    onsets,minsyms=get_onset_time_new_set()
    for start in onsets:
        yy=start.year
        mm=start.month
        dd=start.day
        ##
        # Also fetch the day after onset so the full storm window is covered.
        end=start+dt.timedelta(days=1)
        nextyy=end.year
        nextmm=end.month
        nextdd=end.day
        ###
        print "Now fetching file for:"
        print str(yy) + "-" + str(mm) + "-" + str(dd)
        print str(nextyy) + "-" + str(nextmm) + "-" + str(nextdd)
        ########
        expList = madrigalObj.getExperiments(code,yy,mm,dd,0,0,0,nextyy,nextmm,nextdd,0,0,0)
        for exp in expList:
            print exp.startday,'-',exp.endday
            if (exp.startday==dd or exp.startday==nextdd):
                print "Here's the experiment!"
                # print (str(exp) + '\n')
                fileList = madrigalObj.getExperimentFiles(exp.id)
                for thisFile in fileList:
                    # category 1 == default files; keep only the 'gps*' TEC product.
                    if thisFile.category == 1 and str(thisFile.name.split('/')[-1])[0:3]=='gps':
                        print (str(thisFile.name) + '\n')
                        thisFilename = thisFile.name
                        onlyFileName = "Storm_"+str(exp.startyear) + "_" + str(exp.startmonth) + "_" + str(exp.startday)+".hdf5"
                        # f = open("/home/sdch10/Datafiles/Storm_TEC_pred/" + onlyFileName[len(onlyFileName)-1],"w")
                        # f.close()
                        print "Beginning download for:"
                        print str(exp.startyear) + "-" + str(exp.startmonth) + "-" + str(exp.startday)
                        madrigalObj.downloadFile(thisFilename, "/home/sdch10/Datafiles/TEC_Files_Storm_Study/Storm_Days/" + onlyFileName, user_fullname, user_email, user_affiliation, "hdf5")
                        print "Completed download for:"
                        print str(exp.startyear) + "-" + str(exp.startmonth) + "-" + str(exp.startday)
            else:
                print "Not this file",exp.startday,'-',exp.endday
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''''''' DOWNLOAD QUIET TIME TEC FILES '''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def download_TEC_quiet():
    """Download world-wide GNSS TEC files from Madrigal for the five quiet
    baseline days of every storm and save them as
    Quiet_<year>_<month>_<stormday>_<quietday>.hdf5 under Quiet_Days."""
    madrigalUrl = 'http://cedar.openmadrigal.org'
    # madrigalUrl = 'http://millstonehill.haystack.mit.edu'
    instrument = 'World-wide GNSS Receiver Network'
    user_fullname = '<NAME>'
    user_email = '<EMAIL>'
    user_affiliation = 'Virginia Tech'
    # create the main object to get all needed info from Madrigal
    madrigalObj = md.MadrigalData(madrigalUrl)
    # these next few lines convert instrument name to code
    code = None
    instList = madrigalObj.getAllInstruments()
    for inst in instList:
        if inst.name.lower() == instrument.lower():
            code = inst.code
            print "Found instrument!"
            print code
            break
    if code == None:
        raise ValueError, 'Unknown instrument %s' % (instrument)
    onsets,minsyms=get_onset_time_new_set()
    quiet=quiet_days_new_set()
    # Quiet-day lists are parallel to the storm onset list.
    for qc,start in zip(quiet,onsets):
        yy=start.year
        mm=start.month
        stormday=start.day
        for dd in qc:
            thisday=dt.datetime(yy,mm,dd)
            end=thisday+dt.timedelta(days=1)
            nextyy=end.year
            nextmm=end.month
            nextdd=end.day
            print "Now fetching file for:"
            print str(yy) + "-" + str(mm) + "-" + str(dd)
            expList = madrigalObj.getExperiments(code,yy,mm,dd,0,0,0,nextyy,nextmm,nextdd,0,0,0)
            for exp in expList:
                print exp.startday,'-',exp.endday
                if (exp.startday==dd):
                    print "Here's the experiment!"
                    # print (str(exp) + '\n')
                    fileList = madrigalObj.getExperimentFiles(exp.id)
                    for thisFile in fileList:
                        # category 1 == default files; keep only the 'gps*' TEC product.
                        if thisFile.category == 1 and str(thisFile.name.split('/')[-1])[0:3]=='gps':
                            print (str(thisFile.name) + '\n')
                            thisFilename = thisFile.name
                            onlyFileName = "Quiet_"+str(yy) + "_" + str(mm) + "_" + str(stormday)+ "_" +str(dd)+".hdf5"
                            # f = open("/home/sdch10/Datafiles/Storm_TEC_pred/" + onlyFileName[len(onlyFileName)-1],"w")
                            # f.close()
                            print "Beginning download for:"
                            print str(exp.startyear) + "-" + str(exp.startmonth) + "-" + str(exp.startday)
                            madrigalObj.downloadFile(thisFilename, "/home/sdch10/Datafiles/TEC_Files_Storm_Study/Quiet_Days/" + onlyFileName, user_fullname, user_email, user_affiliation, "hdf5")
                            print "Completed download for:"
                            print str(exp.startyear) + "-" + str(exp.startmonth) + "-" + str(exp.startday)
                else:
                    print "Not this file",exp.startday,'-',exp.endday
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''' EXTRACT DATA FROM TEC FILES'''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def extract_data(string,start,quietday):
    """Load GPS TEC observations for one storm or quiet day from HDF5 files.

    Parameters
    ----------
    string : str
        'storm' reads the storm-day file pair (onset day plus the following
        day, concatenated); any other value reads a single quiet-day file.
    start : datetime.datetime
        Storm onset date; its year/month/day select the file(s).
    quietday : int
        Day-of-month of the quiet day (only used when string != 'storm').

    Returns
    -------
    (times, lat, lon, TEC, alt) : numpy arrays of equal length.
        alt is a constant 350 km (assumed thin-shell ionosphere height,
        mirroring the commented-out 'gdalt' reads in the original data).
    """
    def _load_day(path_pattern, args):
        # Read one Madrigal HDF5 file and unpack time/lat/lon/TEC columns.
        fname = glob.glob(path_pattern % args)[0]
        f = hf.File(fname, 'r')
        data = np.array(f['Data']['Table Layout'])
        f.close()
        times = np.array([dt.datetime(int(item['year']), int(item['month']),
                                      int(item['day']), int(item['hour']),
                                      int(item['min']), int(item['sec']))
                          for item in data])
        lat = np.array([item['gdlat'] for item in data])
        lon = np.array([item['glon'] for item in data])
        tec = np.array([item['tec'] for item in data])
        return times, lat, lon, tec
    yy = start.year
    mm = start.month
    dd = start.day
    end = start + dt.timedelta(days=1)
    if string == 'storm':
        # Storm window spans onset day plus the next calendar day.
        hdf5_base = '/home/sdch10/Datafiles/TEC_Files_Storm_Study/Storm_Days/Storm_%d_%d_%d.hdf5'
        t1, lat1, lon1, TEC1 = _load_day(hdf5_base, (yy, mm, dd))
        t2, lat2, lon2, TEC2 = _load_day(hdf5_base, (end.year, end.month, end.day))
        timearray = np.append(t1, t2)
        lat = np.append(lat1, lat2)
        lon = np.append(lon1, lon2)
        TEC = np.append(TEC1, TEC2)
    else:
        hdf5_base = '/home/sdch10/Datafiles/TEC_Files_Storm_Study/Quiet_Days/Quiet_%d_%d_%d_%d.hdf5'
        timearray, lat, lon, TEC = _load_day(hdf5_base, (yy, mm, dd, quietday))
    alt = 350. * np.ones(len(TEC))
    return timearray, lat, lon, TEC, alt
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''' MAKE TEC GRID SECTORS BY DIP AND DECLINATION '''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def make_magnetic_grids(date,lats,lons,alts,TECs,times):
    """Bin TEC observations over the continental US into six magnetic sectors.

    The box 20-50 N latitude, 125-60 W longitude is split by IGRF
    declination (East: decl < 0, West: decl >= 0) and inclination
    (High > 65, Mid 55-65, Low <= 55 degrees), giving the fixed sector
    order [HE, ME, LE, HW, MW, LW].

    Parameters
    ----------
    date : object with a .year attribute (used as the IGRF epoch).
    lats, lons, alts, TECs, times : 1-D arrays of equal length.

    Returns
    -------
    latgrid, longgrid, altgrid, tecgrid, timegrid : each a 6-element list
        of arrays, one entry per sector in the order above.
    """
    year = date.year
    # Continental-US bounding box.
    box = (lats >= 20) & (lats <= 50) & (lons <= -60) & (lons >= -125)
    latitude = lats[box]
    longitude = lons[box]
    altitude = alts[box]
    tec = TECs[box]
    t = times[box]
    # IGRF field angles at each observation point.
    # NOTE(review): assumes igrf.igrf_value returns (declination,
    # inclination, ...) in degrees -- confirm against the igrf package used.
    field = [igrf.igrf_value(la, lo, al, year=year)
             for la, lo, al in zip(latitude, longitude, altitude)]
    decl = np.array([ret[0] for ret in field])
    incl = np.array([ret[1] for ret in field])
    east = decl < 0
    west = decl >= 0
    sector_masks = [
        east & (incl > 65),                   # HE: high-inclination East
        east & (incl > 55) & (incl <= 65),    # ME: mid-inclination East
        east & (incl <= 55),                  # LE: low-inclination East
        west & (incl > 65),                   # HW: high-inclination West
        west & (incl > 55) & (incl <= 65),    # MW: mid-inclination West
        west & (incl <= 55),                  # LW: low-inclination West
    ]
    latgrid = [latitude[m] for m in sector_masks]
    longgrid = [longitude[m] for m in sector_masks]
    altgrid = [altitude[m] for m in sector_masks]
    tecgrid = [tec[m] for m in sector_masks]
    timegrid = [t[m] for m in sector_masks]
    return latgrid, longgrid, altgrid, tecgrid, timegrid
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''' PROCESS HDF5 TEC FILES TO OBTAIN STORM DATA'''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def storm_process_TEC_US():
    """Build per-sector storm-time TEC series and save one HDF5 per sector.

    For every storm onset: take the 24 h of TEC following onset, bin it
    into the six magnetic sectors (HE/ME/LE/HW/MW/LW), average all
    observations sharing a timestamp, and write a DataFrame (columns:
    'time', 'TEC') to Storm_<index>_<domain>.h5.
    """
    onsets, minsyms = get_onset_time_new_set()
    storm_hdf5_base = '/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Storm_Days/Storm_%d_%s.h5'
    domains = ['HE', 'ME', 'LE', 'HW', 'MW', 'LW']
    for index, start in enumerate(onsets):
        print("GETTING STORM DATA FOR Storm %d on %d/%d/%d"%(index,start.year,start.month,start.day))
        # Keep only the 24 hours following onset.
        windowend = start + dt.timedelta(days=1)
        timearray, lat, lon, TEC, alt = extract_data('storm', start, 0)
        mask = (timearray > start) & (timearray < windowend)
        latgrid, longgrid, altgrid, tecgrid, timegrid = make_magnetic_grids(
            start, lat[mask], lon[mask], alt[mask], TEC[mask], timearray[mask])
        for loc in range(6):
            domain = domains[loc]
            timestamps = timegrid[loc]
            tec = tecgrid[loc]
            # Group observations by identical timestamp: counts gives each
            # group's size, so cumulative sums delimit the slices.
            # NOTE(review): assumes each sector's samples are already
            # time-sorted so slices line up with np.unique's sorted output
            # -- TODO confirm upstream ordering.
            uniquetimes, counts = np.unique(timestamps, return_counts=True)
            cumul = np.append(np.array([0]), np.cumsum(counts))
            # Mean TEC per unique timestamp (comprehension instead of the
            # quadratic np.append-in-a-loop pattern).
            TECarray = np.array([np.mean(tec[cumul[i - 1]:cumul[i]])
                                 for i in range(1, len(cumul))])
            ####### SAVE TO FILE
            print("Writing to file for storm %d in %s"%(index,domain))
            fname_storm = storm_hdf5_base % (index, domain)
            _df_storm = pd.DataFrame({'time': uniquetimes, 'TEC': TECarray})
            _df_storm.to_hdf(fname_storm, mode="w", key="df")
            print("Completed! Find file at %s"%(fname_storm))
        print("YAYY! Completed:Storm %d on %d/%d/%d"%(index,start.year,start.month,start.day))
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
''' PROCESS HDF5 TEC FILES TO OBTAIN QUIET DATA'''''''''''''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def quiet_process_TEC_US():
    """Build per-sector quiet-day reference TEC curves and save to HDF5.

    For each storm, loops over its quiet reference days, bins each day's
    TEC into the six magnetic sectors, averages observations that share a
    timestamp, and writes one DataFrame per storm and sector containing
    the individual quiet-day curves (TEC1..TEC5) plus their mean ('TEC').

    NOTE(review): the DataFrame construction hard-codes exactly five quiet
    days per storm and assumes every quiet-day curve in a sector has the
    same length -- confirm against quiet_days_new_set().
    """
    onsets,minsyms=get_onset_time_new_set()
    quiet=quiet_days_new_set()
    quiet_hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Quiet_Days/Quiet_%d_%s.h5'
    #################################################
    ################## QUIET ########################
    #################################################
    domains=['HE','ME','LE','HW','MW','LW']
    # qt: the quiet day-of-month list for this storm; index: storm number.
    for qt,start,index in zip(quiet,onsets,range(len(onsets))):
        print("GETTING QUIET DATA FOR Storm %d on %d/%d/%d"%(index,start.year,start.month,start.day))
        # One accumulator list per sector; each quiet day appends one curve.
        df_HE=[]
        df_ME=[]
        df_LE=[]
        df_HW=[]
        df_MW=[]
        df_LW=[]
        ### GET data for each quiet day
        for q in qt:
            print("Getting data for Quiet Day %d"%(q))
            times,lats,lons,TECs,alts=extract_data('quiet',start,q)
            latgrid,longgrid,altgrid,tecgrid,timegrid=make_magnetic_grids(start,lats,lons,alts,TECs,times)
            ### ASSIGN to each sector
            for loc in range(6):
                timestamps=timegrid[loc]
                tec=tecgrid[loc]
                # Group by identical timestamp; cumulative counts delimit
                # each group's slice within the sector's tec array.
                uniquetimes,counts=np.unique(timestamps,return_counts=True)
                cum=np.cumsum(counts)
                cumul=np.append(np.array([0]),cum)
                TECarray=np.array([])
                for i in range(1,len(cumul)):
                    avgTEC=np.mean(tec[cumul[i-1]:cumul[i]])
                    TECarray=np.append(TECarray,avgTEC)
                if loc==0:
                    df_HE.append(TECarray)
                elif loc==1:
                    df_ME.append(TECarray)
                elif loc==2:
                    df_LE.append(TECarray)
                elif loc==3:
                    df_HW.append(TECarray)
                elif loc==4:
                    df_MW.append(TECarray)
                elif loc==5:
                    df_LW.append(TECarray)
        fname_quiet_HE=quiet_hdf5_base%(index,'HE')
        fname_quiet_ME=quiet_hdf5_base%(index,'ME')
        fname_quiet_LE=quiet_hdf5_base%(index,'LE')
        fname_quiet_HW=quiet_hdf5_base%(index,'HW')
        fname_quiet_MW=quiet_hdf5_base%(index,'MW')
        fname_quiet_LW=quiet_hdf5_base%(index,'LW')
        ### WRITE TO FILE
        # NOTE(review): 'uniquetimes' below is whatever the LAST sector of
        # the LAST quiet day produced in the loop above -- it is reused as
        # the time axis for all six sectors. Confirm all sectors/days share
        # the same timestamp grid.
        print("Writing to file for storm %d in all domains"%(index))
        _df_quiet_HE = pd.DataFrame({'time': uniquetimes, 'TEC1':df_HE[0],
                                     'TEC2':df_HE[1],'TEC3':df_HE[2],
                                     'TEC4':df_HE[3],'TEC5':df_HE[4],
                                     'TEC':np.mean(df_HE,axis=0)})
        _df_quiet_HE.to_hdf(fname_quiet_HE, mode="w", key="df")
        ############
        _df_quiet_ME = pd.DataFrame({'time': uniquetimes, 'TEC1':df_ME[0],
                                     'TEC2':df_ME[1],'TEC3':df_ME[2],
                                     'TEC4':df_ME[3],'TEC5':df_ME[4],
                                     'TEC':np.mean(df_ME,axis=0)})
        _df_quiet_ME.to_hdf(fname_quiet_ME, mode="w", key="df")
        #################
        _df_quiet_LE = pd.DataFrame({'time': uniquetimes, 'TEC1':df_LE[0],
                                     'TEC2':df_LE[1],'TEC3':df_LE[2],
                                     'TEC4':df_LE[3],'TEC5':df_LE[4],
                                     'TEC':np.mean(df_LE,axis=0)})
        _df_quiet_LE.to_hdf(fname_quiet_LE, mode="w", key="df")
        #################
        _df_quiet_HW = pd.DataFrame({'time': uniquetimes, 'TEC1':df_HW[0],
                                     'TEC2':df_HW[1],'TEC3':df_HW[2],
                                     'TEC4':df_HW[3],'TEC5':df_HW[4],
                                     'TEC':np.mean(df_HW,axis=0)})
        _df_quiet_HW.to_hdf(fname_quiet_HW, mode="w", key="df")
        ############
        _df_quiet_MW = pd.DataFrame({'time': uniquetimes, 'TEC1':df_MW[0],
                                     'TEC2':df_MW[1],'TEC3':df_MW[2],
                                     'TEC4':df_MW[3],'TEC5':df_MW[4],
                                     'TEC':np.mean(df_MW,axis=0)})
        _df_quiet_MW.to_hdf(fname_quiet_MW, mode="w", key="df")
        ##############
        _df_quiet_LW = pd.DataFrame({'time': uniquetimes, 'TEC1':df_LW[0],
                                     'TEC2':df_LW[1],'TEC3':df_LW[2],
                                     'TEC4':df_LW[3],'TEC5':df_LW[4],
                                     'TEC':np.mean(df_LW,axis=0)})
        _df_quiet_LW.to_hdf(fname_quiet_LW, mode="w", key="df")
        print("Writing to file completed for %s\n%s\n%s\n%s\n%s\n%s\n"
              %(fname_quiet_HE,fname_quiet_ME,fname_quiet_LE,fname_quiet_HW,
                fname_quiet_MW,fname_quiet_LW))
        print("SHHHHH! 'QUIET'-LY Completed:Storm %d on %d/%d/%d"%(index,start.year,start.month,start.day))
        print("-----------------------------")
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''''''''''''''EXTRACT STORM DATA FOR PRECONDITIONING'''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
def extract_precondition_data(start):
    """Load TEC covering the 2 hours immediately preceding storm onset.

    Reads the storm-day HDF5 file; when the 2-hour window crosses local
    midnight it also reads the previous day's file and concatenates both.

    Parameters
    ----------
    start : datetime.datetime
        Storm onset time.

    Returns
    -------
    (times, lat, lon, TEC, alt) : numpy arrays; alt is fixed at 350 km.
    """
    hdf5_base = '/home/sdch10/Datafiles/TEC_Files_Storm_Study/Storm_Days/Storm_%d_%d_%d.hdf5'
    def _load_day(y, m, d):
        # Read one storm-day file and unpack time/lat/lon/TEC columns.
        fname = glob.glob(hdf5_base % (y, m, d))[0]
        f = hf.File(fname, 'r')
        data = np.array(f['Data']['Table Layout'])
        f.close()
        times = np.array([dt.datetime(int(item['year']), int(item['month']),
                                      int(item['day']), int(item['hour']),
                                      int(item['min']), int(item['sec']))
                          for item in data])
        lat = np.array([item['gdlat'] for item in data])
        lon = np.array([item['glon'] for item in data])
        tec = np.array([item['tec'] for item in data])
        return times, lat, lon, tec
    # Window start, 2 h before onset ('end' kept as the earlier time, as in
    # the original's naming).
    end = start - dt.timedelta(hours=2)
    if end.day != start.day:
        # Window spans midnight: concatenate previous day and onset day.
        t1, lat1, lon1, TEC1 = _load_day(end.year, end.month, end.day)
        t2, lat2, lon2, TEC2 = _load_day(start.year, start.month, start.day)
        timearray = np.append(t1, t2)
        lat = np.append(lat1, lat2)
        lon = np.append(lon1, lon2)
        TEC = np.append(TEC1, TEC2)
    else:
        timearray, lat, lon, TEC = _load_day(end.year, end.month, end.day)
    alt = 350. * np.ones(len(TEC))
    return timearray, lat, lon, TEC, alt
""""'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''GENERATE PRECONDITIONING QUIET AND STORM TEC PARAMS'''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''"""""""""
def preconditiong_TEC():
    """Compute pre-storm ('preconditioning') TEC levels per storm and sector.

    For each storm: average the TEC observed in the 2 hours before onset
    (storm preconditioning), and average the quiet-day reference TEC over
    the matching clock-time window (quiet preconditioning).

    Returns
    -------
    storm_TEC_precondition, quiet_TEC_precondition : lists (one entry per
        storm) of 6-element lists ordered [HE, ME, LE, HW, MW, LW].
    """
    ###### get ionosphere history ############
    quiet_hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Quiet_Days/Quiet_%d_%s.h5'
    onsets,minsyms=get_onset_time_new_set()
    domains=['HE','ME','LE','HW','MW','LW']
    storm_TEC_precondition=[]
    quiet_TEC_precondition=[]
    for start,index in zip(onsets,range(len(onsets))):
        ##############################################
        ######### STORM DATA FIRST ##################
        ##############################################
        print("GETTING PRECONDITIONING STORM DATA FOR Storm %d on %d/%d/%d"%(index,start.year,start.month,start.day))
        # 2-hour window ending at onset.
        windowstart=start-dt.timedelta(hours=2)
        timearray,lat,lon,TEC,alt=extract_precondition_data(start)
        mask=(timearray>=windowstart)&(timearray<start)
        lats=lat[mask]
        lons=lon[mask]
        TECs=TEC[mask]
        alts=alt[mask]
        times=timearray[mask]
        latgrid,longgrid,altgrid,tecgrid,timegrid=make_magnetic_grids(start,lats,lons,alts,TECs,times)
        precondstorm=[]
        precondquiet=[]
        for domain,loc in zip(domains,range(6)):
            ############################################
            ### GET Storm precondition #################
            timestamps=timegrid[loc]
            tec=tecgrid[loc]
            # Average observations sharing a timestamp (cumulative counts
            # delimit each timestamp's slice), then average over the window.
            uniquetimes,counts=np.unique(timestamps,return_counts=True)
            cum=np.cumsum(counts)
            cumul=np.append(np.array([0]),cum)
            TECarray=np.array([])
            for i in range(1,len(cumul)):
                avgTEC=np.mean(tec[cumul[i-1]:cumul[i]])
                TECarray=np.append(TECarray,avgTEC)
            precondstorm.append(np.mean(TECarray))
            ########################################
            ### Now quiet preconditioning #########
            # Rotate the quiet-day curve so it starts at the same clock time
            # as the pre-storm window, then average the same number of
            # samples. NOTE(review): assumes the first pre-storm timestamp's
            # HH:MM:SS appears exactly in the quiet time grid -- an IndexError
            # on 'pivot[0]' would mean it does not.
            quiet_fname=quiet_hdf5_base%(index,domain)
            quietdata=pd.read_hdf(quiet_fname,mode="r", key="df")
            stime=np.array([pt.strftime('%H:%M:%S') for pt in uniquetimes])
            qTEC=quietdata.TEC.to_numpy()
            qtime=quietdata.time.apply(lambda x: x.strftime('%H:%M:%S')).to_numpy()
            pivot=np.where(qtime==stime[0])[0]
            quietTEC=np.roll(qTEC,-pivot[0])
            quiettime=np.roll(qtime,-pivot[0])
            precondquiet.append(np.mean(quietTEC[0:len(stime)]))
            ############################################
        print("GOT Preconditioning TEC parameters for this storm")
        storm_TEC_precondition.append(precondstorm)
        quiet_TEC_precondition.append(precondquiet)
    return storm_TEC_precondition,quiet_TEC_precondition
""""'''''''''''''''''''''''''''''''''''''''''''''''''
'''''COMPUTE delta TEC for each storm '''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''"""""""""
def get_delta_TEC():
    """Load storm and time-aligned quiet TEC curves for every storm/sector.

    For each storm and sector, reads the processed storm and quiet HDF5
    files, patches NaNs in the quiet curve, rotates the quiet curve so it
    starts at the storm's first timestamp, and collects the pair.

    Returns
    -------
    df_HE, df_ME, df_LE, df_HW, df_MW, df_LW : lists interleaving
        [storm_TEC, quiet_TEC, storm_TEC, quiet_TEC, ...] per storm.
    """
    storm_hdf5_base = '/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Storm_Days/Storm_%d_%s.h5'
    quiet_hdf5_base = '/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Quiet_Days/Quiet_%d_%s.h5'
    onsets, minsyms = get_onset_time_new_set()
    domains = ['HE', 'ME', 'LE', 'HW', 'MW', 'LW']
    # One accumulator per sector, keyed by domain instead of a 6-way if/elif.
    frames = {domain: [] for domain in domains}
    for index in range(len(onsets)):
        for domain in domains:
            storm_fname = storm_hdf5_base % (index, domain)
            quiet_fname = quiet_hdf5_base % (index, domain)
            stormdata = pd.read_hdf(storm_fname, mode="r", key="df")
            quietdata = pd.read_hdf(quiet_fname, mode="r", key="df")
            sTEC = stormdata.TEC.to_numpy()
            stime = stormdata.time.apply(lambda x: x.strftime('%H:%M:%S')).to_numpy()
            qTEC = quietdata.TEC.to_numpy()
            qtime = quietdata.time.apply(lambda x: x.strftime('%H:%M:%S')).to_numpy()
            if np.isnan(qTEC).any():
                # Patch each NaN with the mean of its two neighbours on
                # each side.
                # NOTE(review): a NaN within the first/last two samples
                # wraps around via negative indexing (or raises IndexError
                # at the end) -- confirm quiet files never have edge NaNs.
                for l2 in np.argwhere(np.isnan(qTEC)):
                    l = l2[0]
                    qTEC[l] = (qTEC[l-2] + qTEC[l-1] + qTEC[l+1] + qTEC[l+2]) / 4.0
            # Rotate the quiet curve (which starts at 00:00 UT) so that it
            # begins at the storm's first timestamp; sanity-check alignment.
            pivot = np.where(qtime == stime[0])[0]
            if not np.array_equal(stime, np.roll(qtime, -pivot[0])):
                print ("RED FLAG!! ABORT")
            quietTEC = np.roll(qTEC, -pivot[0])
            frames[domain].append(sTEC)
            frames[domain].append(quietTEC)
    return (frames['HE'], frames['ME'], frames['LE'],
            frames['HW'], frames['MW'], frames['LW'])
""""'''''''''''''''''''''''''''''''''''''''''''''''''
'''''WRITE input output in hdf5 FILES''''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''"""""""""
def write_to_file_params():
    """Assemble per-sector feature tables (one row per storm) and save them.

    Combines storm-shape parameters from gen_average_storm(), TEC
    preconditioning levels, SYM-H statistics, onset-time clock encodings,
    and delta-TEC target statistics into one DataFrame per magnetic
    sector, written to Feature_Study/Datafile_<domain>.h5.
    """
    #####################################
    onsethour, fallofftime, slope, minsymh, rangesymh, symprofile = gen_average_storm()
    ons, mins = get_onset_time_new_set()
    # Onset as a float UT hour, plus its 24-h clock sine/cosine encoding.
    onsethour_float = np.array([round(start.hour + start.minute/60., 2)
                                for start in ons])
    onsine = np.sin(2 * np.pi * onsethour_float/24.0)
    oncos = np.cos(2 * np.pi * onsethour_float/24.0)
    storm_pre, quiet_pre = preconditiong_TEC()
    df_HE, df_ME, df_LE, df_HW, df_MW, df_LW = get_delta_TEC()
    hdf5_base = '/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Datafile_%s.h5'
    # Map each sector name to its interleaved [storm, quiet, ...] list
    # (replaces the original 6-way if/elif chain).
    sector_data = {'HE': df_HE, 'ME': df_ME, 'LE': df_LE,
                   'HW': df_HW, 'MW': df_MW, 'LW': df_LW}
    domains = ['HE', 'ME', 'LE', 'HW', 'MW', 'LW']
    ####################################################
    ##### ONE FILE WITH ALL PARAMETERS #################
    for index, domain in enumerate(domains):
        data = sector_data[domain]
        storm_TEC = data[::2]    # even entries: storm-day series
        quiet_TEC = data[1::2]   # odd entries: aligned quiet-day series
        ############################################
        ####### Y variables ########################
        dTEC = np.array(storm_TEC) - np.array(quiet_TEC)
        dTECmean = np.mean(dTEC, axis=1)
        dTECstd = np.std(dTEC, axis=1)
        dTECabs = np.mean(abs(dTEC), axis=1)
        #############################################
        ###### PRECONDITION VARIABLES ###############
        # One preconditioning value per storm for this sector.
        pre_storm = np.array([s_chunk[index] for s_chunk in storm_pre])
        pre_quiet = np.array([q_chunk[index] for q_chunk in quiet_pre])
        pre_delta = pre_storm - pre_quiet
        ######### SYM PRECONDITION ####################
        # NOTE(review): assumes symprofile is minute cadence with onset at
        # column 1440 (i.e. 24 h of history precedes onset) -- confirm
        # against gen_average_storm().
        storm_sym_precond = symprofile[:, 1440-120:1440]
        mean_sym_history = np.mean(storm_sym_precond, axis=1)
        ######### Mean storm sym #######################
        storm_sym = symprofile[:, 1440:1440*2]
        mean_sym_storm = np.mean(storm_sym, axis=1)
        ###############################################
        ############## LOCAL TIME #####################
        # Approximate local time from UT: East sectors UT-4 h, West UT-7 h.
        if domain[1] == 'E':
            localtimes = onsethour_float - 4.0
        else:
            localtimes = onsethour_float - 7.0
        locsine = np.sin(2 * np.pi * localtimes/24.0)
        loccos = np.cos(2 * np.pi * localtimes/24.0)
        _feat = pd.DataFrame({'Storm History':pre_storm,'Quiet History':pre_quiet,'Delta History':pre_delta,
                              'Fallofftime':np.array(fallofftime),'Slope':np.array(slope),'Minimum SYM':np.array(minsymh),
                              'Range SYM':rangesymh,'Mean SYM':mean_sym_storm,'SYM History':mean_sym_history,
                              'Onset UT':onsethour_float,'UT Onset sine':onsine,'UT Onset cos':oncos,
                              'Onset LT':localtimes,'LT Onset sine':locsine,'LT Onset cos':loccos,
                              'Average dTEC':dTECmean,'Std dTEC':dTECstd,'Absolute dTEC':dTECabs})
        fname = hdf5_base % domain
        print("Writing to file for %s" % domain)
        _feat.to_hdf(fname, mode="w", key="df")
        print("Completed feature listing")
        print("---------------")
def main_phase_only_write_to_file():
    """Like write_to_file_params(), but dTEC/SYM statistics are computed
    over the storm MAIN PHASE only (onset to SYM-H minimum) rather than
    the full 24 h, and saved to Mainphase_Datafile_<domain>.h5.
    """
    #####################################
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    # Onset as a float UT hour, plus its 24-h clock sine/cosine encoding.
    onsethour_float=[]
    for start in ons:
        onsethour_float.append(round(start.hour+start.minute/60.,2))
    onsethour_float=np.array(onsethour_float)
    onsine=np.sin(2 * np.pi * onsethour_float/24.0)
    oncos=np.cos(2 * np.pi * onsethour_float/24.0)
    # Main-phase duration per storm, in 5-minute samples (300 s), i.e. the
    # number of TEC samples between onset and SYM-H minimum.
    mains=[]
    for on_dt,min_dt in zip(ons,mins):
        mains.append(int((min_dt-on_dt).total_seconds()/300.))
    storm_pre,quiet_pre=preconditiong_TEC()
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Mainphase_Datafile_%s.h5'
    domains=['HE','ME','LE','HW','MW','LW']
    ####################################################
    ##### ONE FILE WITH ALL PARAMETERS #################
    for domain,index in zip(domains,range(6)):
        # Select this sector's interleaved [storm, quiet, ...] list.
        if domain=='HE':
            data=df_HE
        elif domain=='ME':
            data=df_ME
        elif domain=='LE':
            data=df_LE
        elif domain=='HW':
            data=df_HW
        elif domain=='MW':
            data=df_MW
        elif domain=='LW':
            data=df_LW
        storm_TEC=data[::2]
        quiet_TEC=data[1::2]
        ############################################
        ####### Y variables ########################
        # dTEC stats over the main phase only: first mains[r]+1 samples.
        dTEC=np.array(storm_TEC)-np.array(quiet_TEC)
        dTECmean=np.array([])
        dTECstd=np.array([])
        dTECabs=np.array([])
        for r in range(np.shape(storm_TEC)[0]):
            dTECmean=np.append(dTECmean,np.mean(dTEC[r,0:mains[r]+1]))
            dTECstd=np.append(dTECstd,np.std(dTEC[r,0:mains[r]+1]))
            dTECabs=np.append(dTECabs,np.mean(abs(dTEC[r,0:mains[r]+1])))
            print("Storm Num=%d,mainphase in %d"%(r,mains[r]))
        #############################################
        ###### PRECONDITION VARIABLES ###############
        pre_storm=np.array([])
        pre_quiet=np.array([])
        for s_chunk,q_chunk in zip(storm_pre,quiet_pre):
            pre_storm=np.append(pre_storm,s_chunk[index])
            pre_quiet=np.append(pre_quiet,q_chunk[index])
        pre_delta=pre_storm-pre_quiet
        ######### SYM PRECONDITION ####################
        ### GET MEAN SYM 2hrs prior######################
        # NOTE(review): assumes symprofile is minute cadence with onset at
        # column 1440 -- confirm against gen_average_storm(). Note the SYM
        # main-phase slice uses mains[s] (5-min units) as a MINUTE count.
        storm_sym_precond=symprofile[:,1440-120:1440]
        mean_sym_history=np.mean(storm_sym_precond,axis=1)
        ######### Mean storm sym #######################
        storm_sym=symprofile[:,1440:1440*2]
        mean_sym_storm=np.array([])
        for s in range(np.shape(storm_sym)[0]):
            mean_sym_storm=np.append(mean_sym_storm,np.mean(storm_sym[s,0:mains[s]+1]))
        ###############################################
        ############## LOCAL TIME #####################
        # Approximate local time from UT: East sectors UT-4 h, West UT-7 h.
        if domain[1]=='E':
            localtimes=onsethour_float-4.0
        else:
            localtimes=onsethour_float-7.0
        locsine=np.sin(2 * np.pi * localtimes/24.0)
        loccos=np.cos(2 * np.pi * localtimes/24.0)
        _feat=pd.DataFrame({'Storm History':pre_storm,'Quiet History':pre_quiet,'Delta History':pre_delta,
                            'Fallofftime':np.array(fallofftime),'Slope':np.array(slope),'Minimum SYM':np.array(minsymh),
                            'Range SYM':rangesymh,'Mean SYM':mean_sym_storm,'SYM History':mean_sym_history,
                            'Onset UT':onsethour_float,'UT Onset sine':onsine,'UT Onset cos':oncos,
                            'Onset LT':localtimes,'LT Onset sine':locsine,'LT Onset cos':loccos,
                            'Average dTEC':dTECmean,'Std dTEC':dTECstd,'Absolute dTEC':dTECabs})
        fname=hdf5_base%domain
        print("Writing to file for %s"%domain)
        _feat.to_hdf(fname,mode="w", key="df")
        print("Completed feature listing")
        print("---------------")
""""'''''''''''''''''''''''''''''''''''''''''''''''''
'''''GET PLOTS OF TEC for each storm '''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''"""""""""
def plot_TEC_individual_storm(num):
    """Plot delta-TEC (storm minus quiet) for one storm across all six
    sectors, overlaid with the storm's SYM-H trace on a twin axis.

    Parameters
    ----------
    num : int
        Storm index into the onset list / feature files.
    """
    hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Mainphase_Datafile_%s.h5'
    domains=['HW','HE','MW','ME','LW','LE']
    # Collect this storm's history/SYM features from each sector's file
    # (printed at the end for inspection).
    shists=np.array([])
    qhists=np.array([])
    meansyms=np.array([])
    for domain,i in zip(domains,range(6)):
        fname=hdf5_base%domain
        data=pd.read_hdf(fname,mode='r',key='df')
        X=data.drop(labels=["Average dTEC","Std dTEC","Absolute dTEC","Onset UT",
                            "Onset LT","LT Onset sine","LT Onset cos","Range SYM"],axis=1)
        y=data['Average dTEC']
        shists=np.append(shists,X['Storm History'][num])
        qhists=np.append(qhists,X['Quiet History'][num])
        meansyms=np.append(meansyms,X['Mean SYM'][num])
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    ######
    # 288 five-minute samples -> hours from onset on the x axis.
    xaxis=np.arange(0,288,1)/12.
    # De-interleave each sector list into storm (even) and quiet (odd) curves.
    storm_TEC_HE=df_HE[::2]
    quiet_TEC_HE=df_HE[1::2]
    storm_TEC_ME=df_ME[::2]
    quiet_TEC_ME=df_ME[1::2]
    storm_TEC_LE=df_LE[::2]
    quiet_TEC_LE=df_LE[1::2]
    storm_TEC_HW=df_HW[::2]
    quiet_TEC_HW=df_HW[1::2]
    storm_TEC_MW=df_MW[::2]
    quiet_TEC_MW=df_MW[1::2]
    storm_TEC_LW=df_LW[::2]
    quiet_TEC_LW=df_LW[1::2]
    ######
    UTonset=np.round(onsethour[num],2)
    mainphase=np.round(fallofftime[num],2)
    # Storm-time SYM-H: minutes 1440..2880 of the profile (onset at 1440).
    storm_time_sym=symprofile[:,24*60:48*60]
    thisstormsym=storm_time_sym[num]
    ######
    f=plt.figure(figsize=(25,15))
    ax=f.add_subplot(111)
    ax.plot(xaxis,storm_TEC_HE[num]-quiet_TEC_HE[num],'r-o',lw=3.0,label="Upper East")
    ax.plot(xaxis,storm_TEC_ME[num]-quiet_TEC_ME[num],'b-o',lw=3.0,label="Middle East")
    ax.plot(xaxis,storm_TEC_LE[num]-quiet_TEC_LE[num],'g-o',lw=3.0,label="Lower East")
    ax.plot(xaxis,storm_TEC_HW[num]-quiet_TEC_HW[num],'c-o',lw=3.0,label="Upper West")
    ax.plot(xaxis,storm_TEC_MW[num]-quiet_TEC_MW[num],'m-o',lw=3.0,label="Middle West")
    ax.plot(xaxis,storm_TEC_LW[num]-quiet_TEC_LW[num],'k-o',lw=3.0,label="Lower West")
    ax.set_xlabel("Hours from onset",fontsize=25)
    ax.set_ylabel("Total Electron Content (TECu)",fontsize=25)
    # NOTE(review): tick.label is removed in newer matplotlib (use
    # tick.label1 / ax.tick_params) -- works only on older versions.
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    ax.legend(prop={'size':25})
    ax.set_xlim([0,24])
    # ax.set_ylim([-35,35])
    ax.set_title("Storm on %d/%d/%d, starting at %.2f UT"%(ons[num].month,ons[num].day,
                                                           ons[num].year,
                                                           UTonset),fontsize=25)
    # Vertical marker at end of main phase.
    ax.axvline(x=mainphase,lw=2.0,color='k',alpha=0.3)
    # SYM-H on a twin axis, decimated from 1-min to 5-min cadence ([::5]).
    ax2=ax.twinx()
    ax2.plot(xaxis,thisstormsym[::5],'y--',alpha=0.5,lw=2.0)
    ax2.set_ylabel('SYM-H (nT)',fontsize=25)
    ax2.tick_params(axis = 'both', which = 'major', labelsize = 20)
    print shists,qhists,meansyms
""""'''''''''''''''''''''''''''''''''''''''''''''''''
'''''GET PLOTS OF TEC for all storms '''''''''''''''
''''''''''''''''''''''''''''''''''''''''''''"""""""""
def plot_TEC_all_storm():
    """Plot the storm-average delta-TEC (mean over all storms of storm
    minus quiet TEC) for each of the six magnetic sectors.
    """
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    ######
    # 288 five-minute samples -> hours from onset on the x axis.
    xaxis=np.arange(0,288,1)/12.
    # De-interleave each sector list into storm (even) and quiet (odd) curves.
    storm_TEC_HE=df_HE[::2]
    quiet_TEC_HE=df_HE[1::2]
    storm_TEC_ME=df_ME[::2]
    quiet_TEC_ME=df_ME[1::2]
    storm_TEC_LE=df_LE[::2]
    quiet_TEC_LE=df_LE[1::2]
    storm_TEC_HW=df_HW[::2]
    quiet_TEC_HW=df_HW[1::2]
    storm_TEC_MW=df_MW[::2]
    quiet_TEC_MW=df_MW[1::2]
    storm_TEC_LW=df_LW[::2]
    quiet_TEC_LW=df_LW[1::2]
    ######
    f=plt.figure(figsize=(25,15))
    ax=f.add_subplot(111)
    # Mean over storms (axis=0), then storm-minus-quiet difference.
    ax.plot(xaxis,np.mean(storm_TEC_HE,axis=0)-np.mean(quiet_TEC_HE,axis=0),'r-o',lw=3.0,label="Upper East")
    ax.plot(xaxis,np.mean(storm_TEC_ME,axis=0)-np.mean(quiet_TEC_ME,axis=0),'b-o',lw=3.0,label="Middle East")
    ax.plot(xaxis,np.mean(storm_TEC_LE,axis=0)-np.mean(quiet_TEC_LE,axis=0),'g-o',lw=3.0,label="Lower East")
    ax.plot(xaxis,np.mean(storm_TEC_HW,axis=0)-np.mean(quiet_TEC_HW,axis=0),'c-o',lw=3.0,label="Upper West")
    ax.plot(xaxis,np.mean(storm_TEC_MW,axis=0)-np.mean(quiet_TEC_MW,axis=0),'m-o',lw=3.0,label="Middle West")
    ax.plot(xaxis,np.mean(storm_TEC_LW,axis=0)-np.mean(quiet_TEC_LW,axis=0),'k-o',lw=3.0,label="Lower West")
    ax.set_xlabel("Hours from onset",fontsize=25)
    ax.set_ylabel("$\Delta$TEC (TECu)",fontsize=25)
    # NOTE(review): tick.label is removed in newer matplotlib -- works only
    # on older versions.
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    ax.legend(prop={'size':25})
    ax.set_xlim([0,24])
    ax.axhline(y=0,c='k',lw=2.0,alpha=0.5)
    # ax.set_ylim([-35,35])
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''''''''CORRELATION OF PARAMETERS''''''''''''''''''''''
"""''''''''''''''''''''''''''''''''''''''''''''''''''""""""
def plot_correlation_importance():
    """Bar-plot the unsigned Spearman correlation of each feature with
    average delta-TEC, one panel per magnetic sector (3x2 grid).

    NOTE(review): the suptitle says "std of dTEC" but y is
    data['Average dTEC'] (the 'Std dTEC' line is commented out) -- one of
    the two is stale; confirm which target is intended.
    """
    hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Datafile_%s.h5'
    domains=['HW','HE','MW','ME','LW','LE']
    fig, axs = plt.subplots(3,2, figsize=(30, 20), facecolor='w', edgecolor='k')
    fig.subplots_adjust(top=0.915,bottom=0.18,left=0.04,right=0.98,
                        hspace=0.14,wspace=0.075 )
    axs=axs.ravel()
    for domain,i in zip(domains,range(6)):
        fname=hdf5_base%domain
        data=pd.read_hdf(fname,mode='r',key='df')
        # Drop target columns and redundant clock/range encodings, keeping
        # only the candidate predictor features.
        X=data.drop(labels=["Average dTEC","Std dTEC","Absolute dTEC","Onset UT",
                            "Onset LT","LT Onset sine","LT Onset cos","Range SYM"],axis=1)
        y=data['Average dTEC']
        # y=data['Std dTEC']
        # Unsigned Spearman rank correlation of each feature against y.
        corr_array=[abs(spear(X[k],y)[0]) for k in X.keys()]
        ##### PLOT ##############
        index = np.arange(len(corr_array))+0.5
        bar_width = 0.8
        opacity = 0.8
        axs[i].bar(index,corr_array,bar_width, alpha=opacity, color='xkcd:red')
        axs[i].set_ylim([0,0.7])
        if i>3:
            # Bottom row gets x labels and rotated feature-name ticks.
            axs[i].set_xlabel('Storm Characteristics',labelpad=0.4,fontsize=20)
            # axs[i].set_ylabel('Unsigned spearman coefficient')
            axs[i].set_xticks(index-0.5)
            axs[i].set_xticklabels(X.keys())
            axs[i].tick_params(axis='x', which='both',length=0)
            for tick in axs[i].xaxis.get_major_ticks():
                tick.label.set_fontsize(15)
                tick.label.set_rotation(45)
            for tick in axs[i].yaxis.get_major_ticks():
                tick.label.set_fontsize(15)
        else:
            # axs[i].set_ylabel('Unsigned spearman coefficient')
            empty_string_labels = ['']*len(X.keys())
            axs[i].set_xticklabels(empty_string_labels)
            for tick in axs[i].yaxis.get_major_ticks():
                tick.label.set_fontsize(15)
        axs[i].set_title(domain,fontsize=15)
        axs[i].grid(axis='y')
    plt.suptitle("Unsigned Spearman Rank Correlation with std of $\Delta$ TEC",fontsize=25)
"""'''''''''''''''''''''''''''''''''''''''''''''''''''''''
'''''''''''PARTIAL CORRELATION OF PARAMETERS''''''''''''''''''''''
"""''''''''''''''''''''''''''''''''''''''''''''''''''""""""
def plot_partial_correlation_importance():
    """Bar-plot the unsigned partial correlation of each feature with the
    chosen delta-TEC target ('Average' or 'Std'), one panel per sector.

    The target is selected by the local 'var' flag; the target column is
    kept in X so partial_corr() includes it, then its row of the partial
    correlation matrix is extracted and its self-correlation removed.
    """
    var='Std'
    hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Datafile_%s.h5'
    domains=['HW','HE','MW','ME','LW','LE']
    fig, axs = plt.subplots(3,2, figsize=(30, 20), facecolor='w', edgecolor='k')
    fig.subplots_adjust(top=0.915,bottom=0.18,left=0.04,right=0.98,
                        hspace=0.14,wspace=0.075 )
    axs=axs.ravel()
    for domain,i in zip(domains,range(6)):
        fname=hdf5_base%domain
        data=pd.read_hdf(fname,mode='r',key='df')
        if var=='Average':
            # 'Average dTEC' remains as column 0 after the drop.
            X=data.drop(labels=["Std dTEC","Absolute dTEC","Onset UT",
                                "Onset LT","LT Onset sine","LT Onset cos","Range SYM"],axis=1)
            pcorr=partial_corr(X)
            corr_array=abs(np.delete(pcorr[0],0))
            keys=np.delete(X.keys(),0)
        else:
            # 'Std dTEC' sits at column 7 after this drop.
            # NOTE(review): index 7 is hard-coded to the column order of the
            # feature files -- confirm it still points at 'Std dTEC' if the
            # feature set changes.
            X=data.drop(labels=["Average dTEC","Absolute dTEC","Onset UT",
                                "Onset LT","LT Onset sine","LT Onset cos","Range SYM"],axis=1)
            pcorr=partial_corr(X)
            corr_array=abs(np.delete(pcorr[7],7))
            keys=np.delete(X.keys(),7)
        ##### PLOT ##############
        index = np.arange(len(corr_array))+0.5
        bar_width = 0.8
        opacity = 0.8
        axs[i].bar(index,corr_array,bar_width, alpha=opacity, color='xkcd:red')
        axs[i].set_ylim([0,0.5])
        if i>3:
            # Bottom row gets x labels and rotated feature-name ticks.
            axs[i].set_xlabel('Storm Characteristics',labelpad=0.4,fontsize=20)
            # axs[i].set_ylabel('Unsigned spearman coefficient')
            axs[i].set_xticks(index-0.5)
            axs[i].set_xticklabels(keys)
            axs[i].tick_params(axis='x', which='both',length=0)
            for tick in axs[i].xaxis.get_major_ticks():
                tick.label.set_fontsize(15)
                tick.label.set_rotation(45)
            for tick in axs[i].yaxis.get_major_ticks():
                tick.label.set_fontsize(15)
        else:
            # axs[i].set_ylabel('Unsigned spearman coefficient')
            empty_string_labels = ['']*len(keys)
            axs[i].set_xticklabels(empty_string_labels)
            for tick in axs[i].yaxis.get_major_ticks():
                tick.label.set_fontsize(15)
        axs[i].set_title(domain,fontsize=15)
        axs[i].grid(axis='y')
    plt.suptitle("Partial Correlation with %s of $\Delta$ TEC"%var,fontsize=25)
def plot_correlation_importance_mainphase():
    """Bar-plot the unsigned Spearman correlation of each feature with
    main-phase average delta-TEC, one panel per sector, restricted to
    storms with Fallofftime > 16.
    """
    hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Mainphase_Datafile_%s.h5'
    domains=['HW','HE','MW','ME','LW','LE']
    fig, axs = plt.subplots(3,2, figsize=(30, 20), facecolor='w', edgecolor='k')
    fig.subplots_adjust(top=0.915,bottom=0.18,left=0.04,right=0.98,
                        hspace=0.14,wspace=0.075 )
    axs=axs.ravel()
    for domain,i in zip(domains,range(6)):
        fname=hdf5_base%domain
        data=pd.read_hdf(fname,mode='r',key='df')
        # Drop target columns and redundant clock/range encodings.
        X=data.drop(labels=["Average dTEC","Std dTEC","Absolute dTEC","Onset UT",
                            "Onset LT","LT Onset sine","LT Onset cos","Range SYM"],axis=1)
        y=data['Average dTEC']
        # Keep only long-duration storms (Fallofftime > 16).
        mask=X['Fallofftime']>16
        X=X[mask]
        y=y[mask]
        # y=data['Std dTEC']
        corr_array=[abs(spear(X[k],y)[0]) for k in X.keys()]
        ##### PLOT ##############
        index = np.arange(len(corr_array))+0.5
        bar_width = 0.8
        opacity = 0.8
        axs[i].bar(index,corr_array,bar_width, alpha=opacity, color='xkcd:red')
        axs[i].set_ylim([0,0.7])
        if i>3:
            # Bottom row gets x labels and rotated feature-name ticks.
            axs[i].set_xlabel('Storm Characteristics',labelpad=0.4,fontsize=20)
            # axs[i].set_ylabel('Unsigned spearman coefficient')
            axs[i].set_xticks(index-0.5)
            axs[i].set_xticklabels(X.keys())
            axs[i].tick_params(axis='x', which='both',length=0)
            for tick in axs[i].xaxis.get_major_ticks():
                tick.label.set_fontsize(15)
                tick.label.set_rotation(45)
            for tick in axs[i].yaxis.get_major_ticks():
                tick.label.set_fontsize(15)
        else:
            # axs[i].set_ylabel('Unsigned spearman coefficient')
            empty_string_labels = ['']*len(X.keys())
            axs[i].set_xticklabels(empty_string_labels)
            for tick in axs[i].yaxis.get_major_ticks():
                tick.label.set_fontsize(15)
        axs[i].set_title(domain,fontsize=15)
        axs[i].grid(axis='y')
    plt.suptitle("Unsigned Spearman Rank Correlation with MAINPHASE avg of $\Delta$ TEC",fontsize=25)
def clock_plot(domain):
    """Scatter post-storm dTEC statistics on a 24-hour 'clock face'.

    Left panel: each storm plotted at (sin, cos) of its onset UT, colored
    by average dTEC.  Right panel: same positions colored by the standard
    deviation of dTEC.  The annotated arrows/digits (0, 6, 12, 18) mark
    the clock hours; the y axis is inverted so 0 UT sits at the top.

    domain -- sector code used to pick the per-sector HDF5 feature file.
    """
    hdf5_base='/home/sdch10/Datafiles/TEC_Processed_Storm_Study/Feature_Study/Datafile_%s.h5'
    fname=hdf5_base%domain
    data=pd.read_hdf(fname,mode='r',key='df')
    f=plt.figure()
    ax=f.add_subplot(121)
    cmap=matplotlib.cm.RdBu_r
    # Color encodes average dTEC; marker position encodes onset UT on the clock.
    ff=ax.scatter(data['UT Onset sine'],data['UT Onset cos'],c=data['Average dTEC'],s=500,cmap=cmap)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("bottom", size="5%", pad=1.00)
    cbar=f.colorbar(ff,cax=cax,orientation="horizontal")
    cax.xaxis.set_ticks_position("bottom")
    cbar.ax.tick_params(labelsize=25)
    cbar.ax.set_xlabel('Average $\Delta$TEC (TECu)',fontsize=25)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    ax.set_aspect('equal')
    ax.set_xticks([-1.0,-0.5,0.0,0.5,1.0])
    ax.set_yticks([-1.0,-0.5,0.0,0.5,1.0])
    ax.set_xlabel('Sine of 24-hr UT clock',fontsize=25)
    ax.set_ylabel('Cos of 24-hr UT clock',fontsize=25)
    # Cross-hair arrows plus the hour digits of the clock face.
    ax.annotate(s='', xy=(0.8,0), xytext=(-0.75,0), arrowprops=dict(facecolor='black',arrowstyle='<->'),fontsize=25)
    ax.annotate(s='', xy=(0,-0.8), xytext=(0,0.8), arrowprops=dict(facecolor='black',arrowstyle='<->'),fontsize=25)
    ax.text(x=-0.04,y=-0.8,s='12',fontsize=25)
    ax.text(x=0.8,y=-0.04,s='6',fontsize=25)
    ax.text(x=-0.9,y=-0.04,s='18',fontsize=25)
    ax.text(x=-0.04,y=0.9,s='0',fontsize=25)
    ax.grid(which='major',axis='both')
    ax.invert_yaxis()
    ##########
    # Right panel: identical layout, colored by the std of dTEC instead.
    ax2=f.add_subplot(122)
    ff2=ax2.scatter(data['UT Onset sine'],data['UT Onset cos'],c=data['Std dTEC'],s=500,cmap=cmap)
    divider = make_axes_locatable(ax2)
    cax2 = divider.append_axes("bottom", size="5%", pad=1.00)
    cbar2=f.colorbar(ff2,cax=cax2,orientation="horizontal")
    cax2.xaxis.set_ticks_position("bottom")
    cbar2.ax.tick_params(labelsize=25)
    cbar2.ax.set_xlabel('Std $\Delta$TEC (TECu)',fontsize=25)
    for tick in ax2.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax2.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    ax2.set_aspect('equal')
    ax2.set_xticks([-1.0,-0.5,0.0,0.5,1.0])
    ax2.set_yticks([-1.0,-0.5,0.0,0.5,1.0])
    ax2.set_xlabel('Sine of 24-hr UT clock',fontsize=25)
    ax2.set_ylabel('Cos of 24-hr UT clock',fontsize=25)
    ax2.annotate(s='', xy=(0.8,0), xytext=(-0.75,0), arrowprops=dict(facecolor='black',arrowstyle='<->'),fontsize=25)
    ax2.annotate(s='', xy=(0,-0.8), xytext=(0,0.8), arrowprops=dict(facecolor='black',arrowstyle='<->'),fontsize=25)
    ax2.text(x=-0.04,y=-0.8,s='12',fontsize=25)
    ax2.text(x=0.8,y=-0.04,s='6',fontsize=25)
    ax2.text(x=-0.9,y=-0.04,s='18',fontsize=25)
    ax2.text(x=-0.04,y=0.9,s='0',fontsize=25)
    ax2.grid(which='major',axis='both')
    ax2.invert_yaxis()
    ##############
    f.subplots_adjust(top=0.915,
                      bottom=0.095,
                      left=0.06,
                      right=1.0,
                      hspace=0.2,
                      wspace=0.075)
    # NOTE(review): title hard-codes one sector's description regardless of
    # the `domain` argument -- confirm before reusing for other sectors.
    f.suptitle('Onset UT dependence on post-storm $\Delta$TEC distribution in High Latitude Positive Declination',fontsize=25)
def sunrise_plot(domain):
    """Heat-map of mean dTEC versus onset hour and time after onset.

    Storms are bucketed by their integer onset hour (UT); each row of the
    24x288 matrix holds the mean dTEC time series (288 five-minute
    samples = 24 h) of the storms starting in that hour.  A dashed curve
    marks 12 UT relative to each onset hour.  Depends on the module-level
    helpers gen_average_storm(), get_onset_time_new_set(),
    get_delta_TEC() and MidpointNormalize.

    domain -- sector code selecting which dTEC dataframe to use.
    """
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    # Fractional onset hour (e.g. 13.50 for 13:30) for every storm.
    onsethour_float=[]
    for start in ons:
        onsethour_float.append(round(start.hour+start.minute/60.,2))
    onsethour_float=np.array(onsethour_float)
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    if domain=='HE':
        data=df_HE
    elif domain=='ME':
        data=df_ME
    elif domain=='LE':
        data=df_LE
    elif domain=='HW':
        data=df_HW
    elif domain=='MW':
        data=df_MW
    elif domain=='LW':
        data=df_LW
    # Rows alternate storm-time / quiet-time reference series.
    storm_TEC=data[::2]
    quiet_TEC=data[1::2]
    ############################################
    dTEC=np.array(storm_TEC)-np.array(quiet_TEC)
    matrix=np.zeros([24,288])
    countmat=np.zeros(24)
    nstorms=np.shape(storm_TEC)[0]
    # Accumulate dTEC per integer onset hour, then average each bucket.
    for i in range(nstorms):
        thisonset=int(onsethour_float[i])
        countmat[thisonset]+=1
        matrix[thisonset]=matrix[thisonset]+dTEC[i]
    for count,i in zip(countmat,range(24)):
        if not count==0:
            matrix[i]=matrix[i]/count
    #############
    # Color limits from the populated cells only (empty buckets stay 0).
    mymin=np.min(matrix[np.nonzero(matrix)])
    mymax=np.max(matrix[np.nonzero(matrix)])
    # Dashed demarcation curve: hours from onset until 12 UT, wrapped to
    # [0, 24) and split where it jumps so the plot does not connect the wrap.
    yaxis=np.arange(0,24,0.1)
    xaxis_sunrise=12.*np.ones(np.size(yaxis))-yaxis
    xaxis_sunrise=xaxis_sunrise%24
    locs=np.where(xaxis_sunrise>12)[0]
    demarc=locs[0]
    # FIX: was the Python 2 statement `print countmat`, a SyntaxError under
    # Python 3 (the rest of the file uses the print() function).
    print(countmat)
    ##############
    fig=plt.figure()
    ax=fig.add_subplot(111)
    cmap=matplotlib.cm.RdBu_r
    ret=ax.pcolor(matrix,cmap=cmap,vmin=mymin,vmax=mymax,norm=MidpointNormalize(mymin, mymax, 0.))
    cbar=fig.colorbar(ret,ax=ax)
    cbar.ax.tick_params(labelsize=25)
    cbar.ax.set_ylabel('$\Delta$TEC (TECu)',fontsize=25)
    ax.set_yticks(np.arange(24)+0.5)
    ax.set_yticklabels(np.arange(24))
    ax.set_xticks(np.arange(0,288,24))
    # FIX: floor division keeps the tick labels as integers under Python 3
    # (true division would label them 0.0, 2.0, ...).
    ax.set_xticklabels(np.arange(0,288,24)//12)
    ax.set_xlabel("Hours after onset",fontsize=25)
    ax.set_ylabel("Onset Time (UT)",fontsize=25)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    # Samples are 5-minute bins, hence the *12 to convert hours to columns.
    ax.plot(xaxis_sunrise[0:demarc]*12,yaxis[0:demarc],'k--',lw=2.0)
    ax.plot(xaxis_sunrise[demarc:]*12,yaxis[demarc:],'k--',lw=2.0)
    plt.title('Effect of Onset Time',fontsize=25)
    plt.show()
def sunrise_plot_mainphase_sorted(domain,dur):
    """Same heat-map as sunrise_plot(), restricted to short main phases.

    Only storms whose falloff time is below `dur` hours contribute to the
    per-onset-hour average dTEC matrix (24 onset hours x 288 five-minute
    samples).  Depends on the module-level helpers gen_average_storm(),
    get_onset_time_new_set(), get_delta_TEC() and MidpointNormalize.

    domain -- sector code selecting which dTEC dataframe to use.
    dur    -- main-phase duration threshold in hours (exclusive).
    """
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    # Fractional onset hour (e.g. 13.50 for 13:30) for every storm.
    onsethour_float=[]
    for start in ons:
        onsethour_float.append(round(start.hour+start.minute/60.,2))
    onsethour_float=np.array(onsethour_float)
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    if domain=='HE':
        data=df_HE
    elif domain=='ME':
        data=df_ME
    elif domain=='LE':
        data=df_LE
    elif domain=='HW':
        data=df_HW
    elif domain=='MW':
        data=df_MW
    elif domain=='LW':
        data=df_LW
    # Rows alternate storm-time / quiet-time reference series.
    storm_TEC=data[::2]
    quiet_TEC=data[1::2]
    ############################################
    dTEC=np.array(storm_TEC)-np.array(quiet_TEC)
    matrix=np.zeros([24,288])
    countmat=np.zeros(24)
    nstorms=np.shape(storm_TEC)[0]
    # Accumulate dTEC per integer onset hour, skipping long main phases.
    for i in range(nstorms):
        thisonset=int(onsethour_float[i])
        thisfalloff=fallofftime[i]
        if thisfalloff<dur:
            countmat[thisonset]+=1
            matrix[thisonset]=matrix[thisonset]+dTEC[i]
    for count,i in zip(countmat,range(24)):
        if not count==0:
            matrix[i]=matrix[i]/count
    #############
    # Color limits from the populated cells only (empty buckets stay 0).
    mymin=np.min(matrix[np.nonzero(matrix)])
    mymax=np.max(matrix[np.nonzero(matrix)])
    # Dashed demarcation curve (hours until 12 UT), split at the 24 h wrap.
    yaxis=np.arange(0,24,0.1)
    xaxis_sunrise=12.*np.ones(np.size(yaxis))-yaxis
    xaxis_sunrise=xaxis_sunrise%24
    locs=np.where(xaxis_sunrise>12)[0]
    demarc=locs[0]
    # print countmat
    ##############
    fig=plt.figure()
    ax=fig.add_subplot(111)
    cmap=matplotlib.cm.RdBu_r
    ret=ax.pcolor(matrix,cmap=cmap,vmin=mymin,vmax=mymax,norm=MidpointNormalize(mymin, mymax, 0.))
    cbar=fig.colorbar(ret,ax=ax)
    cbar.ax.tick_params(labelsize=25)
    cbar.ax.set_ylabel('$\Delta$TEC (TECu)',fontsize=25)
    ax.set_yticks(np.arange(24)+0.5)
    ax.set_yticklabels(np.arange(24))
    ax.set_xticks(np.arange(0,288,24))
    # NOTE(review): under Python 3 true division labels these 0.0, 2.0, ...;
    # `//12` would restore the integer labels the Python 2 code produced.
    ax.set_xticklabels(np.arange(0,288,24)/12)
    ax.set_xlabel("Hours after onset",fontsize=25)
    ax.set_ylabel("Onset Time (UT)",fontsize=25)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    # Samples are 5-minute bins, hence the *12 to convert hours to columns.
    ax.plot(xaxis_sunrise[0:demarc]*12,yaxis[0:demarc],'k--',lw=2.0)
    ax.plot(xaxis_sunrise[demarc:]*12,yaxis[demarc:],'k--',lw=2.0)
    plt.title('Effect of Onset Time for storms <%d hr mainphase'%int(dur),fontsize=25)
    plt.show()
def sunrise_dependence_compute(domain,data,ons,numhrs):
    '''
    Average each storm's dTEC over a window anchored at local sunrise.

    numhrs: how many hours after sunrise? negative is before sunrise
    domain: which sector; the East sectors ('HE'/'ME'/'LE') use New York
            sunrise times, the West sectors ('HW'/'MW'/'LW') use Phoenix
    data:   interleaved rows -- even rows storm-time TEC, odd rows the
            matching quiet-time reference (288 five-minute samples each)
    ons:    per-storm onset datetimes, in the same order as the data rows

    Returns (avgsunrise, startsunrise): the per-storm mean dTEC over the
    sunrise window, and the hours from onset to the next sunrise.
    The window is clamped to the available 24 h of samples (index 0..288).
    '''
    # Fractional onset hour (e.g. 13.50 for 13:30) for every storm.
    onsethour_float=[]
    for start in ons:
        onsethour_float.append(round(start.hour+start.minute/60.,2))
    onsethour_float=np.array(onsethour_float)
    a=Astral()
    a.solar_depression = 'civil'
    # Representative city per longitude sector for sunrise computation.
    if domain=='HE':
        city=a['New York']
    elif domain=='ME':
        city=a['New York']
    elif domain=='LE':
        city=a['New York']
    elif domain=='HW':
        city=a['Phoenix']
    elif domain=='MW':
        city=a['Phoenix']
    elif domain=='LW':
        city=a['Phoenix']
    storm_TEC=data[::2]
    quiet_TEC=data[1::2]
    ############################################
    dTEC=np.array(storm_TEC)-np.array(quiet_TEC)
    nums=len(onsethour_float)
    avgsunrise=np.zeros(nums)
    startsunrise=[]
    for i in range(nums):
        dTECarr=dTEC[i,:]
        #######
        # Sunrise (UTC, local=False) on the storm's onset date.
        sun=city.sun(date=ons[i],local=False)
        sunrisehr=sun['sunrise'].hour
        sunrisemin=sun['sunrise'].minute
        sunrisetime=sunrisehr+sunrisemin/60.
        #######
        # Hours from onset until the next sunrise, wrapped to [0, 24).
        timetosun=(sunrisetime-onsethour_float[i])%24
        startsunrise.append(timetosun)
        #######
        if numhrs>0:
            # Window of `numhrs` hours AFTER sunrise (12 samples per hour),
            # clamped to the end of the 24 h record.
            start=int(timetosun*12)
            if timetosun<(24-numhrs):
                end=start+numhrs*12
            else:
                end=288
            mean_dTEC=np.mean(dTECarr[start:end])
            avgsunrise[i]=mean_dTEC
        else:
            # Window of |numhrs| hours BEFORE sunrise, clamped to the
            # start of the record.
            end=int(timetosun*12)
            if timetosun>abs(numhrs):
                start=end+numhrs*12
            else:
                start=0
            mean_dTEC=np.mean(dTECarr[start:end])
            avgsunrise[i]=mean_dTEC
    return avgsunrise,np.array(startsunrise)
def sunrise_effect_by_sector():
    """Plot, per sector, the mean dTEC in windows offset from sunrise.

    For every sector the dTEC is averaged over windows of -3..-1 hours
    before and 1..4 hours after sunrise (via sunrise_dependence_compute),
    then the mean over all storms is drawn as one line per sector.
    Standard deviations are computed too but the errorbar calls are
    currently commented out.  Depends on the module-level helpers
    gen_average_storm(), get_onset_time_new_set() and get_delta_TEC().
    """
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    domains=['HW','HE','MW','ME','LW','LE']
    # Window lengths in hours relative to sunrise (negative = before).
    offsethrs=[-3,-2,-1,1,2,3,4]
    sunrise_dTEC=[]
    stds=[]
    for domain,i in zip(domains,range(6)):
        if domain=='HE':
            data=df_HE
        elif domain=='ME':
            data=df_ME
        elif domain=='LE':
            data=df_LE
        elif domain=='HW':
            data=df_HW
        elif domain=='MW':
            data=df_MW
        elif domain=='LW':
            data=df_LW
        # Mean and std over all storms for each sunrise offset window.
        avgsunrise=[]
        stdsunrise=[]
        for offhr in offsethrs:
            avgeffect,sunrises=sunrise_dependence_compute(domain,data,ons,offhr)
            avgsunrise.append(np.mean(avgeffect))
            stdsunrise.append(np.std(avgeffect))
        sunrise_dTEC.append(avgsunrise)
        stds.append(stdsunrise)
    ################
    f=plt.figure(figsize=(25,15))
    ax=f.add_subplot(111)
    ax.plot(offsethrs,sunrise_dTEC[0],'r-o',lw=4.0,ms=8,label=domains[0])
    ax.plot(offsethrs,sunrise_dTEC[1],'g-o',lw=4.0,ms=8,label=domains[1])
    ax.plot(offsethrs,sunrise_dTEC[2],'b-o',lw=4.0,ms=8,label=domains[2])
    ax.plot(offsethrs,sunrise_dTEC[3],'c-o',lw=4.0,ms=8,label=domains[3])
    ax.plot(offsethrs,sunrise_dTEC[4],'m-o',lw=4.0,ms=8,label=domains[4])
    ax.plot(offsethrs,sunrise_dTEC[5],'k-o',lw=4.0,ms=8,label=domains[5])
    # ax.errorbar(offsethrs,sunrise_dTEC[0],yerr=stds[0],ecolor='r',elinewidth=3,capthick=5)
    # ax.errorbar(offsethrs,sunrise_dTEC[1],yerr=stds[1],ecolor='g',elinewidth=3,capthick=5)
    # ax.errorbar(offsethrs,sunrise_dTEC[2],yerr=stds[2],ecolor='b',elinewidth=3,capthick=5)
    # ax.errorbar(offsethrs,sunrise_dTEC[3],yerr=stds[3],ecolor='c',elinewidth=3,capthick=5)
    # ax.errorbar(offsethrs,sunrise_dTEC[4],yerr=stds[4],ecolor='m',elinewidth=3,capthick=5)
    # ax.errorbar(offsethrs,sunrise_dTEC[5],yerr=stds[5],ecolor='k',elinewidth=3,capthick=5)
    ax.set_xlabel('Hours offset from sunrise',fontsize=25)
    ax.set_ylabel('Average $\Delta$TEC for all storms (TECu)',fontsize=25)
    for tick in ax.xaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(25)
    ax.legend(prop={'size':25})
    # plt.title("Unsigned Spearman Rank Correlation with MAINPHASE avg of $\Delta$ TEC",fontsize=25)
def histogram_sunrise_effect_by_sector():
    """Plot, per sector, fitted normal PDFs of the sunrise-window dTEC.

    For each sector the per-storm dTEC is averaged over windows 2 h / 1 h
    before and 1 h / 2 h after sunrise (sunrise_dependence_compute), a
    normal distribution is fitted to each set (stats.norm.fit), and the
    resulting PDFs are drawn over a common [-10, 10] TECu grid with a
    dashed vertical line at each fitted mean.  Depends on the module-level
    helpers gen_average_storm(), get_onset_time_new_set() and
    get_delta_TEC(), plus `stats` and `norm` imports.
    """
    onsethour,fallofftime,slope,minsymh,rangesymh,symprofile=gen_average_storm()
    ons,mins=get_onset_time_new_set()
    df_HE,df_ME,df_LE,df_HW,df_MW,df_LW=get_delta_TEC()
    domains=['HW','HE','MW','ME','LW','LE']
    fig, axs = plt.subplots(3,2, figsize=(30, 20), facecolor='w', edgecolor='k')
    fig.subplots_adjust(top=0.914,
                        bottom=0.077,
                        left=0.064,
                        right=0.99,
                        hspace=0.294,
                        wspace=0.108 )
    # Common evaluation grid for every fitted PDF, in TECu.
    bins=np.linspace(-10,10,25)
    axs=axs.ravel()
    for domain,i in zip(domains,range(6)):
        if domain=='HE':
            data=df_HE
        elif domain=='ME':
            data=df_ME
        elif domain=='LE':
            data=df_LE
        elif domain=='HW':
            data=df_HW
        elif domain=='MW':
            data=df_MW
        elif domain=='LW':
            data=df_LW
        # Window-averaged dTEC 2 h / 1 h before and 1 h / 2 h after sunrise.
        avgeffect_2,sunrises_2=sunrise_dependence_compute(domain,data,ons,-2)
        avgeffect_1,sunrises_1=sunrise_dependence_compute(domain,data,ons,-1)
        avgeffect1,sunrises1=sunrise_dependence_compute(domain,data,ons,1)
        avgeffect2,sunrises2=sunrise_dependence_compute(domain,data,ons,2)
        m, s = stats.norm.fit(avgeffect_2) # get mean and standard deviation
        pdf = norm.pdf(bins, m, s) # now get theoretical values in our interval
        axs[i].plot(bins, pdf, 'b',lw=3.0, label="2 hr before")
        axs[i].axvline(x=m,c='b',ls='--',lw=3.0)
        m, s = stats.norm.fit(avgeffect_1) # get mean and standard deviation
        pdf = norm.pdf(bins, m, s) # now get theoretical values in our interval
        axs[i].plot(bins, pdf, 'c',lw=3.0,label="1 hr before")
        axs[i].axvline(x=m,c='c',ls='--',lw=3.0)
        m, s = stats.norm.fit(avgeffect1) # get mean and standard deviation
        pdf = norm.pdf(bins, m, s) # now get theoretical values in our interval
        axs[i].plot(bins, pdf, 'g',lw=3.0,label="1 hr after")
        axs[i].axvline(x=m,c='g',ls='--',lw=3.0)
        m, s = stats.norm.fit(avgeffect2) # get mean and standard deviation
        pdf = norm.pdf(bins, m, s) # now get theoretical values in our interval
        axs[i].plot(bins, pdf, 'r',lw=3.0,label="2 hr after")
        axs[i].axvline(x=m,c='r',ls='--',lw=3.0)
        axs[i].set_ylim([0,0.2])
        if i>3:
            axs[i].set_xlabel('Hours relative to sunrise',labelpad=0.4,fontsize=20)
        if i in [0,2,4]:
            axs[i].set_ylabel('Probability density',fontsize=20)
        axs[i].legend(prop={'size':15})
        for tick in axs[i].xaxis.get_major_ticks():
            tick.label.set_fontsize(15)
        # FIX: a duplicated, redundant copy of this y-axis font loop was removed.
        for tick in axs[i].yaxis.get_major_ticks():
            tick.label.set_fontsize(15)
        axs[i].set_title(domain,fontsize=15)
    plt.suptitle("Distribution of $\Delta$TEC variation relative to sunrise for all storms",fontsize=25)
# plt.tight_layout()
if __name__ == "__main__":
    # Uncomment the pipeline stage(s) to run; only sunrise_plot is active.
    # on,fall,slope,minsym,rangesym,symprofile=gen_average_storm()
    # download_TEC_storm()
    # download_TEC_quiet()
    print("STARTING..IN A GALAXY FAR FAR AWAY")
    # storm_process_TEC_US()
    # quiet_process_TEC_US()
    # write_to_file_params()
    # main_phase_only_write_to_file()
    # plot_TEC_individual_storm(2)
    # plot_TEC_all_storm()
    # plot_correlation_importance_mainphase()
    # clock_plot('HW')
    sunrise_plot('HW')
    # sunrise_plot_mainphase_sorted('HW',5)
    # gen_average_storm()
    # plot_partial_correlation_importance()
    # sunrise_effect_by_sector()
    # histogram_sunrise_effect_by_sector()
354938 | import tkinter as tk
import tkinter.font as tkf
import re
from PIL import ImageTk, Image
from src.utils import *
class StatusPage(tk.Frame):
    """Tkinter page for entering a character's stats and skill levels.

    Lays out three columns (stats / active skills / passive skills); every
    entry is backed by a tk.StringVar stored in ``self.levels``.  Values
    are synced into ``main_frame.userdata`` whenever an Entry loses focus
    or Return is pressed.  The two awakening-capable actives ('사신의낫'
    and '홀리 붐' -- the ones whose ACTIVE_MAX entry is a tuple) keep a
    pair of StringVars [skill level, awakening count] instead of one.
    """

    # (userdata key, icon file name, display label) per stat.
    STAT_LIST = [('공격력', '공격력', '공격력 증가'),
                 ('마력', '마력', '마력 증가'),
                 ('HP 증가', 'HP', 'HP 증가'),
                 ('치명타 확률 증가', '치명타 확률', '치명타 확률 증가'),
                 ('치명타 데미지 증가', '치명타 데미지', '치명타 데미지 증가(+)'),
                 ('MP 증가', 'MP', 'MP 증가'),
                 ('사신타 확률 증가', '사신타 확률', '사신타 확률 증가'),
                 ('사신타 데미지 증가', '사신타 데미지', '사신타 데미지 증가')]
    # Per-stat caps; the first two are uncapped (inf entries are never
    # rendered because _render_text skips indices 0 and 1).
    STAT_MAX = [float('inf'), float('inf'), 100000, 30, 550, 1020, 20, 40]
    ACTIVE_LIST = ['사신의낫', '포터', '홀리 실드', '홀리 스나이퍼', '홀리 팅글',
                   '라이트닝샷', '라이트닝 마그넷', '스카이 라이트닝', '소울 파이어', '점화',
                   '미스틱블래스', '프리즘 스모크', '홀리 붐', '메테오 오리지널', '천계의 계약']
    # Tuple entries mark skills that also track an awakening count:
    # (max level, max awakening).
    ACTIVE_MAX = [(140, 6), 1, 30, 30, 50,
                  50, 50, 50, 50, 50,
                  30, 50, (100, 5), 50, 50]
    PASSIVE_LIST = ['주피터의 지혜', '넵튠의 지혜', '불칸의 지혜', '아폴로의 지혜', '사신의 의무',
                    '지상의 기도', '영웅의 기운', '미네르바의 지혜', '전설의 주문', '블레싱',
                    '플루토의 낫', '플루토의 보주', '플루토의 옷', '만해', '타나토스의 손길',
                    '타나토스의 분노']
    PASSIVE_MAX = [300, 250, 150, 100, 100,
                   100, 100, 10, 30, 30,
                   5000, 5000, 5000, 50, 1000,
                   1000]
    # Starting grid column of the stat / active / passive sections.
    GRID_COL_POS = [0, 3, 9]

    def __init__(self, master, main_frame):
        super().__init__(master, bg='white')
        self.main_frame = main_frame
        self.img_size = (90, 90)
        self._initialize_variables(main_frame)
        self.update_data(main_frame.userdata)
        self._render_frame()

    def _initialize_variables(self, master):
        """Load every icon image and create the StringVars behind the entries."""
        self.imgs = {}
        for key, filename, _ in StatusPage.STAT_LIST:
            img = Image.open('data/img/stat/%s.png'%(filename)).resize(self.img_size, Image.ANTIALIAS)
            photo = ImageTk.PhotoImage(img)
            self.imgs[key] = photo
        for key in StatusPage.PASSIVE_LIST + StatusPage.ACTIVE_LIST:
            img = Image.open('data/img/skill/%s.png'%(key)).resize(self.img_size, Image.ANTIALIAS)
            photo = ImageTk.PhotoImage(img)
            self.imgs[key] = photo

        self.levels = {}
        for key in [k for k, _, _ in StatusPage.STAT_LIST] + StatusPage.PASSIVE_LIST + StatusPage.ACTIVE_LIST:
            # FIX: restore '홀리 붐' (was a '<NAME>' anonymization placeholder);
            # _render_text and update_data index self.levels['홀리 붐'][0]/[1],
            # so it must hold the two-StringVar pair created here.
            if key in ['사신의낫', '홀리 붐']:
                strvar1 = tk.StringVar()
                strvar1.set('')
                strvar2 = tk.StringVar()
                strvar2.set('')
                self.levels[key] = [strvar1, strvar2]
            else:
                strvar = tk.StringVar()
                strvar.set('')
                self.levels[key] = strvar

    def update_userdata(self):
        """Parse every entry (blank/invalid -> 0) into main_frame.userdata."""
        for key, _, _ in StatusPage.STAT_LIST:
            input_level = self.levels[key].get()
            try:
                level = int(input_level)
                self.main_frame.userdata['능력치'][key] = level
            except:
                self.main_frame.userdata['능력치'][key] = 0
        for key in StatusPage.PASSIVE_LIST:
            input_level = self.levels[key].get()
            try:
                level = int(input_level)
                self.main_frame.userdata['스킬-패시브'][key] = level
            except:
                self.main_frame.userdata['스킬-패시브'][key] = 0
        for key in StatusPage.ACTIVE_LIST:
            # FIX: restore '홀리 붐' (was a '<NAME>' anonymization placeholder),
            # matching the [level, awakening] pair written by update_data.
            if key in ['사신의낫', '홀리 붐']:
                input_level1 = self.levels[key][0].get()
                input_level2 = self.levels[key][1].get()
                try:
                    level1 = int(input_level1)
                    level2 = int(input_level2)
                    self.main_frame.userdata['스킬-액티브'][key] = [level1, level2]
                except:
                    self.main_frame.userdata['스킬-액티브'][key] = [0, 0]
            else:
                input_level = self.levels[key].get()
                try:
                    level = int(input_level)
                    self.main_frame.userdata['스킬-액티브'][key] = level
                except:
                    self.main_frame.userdata['스킬-액티브'][key] = 0

    def _render_frame(self):
        """Render the page: icon column images first, then labels/entries."""
        self._render_image()
        self._render_text()

    def _render_image(self):
        """Place the three section headers and every icon image in the grid."""
        label = tk.Label(self, text='능력치', font=tkf.Font(family="Maplestory", size=20), bg='white', fg="#202020")
        label.grid(row=0, column=StatusPage.GRID_COL_POS[0], columnspan=2, sticky='we')
        # Empty spacer labels keep a gap between the three sections.
        label = tk.Label(self, text='', font=tkf.Font(family="Maplestory", size=20), bg='white', fg="#202020")
        label.grid(row=0, column=StatusPage.GRID_COL_POS[1] - 1, sticky='we', padx=5)
        label = tk.Label(self, text='액티브 스킬', font=tkf.Font(family="Maplestory", size=20), bg='white', fg="#202020")
        label.grid(row=0, column=StatusPage.GRID_COL_POS[1], columnspan=5, sticky='we')
        label = tk.Label(self, text='', font=tkf.Font(family="Maplestory", size=20), bg='white', fg="#202020")
        label.grid(row=0, column=StatusPage.GRID_COL_POS[2] - 1, sticky='we', padx=5)
        label = tk.Label(self, text='패시브 스킬', font=tkf.Font(family="Maplestory", size=20), bg='white', fg="#202020")
        label.grid(row=0, column=StatusPage.GRID_COL_POS[2], columnspan=3, sticky='we')

        for i, (key, _, _) in enumerate(StatusPage.STAT_LIST):
            img = self.imgs[key]
            label = tk.Label(self, bg='white', image=img)
            label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[0], rowspan=2, padx=3)
        for i, key in enumerate(StatusPage.ACTIVE_LIST):
            img = self.imgs[key]
            label = tk.Label(self, bg='white', image=img)
            label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[1], rowspan=2, padx=3)
        for i, key in enumerate(StatusPage.PASSIVE_LIST):
            img = self.imgs[key]
            label = tk.Label(self, bg='white', image=img)
            label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[2], rowspan=2, padx=3)

    def _render_text(self):
        """Place the labels and the bound Entry widgets next to each icon."""
        for i, (key, _, text) in enumerate(StatusPage.STAT_LIST):
            # Indices 0/1 have no finite cap, so no "(MAX-n)" suffix for them.
            text = text + ' (MAX-%d)'%(StatusPage.STAT_MAX[i]) if i not in [0, 1] else text
            label = tk.Label(self, text=text, font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
            label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[0] + 1, sticky='w')
            entry = tk.Entry(self, width=8, textvariable=self.levels[key], font=tkf.Font(family="Maplestory", size=15),
                             justify='left')
            entry.bind('<Return>', self._calculate_variables)
            entry.bind('<FocusOut>', self._calculate_variables)
            entry.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[0] + 1, sticky='w')

        for i, key in enumerate(StatusPage.ACTIVE_LIST):
            if key in ['사신의낫', '홀리 붐']:
                # Awakening-capable skills get two entries: level + awakening.
                label = tk.Label(self, text=key, font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
                label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[1] + 1, columnspan=4, sticky='w')
                label = tk.Label(self, text='스킬 레벨: ', font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
                label.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[1] + 1, sticky='w')
                label = tk.Label(self, text='각성 회차: ', font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
                label.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[1] + 3, sticky='w', padx=(5,0))
                entry = tk.Entry(self, width=4, textvariable=self.levels[key][0], font=tkf.Font(family="Maplestory", size=15),
                                 justify='left')
                entry.bind('<Return>', self._calculate_variables)
                entry.bind('<FocusOut>', self._calculate_variables)
                entry.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[1] + 2, sticky='w')
                entry = tk.Entry(self, width=4, textvariable=self.levels[key][1], font=tkf.Font(family="Maplestory", size=15),
                                 justify='left')
                entry.bind('<Return>', self._calculate_variables)
                entry.bind('<FocusOut>', self._calculate_variables)
                entry.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[1] + 4, sticky='w')
            else:
                label = tk.Label(self, text=key, font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
                label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[1] + 1, columnspan=4, sticky='w')
                label = tk.Label(self, text='스킬 레벨: ', font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
                label.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[1] + 1, sticky='w')
                entry = tk.Entry(self, width=4, textvariable=self.levels[key], font=tkf.Font(family="Maplestory", size=15),
                                 justify='left')
                entry.bind('<Return>', self._calculate_variables)
                entry.bind('<FocusOut>', self._calculate_variables)
                entry.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[1] + 2, sticky='w')

        for i, key in enumerate(StatusPage.PASSIVE_LIST):
            text = key + ' (MAX-%d)'%(StatusPage.PASSIVE_MAX[i])
            label = tk.Label(self, text=text, font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
            label.grid(row=2 * i + 1, column=StatusPage.GRID_COL_POS[2] + 1, columnspan=2, sticky='w')
            label = tk.Label(self, text='스킬 레벨: ', font=tkf.Font(family="Maplestory", size=15), bg='white', fg="#202020")
            label.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[2] + 1, sticky='w')
            entry = tk.Entry(self, width=5, textvariable=self.levels[key], font=tkf.Font(family="Maplestory", size=15),
                             justify='left')
            entry.bind('<Return>', self._calculate_variables)
            entry.bind('<FocusOut>', self._calculate_variables)
            entry.grid(row=2 * i + 2, column=StatusPage.GRID_COL_POS[2] + 2, sticky='w')

    def _calculate_variables(self, event=None):
        """Entry event handler: push the current entry values into userdata."""
        self.update_userdata()

    def update_data(self, userdata):
        """Populate every entry from userdata, then sync back once."""
        for key, _, _ in StatusPage.STAT_LIST:
            self.levels[key].set(userdata['능력치'][key])
        for key in StatusPage.ACTIVE_LIST:
            if key in ['사신의낫', '홀리 붐']:
                self.levels[key][0].set(userdata['스킬-액티브'][key][0])
                self.levels[key][1].set(userdata['스킬-액티브'][key][1])
            else:
                self.levels[key].set(userdata['스킬-액티브'][key])
        for key in StatusPage.PASSIVE_LIST:
            self.levels[key].set(userdata['스킬-패시브'][key])
        self._calculate_variables()
1897467 | <gh_stars>0
# Define a function `plus()`
def plus(a,b):
    """Return ``a + b`` (works for any operands supporting ``+``)."""
    total = a + b
    return total
# Create a `Summation` class
class Summation(object):
    """Tiny accumulator: remembers the most recent sum in ``contents``."""

    def sum(self, a, b):
        """Compute ``a + b``, cache it on ``self.contents``, and return it."""
        result = a + b
        self.contents = result
        return result
| StarcoderdataPython |
11257391 | <filename>maddpg_implementation/experiments/test.py
import argparse
import numpy as np
import tensorflow as tf
import time
import pickle
import os
import matplotlib.pyplot as plt
import maddpg_implementation.maddpg.common.tf_util as U
from maddpg_implementation.maddpg.trainer.maddpg import MADDPGAgentTrainer
import tensorflow.contrib.layers as layers
import joblib
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
    """Build a 2-hidden-layer MLP that maps an observation to action values.

    Two fully connected ReLU layers of ``num_units`` each, followed by a
    linear layer of ``num_outputs``.  ``rnn_cell`` is accepted for
    interface compatibility but never used here.
    """
    with tf.variable_scope(scope, reuse=reuse):
        hidden = layers.fully_connected(input, num_outputs=num_units, activation_fn=tf.nn.relu)
        hidden = layers.fully_connected(hidden, num_outputs=num_units, activation_fn=tf.nn.relu)
        return layers.fully_connected(hidden, num_outputs=num_outputs, activation_fn=None)
def make_env(scenario_name, arglist, benchmark=False):
    """Create a MultiAgentEnv for the named particle-env scenario.

    In benchmark mode the scenario's ``benchmark_data`` callback is wired
    into the environment as well, so per-step benchmark info is collected.
    """
    from multiagent.environment import MultiAgentEnv
    import multiagent.scenarios as scenarios
    # Load the scenario module shipped with the multiagent package and
    # build its world.
    scenario = scenarios.load(scenario_name + ".py").Scenario()
    world = scenario.make_world()
    if benchmark:
        return MultiAgentEnv(world, scenario.reset_world, scenario.reward,
                             scenario.observation, scenario.benchmark_data)
    return MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
def get_trainers(env, num_adversaries, obs_shape_n, arglist):
    """Create one MADDPGAgentTrainer per agent, adversaries first.

    Agents with index < num_adversaries use arglist.adv_policy, the rest
    arglist.good_policy; a trainer falls back to its local (DDPG-style)
    Q-function when that policy string is 'ddpg'.
    """
    trainers = []
    for i in range(env.n):
        policy = arglist.adv_policy if i < num_adversaries else arglist.good_policy
        trainers.append(MADDPGAgentTrainer(
            "agent_%d" % i, mlp_model, obs_shape_n, env.action_space, i, arglist,
            local_q_func=(policy == 'ddpg')))
    return trainers
def test(arglist):
    """Evaluate previously-trained MADDPG policies and record rollouts.

    Loads saved weights from ``arglist.load_dir`` (falling back to
    ``save_dir``), runs episodes in the multiagent env while sampling
    discrete actions from each agent's policy distribution, tracks
    per-episode rewards and (in benchmark mode) collision/distance/
    coverage stats, dumps rollout paths in batches of 10 via joblib,
    periodically saves reward/benchmark plots, and finally stores every
    (obs, a0, a1, a2, obs') tuple to 'Transition_new.npy'.

    NOTE(review): the [1, 54] / [18,] reshapes hard-code 3 agents with
    18-dim observations -- confirm against the scenario before reuse.
    """
    with U.single_threaded_session():
        # Create environment
        env = make_env(arglist.scenario, arglist, arglist.benchmark)
        # Create agent trainers
        obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
        num_adversaries = min(env.n, arglist.num_adversaries)
        trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist)
        print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))

        # Initialize
        U.initialize()

        # Load previous results, if necessary
        if arglist.load_dir == "":
            arglist.load_dir = arglist.save_dir
        print('Loading previous state...')
        #MAKE SURE LOAD_DIR IS WHERE WEIGHTS ARE
        U.load_state(arglist.load_dir+ "policy")

        episode_rewards = [0.0]  # sum of rewards for all agents
        agent_rewards = [[0.0] for _ in range(env.n)]  # individual agent reward
        final_ep_rewards = []  # sum of rewards for training curve
        final_ep_ag_rewards = []  # agent rewards for training curve
        agent_info = [[[]]]  # placeholder for benchmarking info
        # Benchmark bookkeeping: per-step collisions, per-episode aggregates.
        t_collisions = []
        collisions = []
        min_dist = []
        obs_covered = []
        final_collisions = []
        final_dist = []
        final_obs_cov = []
        transition = []  # every (obs, a0, a1, a2, obs') seen, saved at the end
        obs_n = env.reset()
        episode_step = 0
        train_step = 0
        t_start = time.time()
        # Rollout recording: running_paths[0] accumulates agent 0's current
        # episode; completed episodes are moved into `paths`.
        paths = []
        path_dict = []
        running_paths = [None]

        print('Starting iterations...')
        while True:
            # get action (probability vectors, one per agent)
            action_n = [agent.action(obs) for agent, obs in zip(trainers,obs_n)]
            # environment step: sample a discrete action index per agent
            a_n = []
            for i in range(len(trainers)):
                a_n.append(np.random.choice(np.arange(len(action_n[0])), p=action_n[i]))
            #new_obs_n, rew_n, done_n, info_n = env.step(action_n)
            new_obs_n, rew_n, done_n, info_n = env.step(a_n)
            episode_step += 1
            done = all(done_n)
            terminal = (episode_step >= arglist.max_episode_len)
            # collect experience (flattened joint observations)
            o = np.asarray(obs_n)
            o_next = np.asarray(new_obs_n)
            o = np.reshape(o, [1, 54])
            o_next = np.reshape(o_next, [1, 54])
            transition.append((o, a_n[0], a_n[1], a_n[2], o_next))
            # Per-step record for agent 0's rollout path.
            o1 = np.asarray(obs_n[0])
            o1 = np.reshape(o1, [18,])
            a1 = np.asarray([a_n[0]])
            rew1 = np.asarray([rew_n[0]])
            info1 = np.asarray([info_n['n'][0]])
            if running_paths[0] is None:
                running_paths[0] = dict(
                    observations=[],
                    actions=[],
                    rewards=[],
                    env_infos=[],
                    agent_infos=[],
                    returns=[],
                )
            running_paths[0]["observations"].append(o1)
            running_paths[0]["actions"].append(a1)
            running_paths[0]["rewards"].append(rew1)
            running_paths[0]["env_infos"].append(info1)
            running_paths[0]["agent_infos"].append(info1)
            running_paths[0]["returns"].append(0) #THIS IS FILLER. VALUE SHOULD NOT MATTER

            obs_n = new_obs_n

            for i, rew in enumerate(rew_n):
                episode_rewards[-1] += rew
                agent_rewards[i][-1] += rew

            if done or terminal:
                # Episode finished: archive the running path and, every 10
                # episodes, dump the latest batch to disk.
                paths.append(dict(observations=running_paths[0]["observations"],
                                  actions=running_paths[0]["actions"],
                                  rewards=running_paths[0]["rewards"],
                                  env_infos=running_paths[0]["env_infos"],
                                  agent_infos=running_paths[0]["agent_infos"],
                                  returns=running_paths[0]["returns"],
                                  ))
                running_paths[0] = None
                if len(paths) % 10 == 0 and len(paths) > 1:
                    path_dict.append(dict(paths=paths[-10:]))
                    joblib.dump(path_dict[-1], 'coop_nav/itr_' + str(len(path_dict)-1) + '.pkl')
                obs_n = env.reset()
                episode_step = 0
                episode_rewards.append(0)
                for a in agent_rewards:
                    a.append(0)
                agent_info.append([[]])

            # increment global step counter
            train_step += 1

            # for benchmarking learned policies
            # COMMENT OUT FOR NON-MADDPG ENVS
            if arglist.benchmark:
                collisions.append(max([info_n['n'][0][1], info_n['n'][1][1], info_n['n'][2][1]]) - 1)
                if train_step > arglist.benchmark_iters and (done or terminal):
                    os.makedirs(os.path.dirname(arglist.benchmark_dir), exist_ok=True)
                    min_dist.append(min([info_n['n'][0][2], info_n['n'][1][2], info_n['n'][1][2]]))
                    obs_covered.append(info_n['n'][0][3])
                    t_collisions.append(sum(collisions))
                    collisions = []

            # for displaying learned policies
            if arglist.display:
                time.sleep(0.1)
                env.render()
                continue

            # save model, display training output
            if terminal and (len(episode_rewards) % arglist.save_rate == 0):
                # print statement depends on whether or not there are adversaries
                if num_adversaries == 0:
                    print("steps: {}, episodes: {}, mean episode reward: {}, time: {}".format(
                        train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]), round(time.time()-t_start, 3)))
                else:
                    print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, time: {}".format(
                        train_step, len(episode_rewards), np.mean(episode_rewards[-arglist.save_rate:]),
                        [np.mean(rew[-arglist.save_rate:]) for rew in agent_rewards], round(time.time()-t_start, 3)))
                t_start = time.time()
                # Keep track of final episode reward
                final_ep_rewards.append(np.mean(episode_rewards[-arglist.save_rate:]))
                for rew in agent_rewards:
                    final_ep_ag_rewards.append(np.mean(rew[-arglist.save_rate:]))
                final_collisions.append(np.mean(t_collisions[-arglist.save_rate:]))
                final_dist.append(np.mean(min_dist[-arglist.save_rate:]))
                final_obs_cov.append(np.mean(obs_covered[-arglist.save_rate:]))
                # Refresh the diagnostic plots on disk.
                os.makedirs(os.path.dirname(arglist.plots_dir), exist_ok=True)
                plt.plot(final_ep_rewards)
                plt.savefig(arglist.plots_dir + arglist.exp_name + '_rewards.png')
                plt.clf()
                plt.plot(final_dist)
                plt.savefig(arglist.plots_dir + arglist.exp_name + '_min_dist.png')
                plt.clf()
                plt.plot(final_obs_cov)
                plt.savefig(arglist.plots_dir + arglist.exp_name + '_obstacles_covered.png')
                plt.clf()
                plt.plot(final_collisions)
                plt.savefig(arglist.plots_dir + arglist.exp_name + '_total_collisions.png')
                plt.clf()

            # saves final episode reward for plotting training curve later
            if len(episode_rewards) > arglist.num_episodes:
                rew_file_name = arglist.plots_dir + arglist.exp_name + '_rewards.pkl'
                with open(rew_file_name, 'wb') as fp:
                    pickle.dump(final_ep_rewards, fp)
                agrew_file_name = arglist.plots_dir + arglist.exp_name + '_agrewards.pkl'
                with open(agrew_file_name, 'wb') as fp:
                    pickle.dump(final_ep_ag_rewards, fp)
                print('...Finished total of {} episodes.'.format(len(episode_rewards)))
                print()
                print("Average min dist: {}".format(np.mean(final_dist)))
                print("Average number of collisions: {}".format(np.mean(final_collisions)))
                break
        print("Saving Transition...")
        transition = np.asarray(transition)
        print(transition.shape)
        np.save('Transition_new', transition)
        print(transition[-1])
def maddpg_test(arglist):
    """Public entry point: run the evaluation loop in test() with the parsed CLI arguments."""
    test(arglist)
| StarcoderdataPython |
4821604 | import numpy as np
import warnings
def anisodiff(img,niter=1,kappa=50,gamma=0.1,step=(1.,1.),option=1,ploton=False):
"""
Anisotropic diffusion.
Usage:
imgout = anisodiff(im, niter, kappa, gamma, option)
Arguments:
img - input image
niter - number of iterations
kappa - conduction coefficient 20-100 ?
gamma - max value of .25 for stability
step - tuple, the distance between adjacent pixels in (y,x)
option - 1 Perona Malik diffusion equation No 1
2 Perona Malik diffusion equation No 2
ploton - if True, the image will be plotted on every iteration
Returns:
imgout - diffused image.
kappa controls conduction as a function of gradient. If kappa is low
small intensity gradients are able to block conduction and hence diffusion
across step edges. A large value reduces the influence of intensity
gradients on conduction.
gamma controls speed of diffusion (you usually want it at a maximum of
0.25)
step is used to scale the gradients in case the spacing between adjacent
pixels differs in the x and y axes
Diffusion equation 1 favours high contrast edges over low contrast ones.
Diffusion equation 2 favours wide regions over smaller ones.
Reference:
<NAME> and <NAME>.
Scale-space and edge detection using ansotropic diffusion.
IEEE Transactions on Pattern Analysis and Machine Intelligence,
12(7):629-639, July 1990.
Original MATLAB code by <NAME>
School of Computer Science & Software Engineering
The University of Western Australia
pk @ csse uwa edu au
<http://www.csse.uwa.edu.au>
Translated to Python and optimised by <NAME>
Department of Pharmacology
University of Oxford
<<EMAIL>>
June 2000 original version.
March 2002 corrected diffusion eqn No 2.
July 2012 translated to Python
"""
# ...you could always diffuse each color channel independently if you
# really want
if img.ndim == 3:
warnings.warn("Only grayscale images allowed, converting to 2D matrix")
img = img.mean(2)
# initialize output array
img = img.astype('float32')
imgout = img.copy()
# initialize some internal variables
deltaS = np.zeros_like(imgout)
deltaE = deltaS.copy()
NS = deltaS.copy()
EW = deltaS.copy()
gS = np.ones_like(imgout)
gE = gS.copy()
# create the plot figure, if requested
if ploton:
import pylab as pl
from time import sleep
fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
ax1.imshow(img,interpolation='nearest')
ih = ax2.imshow(imgout,interpolation='nearest',animated=True)
ax1.set_title("Original image")
ax2.set_title("Iteration 0")
fig.canvas.draw()
for ii in range(niter):
# calculate the diffs
deltaS[:-1,: ] = np.diff(imgout,axis=0)
deltaE[: ,:-1] = np.diff(imgout,axis=1)
# conduction gradients (only need to compute one per dim!)
if option == 1:
gS = np.exp(-(deltaS/kappa)**2.)/step[0]
gE = np.exp(-(deltaE/kappa)**2.)/step[1]
elif option == 2:
gS = 1./(1.+(deltaS/kappa)**2.)/step[0]
gE = 1./(1.+(deltaE/kappa)**2.)/step[1]
# update matrices
E = gE*deltaE
S = gS*deltaS
# subtract a copy that has been shifted 'North/West' by one
# pixel. don't as questions. just do it. trust me.
NS[:] = S
EW[:] = E
NS[1:,:] -= S[:-1,:]
EW[:,1:] -= E[:,:-1]
# update the image
imgout += gamma*(NS+EW)
if ploton:
iterstring = "Iteration %i" %(ii+1)
ih.set_data(imgout)
ax2.set_title(iterstring)
fig.canvas.draw()
# sleep(0.01)
return imgout
def anisodiff3(stack,niter=1,kappa=50,gamma=0.1,step=(1.,1.,1.),option=1,ploton=False):
    """
    3D Anisotropic diffusion.
    Usage:
    stackout = anisodiff3(stack, niter, kappa, gamma, option)
    Arguments:
            stack  - input stack
            niter  - number of iterations
            kappa  - conduction coefficient 20-100 ?
            gamma  - max value of .25 for stability
            step   - tuple, the distance between adjacent pixels in (z,y,x)
            option - 1 Perona Malik diffusion equation No 1
                     2 Perona Malik diffusion equation No 2
            ploton - if True, the middle z-plane will be plotted on every
                     iteration
    Returns:
            stackout   - diffused stack.
    kappa controls conduction as a function of gradient.  If kappa is low
    small intensity gradients are able to block conduction and hence diffusion
    across step edges.  A large value reduces the influence of intensity
    gradients on conduction.
    gamma controls speed of diffusion (you usually want it at a maximum of
    0.25)
    step is used to scale the gradients in case the spacing between adjacent
    pixels differs in the x,y and/or z axes
    Diffusion equation 1 favours high contrast edges over low contrast ones.
    Diffusion equation 2 favours wide regions over smaller ones.
    Reference:
    <NAME> and <NAME>.
    Scale-space and edge detection using anisotropic diffusion.
    IEEE Transactions on Pattern Analysis and Machine Intelligence,
    12(7):629-639, July 1990.
    Original MATLAB code by <NAME>
    School of Computer Science & Software Engineering
    The University of Western Australia
    pk @ csse uwa edu au
    <http://www.csse.uwa.edu.au>
    Translated to Python and optimised by <NAME>
    Department of Pharmacology
    University of Oxford
    <<EMAIL>>
    June 2000  original version.
    March 2002 corrected diffusion eqn No 2.
    July 2012 translated to Python
    """
    # Diffusion is grayscale-only; average a 4-D (z, y, x, channel) stack
    # down to 3-D. (You could always diffuse each color channel independently
    # if you really want.)
    if stack.ndim == 4:
        warnings.warn("Only grayscale stacks allowed, converting to 3D matrix")
        stack = stack.mean(3)
    # initialize output array
    stack = stack.astype('float32')
    stackout = stack.copy()
    # initialize some internal variables:
    # forward differences per axis, their shifted copies, and the
    # per-direction conduction coefficients.
    deltaS = np.zeros_like(stackout)
    deltaE = deltaS.copy()
    deltaD = deltaS.copy()
    NS = deltaS.copy()
    EW = deltaS.copy()
    UD = deltaS.copy()
    gS = np.ones_like(stackout)
    gE = gS.copy()
    gD = gS.copy()
    # create the plot figure, if requested (shows the middle z-plane)
    if ploton:
        import pylab as pl
        from time import sleep
        showplane = stack.shape[0]//2
        fig = pl.figure(figsize=(20,5.5),num="Anisotropic diffusion")
        ax1,ax2 = fig.add_subplot(1,2,1),fig.add_subplot(1,2,2)
        ax1.imshow(stack[showplane,...].squeeze(),interpolation='nearest')
        ih = ax2.imshow(stackout[showplane,...].squeeze(),interpolation='nearest',animated=True)
        ax1.set_title("Original stack (Z = %i)" %showplane)
        ax2.set_title("Iteration 0")
        fig.canvas.draw()
    for ii in range(niter):
        # calculate the forward differences along depth, south and east axes
        deltaD[:-1,: ,: ] = np.diff(stackout,axis=0)
        deltaS[: ,:-1,: ] = np.diff(stackout,axis=1)
        deltaE[: ,: ,:-1] = np.diff(stackout,axis=2)
        # conduction gradients (only need to compute one per dim!)
        if option == 1:
            gD = np.exp(-(deltaD/kappa)**2.)/step[0]
            gS = np.exp(-(deltaS/kappa)**2.)/step[1]
            gE = np.exp(-(deltaE/kappa)**2.)/step[2]
        elif option == 2:
            gD = 1./(1.+(deltaD/kappa)**2.)/step[0]
            gS = 1./(1.+(deltaS/kappa)**2.)/step[1]
            gE = 1./(1.+(deltaE/kappa)**2.)/step[2]
        # update matrices: directional fluxes
        D = gD*deltaD
        E = gE*deltaE
        S = gS*deltaS
        # Divergence: subtract a copy of each flux that has been shifted
        # 'Up/North/West' by one pixel.
        UD[:] = D
        NS[:] = S
        EW[:] = E
        UD[1:,: ,: ] -= D[:-1,: ,: ]
        NS[: ,1:,: ] -= S[: ,:-1,: ]
        EW[: ,: ,1:] -= E[: ,: ,:-1]
        # update the image (explicit Euler step of the diffusion PDE)
        stackout += gamma*(UD+NS+EW)
        if ploton:
            iterstring = "Iteration %i" %(ii+1)
            ih.set_data(stackout[showplane,...].squeeze())
            ax2.set_title(iterstring)
            fig.canvas.draw()
            # sleep(0.01)
    return stackout
12806201 | from pathlib import Path
# Project Directories
MODULE_ROOT = Path(__file__).resolve().parent  # directory containing this module
ROOT = MODULE_ROOT.parent.parent  # two levels up from the module directory
print(ROOT)  # NOTE(review): prints at import time — side effect; confirm it is intentional (likely debug leftover)
| StarcoderdataPython |
3308574 | <filename>UoSM/lab8.py
# Lab 8 (Online Lab)
# 30 November 2020
# © <NAME>
# Code available on Github https://github.com/yonghuatang/soton1/tree/master/FEEG1001
# Python version 3.8
import numpy as np
import scipy
import matplotlib.pyplot as plt
def trapez(f, a, b, n):
    """Approximate the integral of f over [a, b] with the composite
    trapezoidal rule on n equal subintervals."""
    width = (b - a) / n
    # n+1 sample points are needed to define n subdivisions.
    xs = np.linspace(a, b, n + 1)
    ys = [f(x) for x in xs]
    # Trapezoid weights: endpoints count once, interior points twice.
    weights = np.ones(n + 1)
    weights[1:-1] = 2
    return (width / 2) * np.dot(ys, weights)
def finderror(n):
    """Return the signed error of the n-subinterval trapezoidal estimate of
    the integral of x**2 over [-1, 2] against the exact value."""
    def antiderivative(x):
        # F(x) = x^3 / 3 is an antiderivative of x^2.
        return (x ** 3) / 3

    exact = antiderivative(2) - antiderivative(-1)
    approx = trapez(lambda x: x * x, -1, 2, n)
    return approx - exact
def using_quad():
    """Integrate x**2 over [-1, 2] with adaptive quadrature.

    Returns:
        The (value, abs_error_estimate) tuple from scipy.integrate.quad.
    """
    # Bug fix: `import scipy` alone does not load the `scipy.integrate`
    # submodule, so the original call raised AttributeError at runtime.
    import scipy.integrate
    return scipy.integrate.quad(lambda x: x ** 2, -1, 2)
def f1(x):
    """Damped cosine: cos(2*pi*x) weighted by a Gaussian envelope exp(-x**2)."""
    envelope = np.exp(-x ** 2)
    return np.cos(2 * np.pi * x) * envelope
def f2(x):
    """Shifted natural logarithm log(x + 2.2); defined for x > -2.2."""
    shifted = x + 2.2
    return np.log(shifted)
def create_plot_data(f, xmin, xmax, n):
    """Sample f at n evenly spaced points on [xmin, xmax].

    Returns:
        (xs, ys) where xs is an ndarray of sample locations and ys the list
        of corresponding function values.
    """
    xs = np.linspace(xmin, xmax, n)
    ys = list(map(f, xs))
    return xs, ys
def myplot():
    """Plot f1, f2 and their difference on [-2, 2], save the figure to
    plot.jpg / plot.pdf, then display it."""
    values1 = create_plot_data(f1, -2, 2, 1001)
    values2 = create_plot_data(f2, -2, 2, 1001)

    def g(x):
        # Pointwise difference of the two curves.
        return f1(x) - f2(x)

    values3 = create_plot_data(g, -2, 2, 1001)
    plt.plot(values1[0], values1[1], label='f1')
    plt.plot(values2[0], values2[1], label='f2')
    plt.plot(values3[0], values3[1], label='f1-f2')
    plt.xlabel('x')
    plt.legend()
    # Bug fix: save BEFORE show().  With GUI backends, show() blocks and the
    # figure is torn down when the window closes, so the original code wrote
    # blank image files.
    plt.savefig('plot.jpg')
    plt.savefig('plot.pdf')
    plt.show()
def find_cross():
    """Return the x in [0, 2] where f1(x) == f2(x), i.e. the root of f1 - f2,
    found with Brent's method."""
    def fnew(x):
        return f1(x) - f2(x)

    # Bug fix: `import scipy` does not pull in `scipy.optimize`, so the
    # original `scipy.optimize.brentq(...)` raised AttributeError at runtime.
    from scipy.optimize import brentq
    return brentq(fnew, 0, 2)
| StarcoderdataPython |
8102885 | import unittest
from conans.test.utils.tools import TestClient
from conans.model.ref import ConanFileReference
import os
class NoCopySourceTest(unittest.TestCase):
    """Integration tests for conan's `no_copy_source` recipe attribute:
    sources must stay out of the build tree but remain visible to build()
    and package()."""

    def test_basic(self):
        # Recipe that reads a file from source_folder in build() and
        # packages everything it produced.
        conanfile = '''
from conans import ConanFile
from conans.util.files import save, load
import os
class ConanFileToolsTest(ConanFile):
    name = "Pkg"
    version = "0.1"
    exports_sources = "*"
    no_copy_source = True
    def build(self):
        self.output.info("Source files: %s" % load(os.path.join(self.source_folder, "file.h")))
        save("myartifact.lib", "artifact contents!")
    def package(self):
        self.copy("*")
'''
        client = TestClient()
        client.save({"conanfile.py": conanfile,
                     "file.h": "myfile.h contents"})
        client.run("export . lasote/testing")
        client.run("install Pkg/0.1@lasote/testing --build")
        # build() could still read the source file via source_folder...
        self.assertIn("Source files: myfile.h contents", client.user_io.out)
        ref = ConanFileReference.loads("Pkg/0.1@lasote/testing")
        builds = client.client_cache.builds(ref)
        pid = os.listdir(builds)[0]
        build_folder = os.path.join(builds, pid)
        # ...but with no_copy_source the source was NOT copied into the
        # build tree...
        self.assertNotIn("file.h", os.listdir(build_folder))
        packages = client.client_cache.packages(ref)
        package_folder = os.path.join(packages, pid)
        # ...while both the source file and the build artifact end up in
        # the package.
        self.assertIn("file.h", os.listdir(package_folder))
        self.assertIn("myartifact.lib", os.listdir(package_folder))

    def test_source_folder(self):
        # Recipe template; the %s placeholder toggles no_copy_source.
        conanfile = '''
from conans import ConanFile
from conans.util.files import save, load
import os
class ConanFileToolsTest(ConanFile):
    name = "Pkg"
    version = "0.1"
    no_copy_source = %s
    def source(self):
        save("header.h", "artifact contents!")
    def package(self):
        self.copy("*.h", dst="include")
'''
        # With no_copy_source=True the file created in source() must still
        # be packaged.
        client = TestClient()
        client.save({"conanfile.py": conanfile % "True"})
        client.run("create . lasote/testing --build")
        ref = ConanFileReference.loads("Pkg/0.1@lasote/testing")
        packages = client.client_cache.packages(ref)
        pid = os.listdir(packages)[0]
        package_folder = os.path.join(packages, pid, "include")
        self.assertIn("header.h", os.listdir(package_folder))
        # Same expectation with no_copy_source=False (the default copy path).
        client = TestClient()
        client.save({"conanfile.py": conanfile % "False"})
        client.run("create . lasote/testing --build")
        ref = ConanFileReference.loads("Pkg/0.1@lasote/testing")
        packages = client.client_cache.packages(ref)
        pid = os.listdir(packages)[0]
        package_folder = os.path.join(packages, pid, "include")
        self.assertIn("header.h", os.listdir(package_folder))
3328958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
import pyptouch
class Tox(TestCommand):
    """setuptools `test` command that delegates the test run to tox."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # No extra args; run the full tox suite.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Import here: outside this method the test dependencies (eggs)
        # aren't loaded yet.
        import tox
        errcode = tox.cmdline(self.test_args)
        # Propagate tox's exit status to the caller.
        sys.exit(errcode)
# Assemble the long description from the README and the changelog.
with open('README.rst') as readme_file:
    readme = readme_file.read()

with open('HISTORY.rst') as history_file:
    # Strip the changelog marker directive before embedding.
    history = history_file.read().replace('.. :changelog:', '')

# Runtime requirements.
requirements = [
    'Pillow'
]

setup(
    name='pyptouch',
    version=pyptouch.__version__,
    description='Driver for Brothers P-Touch series of label printers.',
    url='https://github.com/tld/pyptouch',
    license="BSD",
    author="<NAME>",
    author_email='<EMAIL>',
    long_description=readme + '\n\n' + history,
    packages=[
        'pyptouch'
    ],
    package_dir={'pyptouch': 'pyptouch'},
    scripts=['bin/png2ptouch'],
    include_package_data=True,
    install_requires=requirements,
    zip_safe=False,
    keywords='pyptouch',
    # `python setup.py test` delegates to tox via the Tox command class above.
    cmdclass={'test': Tox},
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Information Technology',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Multimedia :: Graphics :: Graphics Conversion',
        'Topic :: Office/Business',
        'Topic :: Printing',
        'Topic :: Text Processing',
        'Topic :: Utilities'
    ],
    # Optional PNG support (NOTE(review): Pillow is also in install_requires
    # above — the extra is redundant; confirm which is intended).
    extras_require={
        'png': ['Pillow>=2.6']
    },
    # test_suite='tests',
    tests_require=['nose']
)
| StarcoderdataPython |
6408070 | <gh_stars>1-10
print(len('foo')) # print, len | StarcoderdataPython |
5145992 | # my_note/__init__.py
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# Import the configuration settings
from config import app_environments
from .admin import admin as admin_blueprint
from .auth import auth as auth_blueprint
from .home import home as home_blueprint
# Instantiate the database handle
db = SQLAlchemy()
# The application factory (create_app) is defined here
def create_app(environment):
    """Application factory: build and configure the Flask app for the given
    environment key (looked up in `app_environments`)."""
    # Instantiate our Flask application (instance folder enabled).
    app = Flask(__name__, instance_relative_config=True)
    # Our configuration is split into per-environment classes, so load the
    # matching class object first...
    app.config.from_object(app_environments[environment])
    # ...then layer the instance-specific config file on top.
    app.config.from_pyfile('config.py')
    # Bind the SQLAlchemy instance to this app.
    db.init_app(app)
    # Register each of the blueprints.
    app.register_blueprint(admin_blueprint, url_prefix='/admin/')
    app.register_blueprint(auth_blueprint)
    app.register_blueprint(home_blueprint)
    # Hook up Flask-Migrate (the handle itself is unused here).
    migrate = Migrate(app, db)
    # Import models so SQLAlchemy/Migrate see the table definitions.
    from my_note import models
    return app
| StarcoderdataPython |
11321482 | <filename>apps/api/bitacoras/url.py
from rest_framework.routers import DefaultRouter
from apps.api.bitacoras.views import BitacoraViewSet
# DRF router exposing CRUD endpoints for Bitacora records under /bitacoras.
router_bitacoras = DefaultRouter()
router_bitacoras.register(prefix='bitacoras', basename='bitacoras', viewset=BitacoraViewSet)
| StarcoderdataPython |
1869059 | import unittest
import sys
import os
import datetime
# Prefer the stdlib mock (Python 3); fall back to the standalone `mock`
# package on older interpreters.
try:
    from unittest import mock
except ImportError:
    # Bug fix: the original bare `except:` would also swallow unrelated
    # errors (KeyboardInterrupt, SystemExit, ...); only an ImportError
    # should trigger the fallback.
    import mock
import voltverine.plugins
class TestTime(unittest.TestCase):
    """Unit tests for the voltverine Time plugin's analyze() decisions.

    NOTE(review): the expectations below imply that being OUTSIDE the
    configured start/end window yields OK and inside yields NOT_OK —
    i.e. the window marks protected hours. Confirm against the plugin.
    """

    def test_no_time_provided(self):
        # Without a configured window the plugin cannot decide either way.
        voltverine_plugin = voltverine.plugins.Time()
        (action, info) = voltverine_plugin.analyze()
        self.assertTrue(action == voltverine.plugins.DUNNO)
        self.assertTrue(info == {})

    def test_bad_time_provided(self):
        # Unparseable start/end values also lead to an undecided result.
        voltverine_plugin = voltverine.plugins.Time(time={'start': 'garbage', 'end': 'pulp'})
        (action, info) = voltverine_plugin.analyze()
        self.assertTrue(action == voltverine.plugins.DUNNO)
        self.assertTrue(info == {})

    def test_time_ok(self):
        # Freeze "now" at 20:00, outside the 9:00-17:00 window -> OK.
        target = datetime.datetime(1900, 1, 1, 20, 00)
        with mock.patch.object(datetime, 'datetime', mock.Mock(wraps=datetime.datetime)) as patched:
            patched.now.return_value = target
            voltverine_plugin = voltverine.plugins.Time(time={'start': '9:00', 'end': '17:00'})
            (action, info) = voltverine_plugin.analyze()
            self.assertTrue(action == voltverine.plugins.OK)
            self.assertTrue(info == {})

    def test_time_notok(self):
        # Freeze "now" at 14:00, inside the 9:00-17:00 window -> NOT_OK.
        target = datetime.datetime(1900, 1, 1, 14, 00)
        with mock.patch.object(datetime, 'datetime', mock.Mock(wraps=datetime.datetime)) as patched:
            patched.now.return_value = target
            voltverine_plugin = voltverine.plugins.Time(time={'start': '9:00', 'end': '17:00'})
            (action, info) = voltverine_plugin.analyze()
            self.assertTrue(action == voltverine.plugins.NOT_OK)
            self.assertTrue(info == {})

    def test_time_list_ok(self):
        # Same as test_time_ok but with the window given as a list of dicts.
        target = datetime.datetime(1900, 1, 1, 20, 00)
        with mock.patch.object(datetime, 'datetime', mock.Mock(wraps=datetime.datetime)) as patched:
            patched.now.return_value = target
            voltverine_plugin = voltverine.plugins.Time(time=[{'start': '9:00', 'end': '17:00'}])
            (action, info) = voltverine_plugin.analyze()
            self.assertTrue(action == voltverine.plugins.OK)
            self.assertTrue(info == {})

    def test_time_list_notok(self):
        # Same as test_time_notok but with the window given as a list of dicts.
        target = datetime.datetime(1900, 1, 1, 14, 00)
        with mock.patch.object(datetime, 'datetime', mock.Mock(wraps=datetime.datetime)) as patched:
            patched.now.return_value = target
            voltverine_plugin = voltverine.plugins.Time(time=[{'start': '9:00', 'end': '17:00'}])
            (action, info) = voltverine_plugin.analyze()
            self.assertTrue(action == voltverine.plugins.NOT_OK)
            self.assertTrue(info == {})
if __name__ == '__main__':
    # avoid writing to stderr: route the runner's report to stdout instead
    unittest.main(testRunner=unittest.TextTestRunner(stream=sys.stdout, verbosity=2))
| StarcoderdataPython |
# TODO: Turn this into a generator?
def get_tiles(image, tile_size):
    """Cut *image* into tile_size x tile_size crops, row by row.

    Tiles are returned left-to-right, top-to-bottom; tiles in the last
    row/column may extend past the image edge (the crop box is passed to
    image.crop as-is).
    """
    return [
        image.crop((left, top, left + tile_size, top + tile_size))
        for top in range(0, image.height, tile_size)
        for left in range(0, image.width, tile_size)
    ]
6420353 | import os
from io import StringIO
import hvplot.pandas # noqa
import pandas as pd # noqa
import panel as pn
import param
import pendulum
from astropy.coordinates import SkyCoord
from astropy.utils.data import download_file
from bokeh.models import (ColumnDataSource, DataTable, TableColumn, NumberFormatter, DateFormatter)
from panoptes.utils.data import search_observations, get_metadata
from panoptes.utils.logger import logger
logger.enable('panoptes')
pn.extension()
PROJECT_ID = os.getenv('PROJECT_ID', 'panoptes-exp')
BASE_URL = os.getenv('BASE_URL', 'https://storage.googleapis.com/panoptes-exp.appspot.com/observations.csv')
OBSERVATIONS_BASE_URL = os.getenv('OBSERVATIONS_BASE_URL', 'https://storage.googleapis.com/panoptes-observations')
class ObservationsExplorer(param.Parameterized):
    """Param interface for inspecting observations.

    Widget parameters drive a search over the PANOPTES observations CSV;
    selecting a table row loads that observation's image metadata.
    """
    # Hidden (precedence=-1) dataframes backing the widgets.
    observation_df = param.DataFrame(
        doc='The DataFrame for the observations.',
        precedence=-1  # Don't show widget
    )
    images_df = param.DataFrame(
        doc='The DataFrame for the images from the selected observations.',
        precedence=-1  # Don't show widget
    )
    show_recent = param.Boolean(
        label='Show recent observations',
        doc='Show recent observations',
        default=True
    )
    search_name = param.String(
        label='Coordinates for object',
        doc='Field name for coordinate lookup',
    )
    coords = param.XYCoordinates(
        label='RA/Dec Coords [deg]',
        doc='RA/Dec Coords [degrees]',
        default=(0, 0)
    )
    radius = param.Number(
        label='Search radius [deg]',
        doc='Search radius [degrees]',
        default=15.,
        bounds=(0, 180),
        softbounds=(0, 25)
    )
    time = param.DateRange(
        label='Date Range',
        default=(pendulum.parse('2016-01-01').replace(tzinfo=None), pendulum.now().replace(tzinfo=None)),
        bounds=(pendulum.parse('2016-01-01').replace(tzinfo=None), pendulum.now().replace(tzinfo=None))
    )
    min_num_images = param.Integer(
        doc='Minimum number of images.',
        default=1,
        bounds=(1, 50),
        softbounds=(1, 10)
    )
    unit_id = param.ListSelector(
        doc='Unit IDs',
        label='Unit IDs',
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        logger.debug(f'Getting recent stats from {BASE_URL}')
        # Download (and cache) the full observations CSV once at startup.
        self._observations_path = download_file(f'{BASE_URL}',
                                                cache='update',
                                                show_progress=False,
                                                pkgname='panoptes')
        self._observations_df = pd.read_csv(self._observations_path).convert_dtypes()
        # Set up widgets
        # Set some default for the params now that we have data.
        units = sorted(self._observations_df.unit_id.unique())
        units.insert(0, 'The Whole World! 🌎')
        self.param.unit_id.objects = units
        self.unit_id = [units[0]]
        # Create the source objects.
        self.update_dataset()

    @param.depends('coords', 'radius', 'time', 'min_num_images', 'unit_id', 'search_name')
    def update_dataset(self):
        """Run a search with the current widget values and return a
        ColumnDataSource wired to row selection."""
        if self.show_recent:
            # Get just the recent result on initial load
            now = pendulum.now().replace(tzinfo=None)
            df = search_observations(ra=180,
                                     dec=0,
                                     radius=180,
                                     start_date=now.subtract(months=1),
                                     end_date=now,
                                     min_num_images=1,
                                     source=self._observations_df
                                     ).sort_values(by=['time', 'unit_id', 'camera_id'], ascending=False)
        else:
            # If using the default unit_ids option, then search for all.
            unit_ids = self.unit_id
            if unit_ids == self.param.unit_id.objects[0:1]:
                unit_ids = self.param.unit_id.objects[1:]
            # Resolve an object name to coordinates if one was entered.
            if self.search_name != '':
                coords = SkyCoord.from_name(self.search_name)
                self.coords = (
                    round(coords.ra.value, 3),
                    round(coords.dec.value, 3)
                )
            # Search for the observations given the current params.
            df = search_observations(ra=self.coords[0],
                                     dec=self.coords[1],
                                     radius=self.radius,
                                     start_date=self.time[0],
                                     end_date=self.time[1],
                                     min_num_images=self.min_num_images,
                                     unit_id=unit_ids
                                     ).sort_values(by=['time', 'unit_id', 'camera_id'], ascending=False)
        df.time = pd.to_datetime(df.time)
        cds = ColumnDataSource(data=df, name='observations_source')

        def obs_row_selected(attrname, old_row_index, new_row_index):
            # We only lookup one even if they select multiple rows.
            newest_index = new_row_index[-1]
            row = df.iloc[newest_index]
            print(f'Looking up sequence_id={row.sequence_id}')
            # Fetch per-image metadata for the chosen observation.
            self.images_df = get_metadata(sequence_id=row.sequence_id)
            if self.images_df is not None:
                self.images_df = self.images_df.dropna()

        cds.selected.on_change('indices', obs_row_selected)
        return cds

    @param.depends("images_df")
    def selected_title(self):
        # AttributeError when no observation is selected yet (images_df None).
        try:
            sequence_id = self.images_df.sequence_id.iloc[0]
        except AttributeError:
            sequence_id = ''
        return pn.panel(f'<h5>{sequence_id}</h5>')

    @param.depends('images_df')
    def image_table(self):
        """Small table listing the selected observation's image timestamps."""
        columns = [
            ('time', 'Time [UTC]')
        ]
        try:
            images_table = self.images_df.hvplot.table(columns=columns).opts(
                width=250,
                height=100,
                title=f'Images ({len(self.images_df)})',
            )
        except AttributeError:
            images_table = self.images_df
        return images_table

    @param.depends('images_df')
    def image_preview(self):
        """Thumbnail of the first image (the JPEG sibling of the FITS file)."""
        try:
            image_url = self.images_df.public_url.dropna().iloc[0].replace('.fits.fz', '.jpg')
            return pn.pane.HTML(f'''
                <div class="media" style="width: 300px; height: 200px">
                    <a href="{image_url}" target="_blank">
                        <img src="{image_url}" class="card-img-top" alt="Observation Image">
                    </a>
                </div>
            ''')
        except AttributeError:
            return ''

    @param.depends('observation_df')
    def fits_file_list_to_csv_cb(self):
        """ Generates a CSV file from current image list."""
        df = self.images_df.public_url.dropna()
        sio = StringIO()
        df.to_csv(sio, index=False, header=False)
        sio.seek(0)
        return sio

    def table_download_button(self):
        """ A button for downloading the images CSV."""
        try:
            sequence_id = self.images_df.sequence_id.iloc[0]
            return pn.widgets.FileDownload(
                callback=self.fits_file_list_to_csv_cb,
                filename=f'fits-list-{sequence_id}.txt',
                label='Download FITS List (.txt)',
            )
        except AttributeError:
            return ''

    def sources_download_button(self):
        """Button opening the observation's detected-sources parquet file."""
        try:
            sequence_id = self.images_df.sequence_id.iloc[0]
            parquet_url = f'{OBSERVATIONS_BASE_URL}/{sequence_id}-sources.parquet'
            source_btn = pn.widgets.Button(
                name='Download sources list (.parquet)',
            )
            # Open the file in a new tab client-side.
            source_btn.js_on_click(args=dict(url=parquet_url), code='''
                window.open(url, '_blank')
            ''')
            return source_btn
        except AttributeError:
            return ''

    def table(self):
        """Main observations DataTable bound to the search results."""
        columns = [
            TableColumn(
                field="unit_id",
                title="Unit ID",
                width=60,
            ),
            TableColumn(
                field="camera_id",
                title="Camera ID",
                width=60,
            ),
            TableColumn(
                field="time",
                title="Time [UTC]",
                formatter=DateFormatter(format='%Y-%m-%d %H:%M'),
                width=130,
            ),
            TableColumn(
                field="field_name",
                title="Field Name",
                width=240,
            ),
            TableColumn(
                field="ra",
                title="RA [deg]",
                formatter=NumberFormatter(format="0.000"),
                width=70,
            ),
            TableColumn(
                field="dec",
                title="Dec [deg]",
                formatter=NumberFormatter(format="0.000"),
                width=70,
            ),
            TableColumn(
                field="num_images",
                title="Images",
                width=40,
            ),
            TableColumn(
                field="status",
                title="Status",
                width=75,
            ),
            TableColumn(
                field="exptime",
                title="Exptime [sec]",
                formatter=NumberFormatter(format="0.00"),
                width=60,
            ),
            TableColumn(
                field="total_minutes_exptime",
                title="Total Minutes",
                formatter=NumberFormatter(format="0.0"),
                width=60,
            ),
        ]
        cds = self.update_dataset()
        data_table = DataTable(
            source=cds,
            name='observations_table',
            columns=columns,
            index_position=None,
            min_width=1100,
            fit_columns=True,
            sizing_mode='stretch_both',
        )
        return data_table
| StarcoderdataPython |
1972652 | <reponame>sungyubkim/cifar_training_jax<gh_stars>0
from typing import Any
from functools import partial
from absl import app, flags
import jax
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
import flax
from flax import linen as nn
from flax.training import train_state, checkpoints
from flax.jax_utils import replicate
import numpy as np
import optax
import tensorflow_datasets as tfds
from tqdm import tqdm
from utils import ckpt, metrics, mixup, mp
from model import vgg, resnet
from cifar.data import load_dataset
import tensorflow as tf
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.Session(config=config)
# additional hyper-parameters
flags.DEFINE_integer('epoch_num', 200,
help='epoch number of pre-training')
flags.DEFINE_float('max_norm', 5.0,
help='maximum norm of clipped gradient')
flags.DEFINE_enum('dataset', 'cifar100', ['cifar10', 'cifar100'],
help='training dataset')
flags.DEFINE_enum('model', 'resnet', ['vgg', 'resnet', 'wrn'],
help='network architecture')
flags.DEFINE_integer('seed', 0,
help='random number seed')
flags.DEFINE_bool('eval', False,
help='do not training')
flags.DEFINE_integer('test_batch_size_total', 1000,
help='total batch size (not device-wise) for evaluation')
flags.DEFINE_integer('log_freq',10,
help='(epoch) frequency of logging')
# tunable hparams for generalization
flags.DEFINE_float('weight_decay', 0.0005,
help='l2 regularization coeffcient')
flags.DEFINE_float('peak_lr', 0.8,
help='peak learning during learning rate schedule')
flags.DEFINE_integer('train_batch_size_total', 1000,
help='total batch size (not device-wise) for training')
FLAGS = flags.FLAGS
class TrainState(train_state.TrainState):
    # Extends Flax's TrainState with the model's BatchNorm running statistics.
    batch_stats: Any
def create_lr_sched(num_train):
    """Build the warmup + cosine-decay LR schedule: 10% of the total steps
    warm up linearly to FLAGS.peak_lr, the rest decay to zero."""
    steps_per_epoch = num_train // FLAGS.train_batch_size_total
    total_steps = FLAGS.epoch_num * steps_per_epoch
    warmup_steps = int(0.1 * total_steps)
    return optax.warmup_cosine_decay_schedule(0.0, FLAGS.peak_lr, warmup_steps, total_steps)
def init_state(rng, batch, num_classes, num_train):
    """Instantiate the network for FLAGS.model and wrap it in a TrainState
    with a clipped Nesterov-SGD optimizer on the warmup/cosine schedule."""
    # parsing model
    # NOTE(review): the 'vgg' check is a separate `if`, not part of the elif
    # chain below — it works because 'vgg' fails both later tests, but it is
    # fragile if new model names are added.
    if FLAGS.model=='vgg':
        net = vgg.VGGNet(num_classes=num_classes)
    if FLAGS.model=='resnet':
        net = resnet.ResNet18(num_classes=num_classes)
    elif FLAGS.model=='wrn':
        net = resnet.WRN28_10(num_classes=num_classes)
    # Initialize parameters and BatchNorm statistics from a sample batch.
    variables = net.init(rng, batch)
    params, batch_stats = variables['params'], variables['batch_stats']
    # Gradient clipping by global norm, then Nesterov momentum SGD.
    tx = optax.chain(
        optax.clip_by_global_norm(FLAGS.max_norm),
        optax.sgd(
            learning_rate=create_lr_sched(num_train),
            momentum=0.9,
            nesterov=True,
        )
    )
    state = TrainState.create(
        apply_fn=net.apply,
        params=params,
        tx=tx,
        batch_stats = batch_stats,
    )
    return state
def loss_fn(params, state, batch, train):
    """Compute the regularized loss for one batch.

    Returns (loss_, (loss, wd, acc, new_net_state)) where loss_ adds the
    weight-decay penalty, wd is the squared-l2 norm of the parameters, and
    new_net_state carries updated BatchNorm stats (None at eval time).
    """
    if train:
        # Training mode: BatchNorm stats are mutable and returned alongside
        # the logits.
        logits, new_net_state = state.apply_fn(
            {'params':params, 'batch_stats': state.batch_stats},
            batch['x'], train=train, mutable=['batch_stats'],
        )
    else:
        logits = state.apply_fn(
            {'params':params, 'batch_stats': state.batch_stats},
            batch['x'], train=train,
        )
        new_net_state = None
    # Squared-error loss between logits and (one-hot / mixup-soft) labels.
    loss = optax.l2_loss(logits, batch['y']).sum(axis=-1).mean()
    # 0.5 * ||params||^2 over all flattened parameters.
    wd = 0.5 * jnp.sum(jnp.square(ravel_pytree(params)[0]))
    loss_ = loss + FLAGS.weight_decay * wd
    # Accuracy against the argmax of the (possibly soft) labels.
    acc = jnp.mean(
        jnp.argmax(logits, axis=-1) == jnp.argmax(batch['y'],axis=-1)
    )
    return loss_, (loss, wd, acc, new_net_state)
@partial(jax.pmap, axis_name='batch')
def opt_step(rng, state, batch):
    """One pmap'd SGD step: mixup the batch, take gradients of loss_fn,
    average grads/stats across devices, and apply the update."""
    batch = mixup.mixup(rng, batch)
    grad_fn = jax.grad(loss_fn, has_aux=True)
    grads, (loss, wd, acc, new_net_state) = grad_fn(
        state.params,
        state,
        batch,
        True,
    )
    # sync and update: average gradients and BatchNorm stats across devices
    grads = jax.lax.pmean(grads, axis_name='batch')
    batch_stats = jax.lax.pmean(new_net_state['batch_stats'], axis_name='batch')
    new_state = state.apply_gradients(
        grads=grads, batch_stats=batch_stats
    )
    # log norm of gradient (this is the squared l2 norm — no sqrt is taken)
    grad_norm = jnp.sum(jnp.square(ravel_pytree(grads)[0]))
    return loss, wd, grad_norm, acc, new_state
def main(_):
    """Train (or evaluate, with --eval) a CIFAR classifier.

    Flow: load dataset -> build state (optionally restore a checkpoint) ->
    replicate across devices -> epoch loop of mixup SGD steps with periodic
    logging -> final accuracy and sharpness (Hessian/NTK trace) metrics.
    """
    num_devices = jax.device_count()
    # Device-leading batch shapes: (num_devices, per_device_batch).
    batch_dims_tr = (num_devices, FLAGS.train_batch_size_total//num_devices)
    batch_dims_te = (num_devices, FLAGS.test_batch_size_total//num_devices)
    ds_tr, ds_info = tfds.load(
        "{}:3.*.*".format(FLAGS.dataset),
        data_dir='../tensorflow_datasets',
        split='train',
        with_info=True,
    )
    ds_te = tfds.load(
        "{}:3.*.*".format(FLAGS.dataset),
        data_dir='../tensorflow_datasets',
        split='test',
    )
    # extract info. of dataset
    ds_tr, ds_te = ds_tr.cache(), ds_te.cache()
    img_shape = ds_info.features['image'].shape
    label_info = ds_info.features['label']
    class_names = label_info.names
    num_classes = label_info.num_classes
    num_train = ds_info.splits['train'].num_examples
    num_test = ds_info.splits['test'].num_examples
    # Experiment tag built from the tunable hyper-parameters.
    hparams = [
        FLAGS.model,
        # Bug fix: the original referenced FLAGS.beta, which is never defined
        # by this script and raises at runtime; weight_decay is the remaining
        # tunable hyper-parameter declared above.
        FLAGS.weight_decay,
        FLAGS.peak_lr,
        FLAGS.train_batch_size_total,
        FLAGS.seed,
    ]
    hparams = '_'.join(map(str, hparams))
    res_dir = f'./res_cifar/{FLAGS.dataset}/'+hparams
    print(f'hyper-parameters : {hparams}')
    ckpt.check_dir(res_dir)
    # define pseudo-random number generator
    rng = jax.random.PRNGKey(FLAGS.seed)
    rng, rng_ = jax.random.split(rng)
    # initialize network and optimizer from a dummy batch of one image
    state = init_state(
        rng_,
        jax.random.normal(rng_, (1, *img_shape)),
        num_classes,
        num_train,
    )
    if FLAGS.eval:
        # Evaluation-only run: restore the latest checkpoint.
        state = checkpoints.restore_checkpoint(
            res_dir,
            state,
        )
    state = replicate(state)
    # Un-augmented loaders (materialized once) for accuracy/sharpness evals.
    eval_tr = load_dataset(
        ds_tr,
        batch_dims=batch_dims_te,
        aug=False,
        num_classes=num_classes,
    )
    eval_te = load_dataset(
        ds_te,
        batch_dims=batch_dims_te,
        aug=False,
        num_classes=num_classes,
    )
    eval_tr = list(eval_tr)
    eval_te = list(eval_te)
    if not(FLAGS.eval):
        # train model
        pbar = tqdm(range(1,FLAGS.epoch_num+1))
        for epoch in pbar:
            train_dataset = load_dataset(
                ds_tr,
                batch_dims=batch_dims_tr,
                aug=True,
                num_classes=num_classes,
            )
            for batch_tr in train_dataset:
                rng, rng_ = jax.random.split(rng)
                loss, wd, grad_norm, acc, state = opt_step(
                    replicate(rng_),
                    state,
                    batch_tr,
                )
                # Live per-step metrics on the progress bar.
                res = {
                    'epoch': epoch,
                    'acc' : f'{np.mean(jax.device_get(acc)):.4f}',
                    'loss': f'{np.mean(jax.device_get(loss)):.4f}',
                    'wd' : f'{np.mean(jax.device_get(wd)):.4f}',
                    'grad_norm' : f'{np.mean(jax.device_get(grad_norm)):.4f}',
                }
                pbar.set_postfix(res)
            if (epoch%FLAGS.log_freq)==0:
                # Periodic checkpoint + full-dataset accuracies appended to the log.
                ckpt.save_ckpt(state, res_dir)
                acc_tr = metrics.acc_dataset(state, eval_tr)
                res['acc_tr'] = f'{acc_tr:.4f}'
                acc_te = metrics.acc_dataset(state, eval_te)
                res['acc_te'] = f'{acc_te:.4f}'
                ckpt.dict2tsv(res, res_dir+'/log.tsv')
    # evaluate
    res = {}
    acc_tr = metrics.acc_dataset(state, eval_tr)
    res['acc_tr'] = f'{acc_tr:.4f}'
    acc_te = metrics.acc_dataset(state, eval_te)
    res['acc_te'] = f'{acc_te:.4f}'
    # Q1 : How many samples do we need? Is mini-batch (M>1000) sufficient?
    tr_hess_batch = metrics.tr_hess_batch(loss_fn, state, eval_tr[0])
    tr_hess_dataset = metrics.tr_hess_dataset(loss_fn, state, eval_tr)
    tr_ntk_batch = metrics.tr_ntk_batch(state, eval_tr[0])
    tr_ntk_dataset = metrics.tr_ntk_dataset(state, eval_tr)
    res['tr_hess_batch'] = f'{tr_hess_batch:.4f}'
    res['tr_hess_dataset'] = f'{tr_hess_dataset:.4f}'
    res['tr_ntk_batch'] = f'{tr_ntk_batch:.4f}'
    res['tr_ntk_dataset'] = f'{tr_ntk_dataset:.4f}'
    # Q2 : Is loss landscape for test dataset similar to train dataset?
    tr_hess_dataset_te = metrics.tr_hess_dataset(loss_fn, state, eval_te)
    tr_ntk_dataset_te = metrics.tr_ntk_dataset(state, eval_te)
    res['tr_hess_dataset_te'] = f'{tr_hess_dataset_te:.4f}'
    res['tr_ntk_dataset_te'] = f'{tr_ntk_dataset_te:.4f}'
    ckpt.dict2tsv(res, res_dir+'/sharpness.tsv')
if __name__ == "__main__":
app.run(main) | StarcoderdataPython |
8009191 | from nextcord.ext import commands
from nextcord.ext.commands import errors
class Errors(commands.Cog):
    """Centralised command-error handling for the bot."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, err):
        """Route nextcord command errors to user-friendly replies."""
        if isinstance(err, errors.MissingRequiredArgument) or isinstance(err, errors.BadArgument):
            # Re-show the help text for whichever (sub)command was misused.
            helper = str(ctx.invoked_subcommand) if ctx.invoked_subcommand else str(
                ctx.command)
            await ctx.send_help(helper)
        elif isinstance(err, errors.CheckFailure):
            # Permission/check failures are deliberately ignored.
            pass
        elif isinstance(err, errors.MaxConcurrencyReached):
            # Bug fix: the original message closed with ** but never opened
            # the bold markdown, so Discord rendered a stray literal **.
            await ctx.reply("**You've reached max capacity of command usage at once, please finish the previous one...**")
        elif isinstance(err, errors.CommandOnCooldown):
            await ctx.reply(f"**This command is on cooldown... try again in `{err.retry_after:.2f}` seconds.**")
        elif isinstance(err, errors.CommandNotFound):
            await ctx.reply(f"**This command is not found... try again**")
def setup(bot):
    """Extension entry point: nextcord calls this when the extension is loaded."""
    cog = Errors(bot)
    bot.add_cog(cog)
11354735 | <reponame>atsgen/tf-test
from common.k8s.base import BaseK8sTest
from k8s.network_policy import NetworkPolicyFixture
from tcutils.wrappers import preposttest_wrapper
from test import BaseTestCase
from k8s.namespace import NamespaceFixture
from k8s.pod import PodFixture
from tcutils.util import get_random_name, get_random_cidr
import time
import test
from tcutils.util import skip_because, Singleton
import gevent
from gevent import greenlet
from firewall_rule import FirewallRuleFixture
from application_policy_set import ApplicationPolicySetFixture
from firewall_policy import FirewallPolicyFixture
from common.isolated_creds import IsolatedCreds
from policy_test import PolicyFixture
from common.policy.config import AttachPolicyFixture
from vnc_api.vnc_api import *
from k8s.namespace import NamespaceFixture
from k8s.hbs import HbsFixture
class TestNetworkPolicy(BaseK8sTest):
    """HBF (host-based firewall) tag / firewall-policy tests for k8s pods.

    Each test drives :meth:`run_test` with a combination of
    intra/inter-VN, intra/inter-compute placement, tag type
    (tier/deployment/site/label) and the object the tag is applied to
    (project / vn / vmi), then verifies ICMP between two busybox pods
    flows through the HBF service.
    """

    @classmethod
    def setUpClass(cls):
        """Create the shared isolated namespace and the HBS fixture once per class."""
        super(TestNetworkPolicy, cls).setUpClass()
        cls.namespace = NamespaceFixture(cls._connections, name="svc", isolation=True)
        cls.namespace.setUp()
        cls.namespace.verify_on_setup()
        namespace = cls.namespace.name
        cls.hbs = HbsFixture(cls._connections, name="hbs", namespace=namespace)
        # Label the compute nodes so HBF pods can be scheduled on them.
        assert cls._connections.k8s_client.set_label_for_hbf_nodes( \
            node_selector='computenode'), "Error : could not label the nodes"
        cls.hbs.setUp()
        cls.hbs.verify_on_setup()

    def setUp(self):
        super(TestNetworkPolicy, self).setUp()

    @classmethod
    def tearDownClass(cls):
        """Tear down the namespace and remove the compute-node HBF labels."""
        super(TestNetworkPolicy, cls).tearDownClass()
        cls.namespace.cleanUp()
        assert cls._connections.k8s_client.set_label_for_hbf_nodes(labels={"type": None}), \
            "Error : could not label the nodes"

    def run_test(self,
                 vn1_name,
                 tag_type,
                 tag_value,
                 tag_obj_name,
                 vn2_name=None,
                 tag2_value=None,
                 inter_compute=False,
                 cleanup=True):
        """Generic HBF tag/firewall-policy traffic test.

        Creates one or two virtual networks in an isolated project, launches a
        busybox pod on each (optionally on different computes), applies
        ``tag_type`` tags to the requested object (``'project'`` / ``'vn'`` /
        ``'vmi'``), builds a firewall rule + policy + application-policy-set
        that passes ICMP between the tagged endpoints with
        host_based_service enabled, and asserts pod1 can ping pod2 through HBF.

        :param vn1_name: name of the first virtual network (always created)
        :param tag_type: one of 'tier', 'deployment', 'site', 'label'
        :param tag_value: tag value applied to the first endpoint
        :param tag_obj_name: which object carries the tag: 'project'/'vn'/'vmi'
        :param vn2_name: optional second VN; when set, an allow-all network
            policy is attached between vn1 and vn2
        :param tag2_value: optional distinct tag value for the second endpoint
        :param inter_compute: schedule the pods on different compute nodes
            when at least two labelled computes exist
        :param cleanup: accepted for caller compatibility; currently unused
            (cleanups are always registered via addCleanup)
        :returns: (policy1_fixture, pod1, pod2, fwp_obj, project_name)
        """
        project_name = "k8s-" + self.namespace.name
        isolated_creds = IsolatedCreds(
            self.inputs,
            project_name,
            input_file=self.input_file,
            logger=self.logger)
        self.remove_from_cleanups(isolated_creds.cleanUp)
        proj_fix = self.create_project(project_name=project_name,
                                       cleanup=False, connections=self.connections)
        proj_inputs = isolated_creds.get_inputs(proj_fix)
        proj_connection = isolated_creds.get_connections(proj_inputs)
        # Create VNs
        vn1 = self.setup_vn(project_name=project_name,
                            connections=proj_connection, inputs=proj_inputs, vn_name=vn1_name)
        vn1_dict = {"domain": vn1.domain_name,
                    "project": vn1.project_name,
                    "name": vn1.vn_name}
        if vn2_name is not None:
            vn2 = self.setup_vn(project_name=project_name,
                                connections=proj_connection, inputs=proj_inputs, vn_name=vn2_name)
            vn2_dict = {"domain": vn2.domain_name,
                        "project": vn2.project_name,
                        "name": vn2.vn_name}
            # Attach policy btw vn1 and vn2 to allow all traffic
            pol_rules_vn1_vn2 = [
                {
                    'direction': '<>', 'simple_action': 'pass',
                    'protocol': 'any', 'src_ports': 'any',
                    'dst_ports': 'any',
                    'source_network': vn1_name,
                    'dest_network': vn2_name,
                },
            ]
            policy1_name = 'p1_vn1_vn2'
            policy1_fixture = self.useFixture(
                PolicyFixture(
                    policy_name=policy1_name,
                    rules_list=pol_rules_vn1_vn2,
                    inputs=proj_inputs,
                    connections=proj_connection))
            policy_attach_fix1 = self.useFixture(AttachPolicyFixture(
                proj_inputs, proj_connection, vn1, policy1_fixture))
            policy_attach_fix1 = self.useFixture(AttachPolicyFixture(
                proj_inputs, proj_connection, vn2, policy1_fixture))
        else:
            vn2_dict = None
            vn2 = None
            policy1_fixture = None
        # Create 2 pods
        namespace_name = self.namespace.name
        compute_label_list, compute_count = self.connections.k8s_client.get_kubernetes_compute_labels()
        compute_selector_1 = {'computenode': compute_label_list[0]}
        if inter_compute and compute_count >= 2:
            compute_selector_2 = {'computenode': compute_label_list[1]}
        else:
            # Fall back to co-located pods when only one labelled compute exists.
            compute_selector_2 = compute_selector_1
        pod1 = self.setup_busybox_pod(namespace=namespace_name,
                                      custom_isolation=True, fq_network_name=vn1_dict,
                                      compute_node_selector=compute_selector_1)
        assert pod1.verify_on_setup()
        pod2 = self.setup_busybox_pod(namespace=namespace_name,
                                      custom_isolation=True,
                                      fq_network_name=(vn2_dict or vn1_dict),
                                      compute_node_selector=compute_selector_2)
        assert pod2.verify_on_setup()
        # Baseline reachability before any tags/policies are applied.
        assert pod1.ping_with_certainty(pod2.pod_ip)
        self.addCleanup(self.perform_cleanup, pod1)
        self.addCleanup(self.perform_cleanup, pod2)
        # Create tags
        fq_name1 = ['default-domain', project_name,
                    '%s=%s' % (tag_type, tag_value)]
        tag1 = self.create_tag(fq_name=fq_name1,
                               tag_type=tag_type, tag_value=tag_value,
                               parent_type='project')
        self.addCleanup(self.vnc_h.delete_tag, id=tag1)
        if tag2_value is not None:
            fq_name2 = ['default-domain', project_name,
                        '%s=%s' % (tag_type, tag2_value)]
            # BUGFIX: the second tag was previously created with fq_name1,
            # leaving fq_name2 unused and the tag's FQ name inconsistent
            # with its value (tag2_value).
            tag2 = self.create_tag(fq_name=fq_name2,
                                   tag_type=tag_type, tag_value=tag2_value,
                                   parent_type='project')
            self.addCleanup(self.vnc_h.delete_tag, id=tag2)
        app_tag_name = 'myk8s'
        fq_name3 = ['default-domain', project_name,
                    '%s=%s' % ('application', 'myk8s')]
        apptag = self.create_tag(fq_name=fq_name3,
                                 tag_type='application',
                                 tag_value=app_tag_name, parent_type='project')
        self.addCleanup(self.vnc_h.delete_tag, id=apptag)
        # Apply tag to the requested object kind; collect (obj, value) pairs.
        tag_obj_list = []
        tag_value_list = []
        if tag_obj_name == 'project':
            project_fq_name = ['default-domain', project_name]
            tag_obj = self.read_project_obj(project_fq_name=project_fq_name)
            tag_obj_list.append(tag_obj)
            tag_value_list.append(tag_value)
        elif tag_obj_name == 'vmi':
            tag_obj1 = self.read_virtual_machine_interface(
                id=pod1.vmi_objs[0].uuid)
            tag_obj_list.append(tag_obj1)
            tag_value_list.append(tag_value)
            tag_obj2 = self.read_virtual_machine_interface(
                id=pod2.vmi_objs[0].uuid)
            tag_obj_list.append(tag_obj2)
            # Second VMI reuses tag_value when no distinct tag2_value given.
            tag_value_list.append(tag2_value or tag_value)
        elif tag_obj_name == 'vn':
            vn_name = ['default-domain', project_name, '%s' % (vn1_name)]
            tag_obj = self.read_virtual_network(fq_name=vn_name)
            tag_obj_list.append(tag_obj)
            tag_value_list.append(tag_value)
            if vn2_name:
                vn_name = ['default-domain', project_name, '%s' % (vn2_name)]
                tag_obj = self.read_virtual_network(fq_name=vn_name)
                tag_obj_list.append(tag_obj)
                tag_value_list.append(tag2_value or tag_value)
        for tag_obj, tagv in zip(tag_obj_list, tag_value_list):
            self.set_tag(tag_type=tag_type, tag_value=tagv,
                         obj=tag_obj)
            self.set_tag(tag_type='application', tag_value=app_tag_name,
                         obj=tag_obj)
        # Create FW rule passing ICMP between the two tagged endpoints.
        site_ep1 = {'tags': ['%s=%s' % (tag_type, tag_value)]}
        if tag2_value is not None:
            site_ep2 = {'tags': ['%s=%s' % (tag_type, tag2_value)]}
        else:
            site_ep2 = None
        fwr_fqname = ['default-domain', project_name, 'my_fwr']
        fwr_uuid = self.vnc_h.create_firewall_rule(fq_name=fwr_fqname,
                                                   parent_type='project', service_groups=[], protocol='icmp',
                                                   source=site_ep1,
                                                   destination=(site_ep2 or site_ep1), action='pass',
                                                   direction="<>")
        rule_obj = self.vnc_h.firewall_rule_read(id=fwr_uuid)
        # Enable host-based-service handling so traffic is steered via HBF.
        rule_obj.set_action_list(ActionListType(host_based_service=True, simple_action="pass"))
        self.vnc_h.firewall_rule_update(rule_obj)
        self.addCleanup(self.vnc_h.delete_firewall_rule, id=fwr_uuid)
        # Create FW policy and add the rule
        rules = [{'uuid': fwr_uuid, 'seq_no': 20}]
        fwp_policy_fqname = ['default-domain', project_name, 'fw_pol']
        fwp_uuid = self.vnc_h.create_firewall_policy(
            parent_type='project',
            fq_name=fwp_policy_fqname,
            rules=rules)
        fwp_obj = self.vnc_h.read_firewall_policy(fq_name=fwp_policy_fqname)
        self.addCleanup(self.vnc_h.delete_firewall_policy, id=fwp_uuid)
        # Create an APS and add the policy
        aps_fqname = ['default-domain', project_name, 'myaps']
        aps_uuid = self.vnc_h.create_application_policy_set(
            fq_name=aps_fqname,
            parent_type='project',
            policies=[{'uuid': fwp_uuid, 'seq_no': 20}])
        self.addCleanup(self.vnc_h.delete_application_policy_set, id=aps_uuid)
        self.vnc_h.set_tag('application', app_tag_name,
                           False, None, 'application-policy-set', aps_uuid)
        #import pdb; pdb.set_trace()
        assert pod1.ping_with_certainty(pod2.pod_ip, expectation=True, count='5', hbf_enabled=True)
        # Cleanups
        for tag_obj in tag_obj_list:
            self.addCleanup(self.vnc_h.unset_tag,
                            tag_type=tag_type, obj=tag_obj)
            self.addCleanup(self.vnc_h.unset_tag,
                            tag_type='application', obj=tag_obj)
        '''
        self.addCleanup(self.vnc_h.delete_tag, id=tag1)
        if tag2_value != None:
            self.addCleanup(self.vnc_h.delete_tag, id=tag2)
        self.addCleanup(self.vnc_h.delete_tag, id=apptag)
        '''
        return policy1_fixture, pod1, pod2, fwp_obj, project_name

    ''' Test 16 '''
    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_tier_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='tier',
                      tag_value='myweb',
                      tag_obj_name='vn')
    # end intra_vn_intra_compute_tag_tier_tagat_vn

    @test.attr(type=['k8s_sanity'])
    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_deployment_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='deployment',
                      tag_value='hr',
                      tag_obj_name='vn')
    # end intra_vn_intra_compute_tag_deployment_tagat_vn

    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_site_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='site',
                      tag_value='BLR',
                      tag_obj_name='vn')
    # end intra_vn_intra_compute_tag_site_tagat_vn

    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_label_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='label',
                      tag_value='MYVN',
                      tag_obj_name='vn')
    # end intra_vn_intra_compute_tag_label_tagat_vn
    ''' Test 16 End '''

    ''' Test 17 '''
    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_tier_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='tier',
                      tag_value='myweb',
                      tag_obj_name='vn',
                      inter_compute=True)
    # end intra_vn_inter_compute_tag_tier_tagat_vn

    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_deployment_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='deployment',
                      tag_value='hr',
                      tag_obj_name='vn',
                      inter_compute=True)
    # end intra_vn_inter_compute_tag_deployment_tagat_vn

    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_site_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='site',
                      tag_value='BLR',
                      tag_obj_name='vn',
                      inter_compute=True)
    # end intra_vn_inter_compute_tag_site_tagat_vn

    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_label_tagat_vn(self):
        self.run_test(vn1_name='vn1',
                      tag_type='label',
                      tag_value='MYVN',
                      tag_obj_name='vn',
                      inter_compute=True)
    # end intra_vn_inter_compute_tag_label_tagat_vn
    ''' Test 17 End '''

    ''' Test 18 '''
    @preposttest_wrapper
    def test_inter_vn_intra_compute_tag_tier_tagat_vmi(self):
        self.run_test(vn1_name='vn1',
                      vn2_name="vn2",
                      tag_type='tier',
                      tag_value='myweb',
                      tag2_value='myapp',
                      tag_obj_name='vmi')
    # end test_inter_vn_intra_compute_tag_tier_tagat_vmi

    @preposttest_wrapper
    def test_inter_vn_intra_compute_tag_deployment_tagat_vmi(self):
        self.run_test(vn1_name='vn1',
                      vn2_name="vn2",
                      tag_type='deployment',
                      tag_value='hr',
                      tag2_value='mkt',
                      tag_obj_name='vmi')
    # end test_inter_vn_intra_compute_tag_deployment_tagat_vmi

    @preposttest_wrapper
    def test_inter_vn_intra_compute_tag_site_tagat_vmi(self):
        self.run_test(vn1_name='vn1',
                      vn2_name="vn2",
                      tag_type='site',
                      tag_value='BLR',
                      tag2_value='SVL',
                      tag_obj_name='vmi')
    # end test_inter_vn_intra_compute_tag_site_tagat_vmi

    @preposttest_wrapper
    def test_inter_vn_intra_compute_tag_label_tagat_vmi(self):
        self.run_test(vn1_name='vn1',
                      vn2_name="vn2",
                      tag_type='label',
                      tag_value='MYVMI1',
                      tag2_value='MYVMI2',
                      tag_obj_name='vmi')
    # end test_inter_vn_intra_compute_tag_label_tagat_vmi
    ''' Test Case 18 End '''

    ''' Test 19 '''
    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_tier_tagat_vmi(self):
        self.run_test(vn1_name='vn1',
                      vn2_name="vn2",
                      tag_type='tier',
                      tag_value='myweb',
                      tag2_value='myapp',
                      tag_obj_name='vmi',
                      inter_compute=True)
    # end test_inter_vn_inter_compute_tag_tier_tagat_vmi

    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_deployment_tagat_vmi(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2", tag_type='deployment',
                      tag_value='hr', tag2_value='mkt',
                      tag_obj_name='vmi', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_deployment_tagat_vmi

    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_site_tagat_vmi(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2",
                      tag_type='site', tag_value='BLR',
                      tag2_value='SVL', tag_obj_name='vmi', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_site_tagat_vmi

    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_label_tagat_vmi(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2", tag_type='label',
                      tag_value='MYVMI1', tag2_value='MYVMI2', tag_obj_name='vmi',
                      inter_compute=True)
    # end test_inter_vn_inter_compute_tag_label_tagat_vmi
    ''' Test Case 19 End '''

    ''' Test 20 '''
    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_tier_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='tier', tag_value='myweb',
                      tag2_value='myapp', tag_obj_name='vmi')
    # end test_intra_vn_intra_compute_tag_tier_tagat_vmi

    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_deployment_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='deployment',
                      tag_value='hr', tag2_value='mkt', tag_obj_name='vmi')
    # end test_intra_vn_intra_compute_tag_deployment_tagat_vmi

    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_site_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='site',
                      tag_value='BLR', tag2_value='SVL', tag_obj_name='vmi')
    # end test_intra_vn_intra_compute_tag_site_tagat_vmi

    @preposttest_wrapper
    def test_intra_vn_intra_compute_tag_label_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='label',
                      tag_value='MYVMI1', tag2_value='MYVMI2', tag_obj_name='vmi')
    # end test_intra_vn_intra_compute_tag_label_tagat_vmi
    ''' Test Case 20 End '''

    ''' Test 21 '''
    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_tier_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='tier',
                      tag_value='myweb', tag2_value='myapp',
                      tag_obj_name='vmi', inter_compute=True)
    # end test_intra_vn_inter_compute_tag_tier_tagat_vmi

    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_deployment_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='deployment',
                      tag_value='hr', tag2_value='mkt',
                      tag_obj_name='vmi', inter_compute=True)
    # end test_intra_vn_inter_compute_tag_deployment_tagat_vmi

    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_site_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='site',
                      tag_value='BLR', tag2_value='SVL',
                      tag_obj_name='vmi', inter_compute=True)
    # end test_intra_vn_inter_compute_tag_site_tagat_vmi

    @preposttest_wrapper
    def test_intra_vn_inter_compute_tag_label_tagat_vmi(self):
        self.run_test(vn1_name='vn1', tag_type='label',
                      tag_value='MYVMI1', tag2_value='MYVMI2',
                      tag_obj_name='vmi', inter_compute=True)
    # end test_intra_vn_inter_compute_tag_label_tagat_vmi
    ''' Test Case 21 End '''

    ''' Test 22 '''
    @preposttest_wrapper
    def test_inter_vn_intra_compute_tag_tier_tagat_project(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2",
                      tag_type='tier', tag_value='myweb', tag_obj_name='project')
    # end test_inter_vn_intra_compute_tag_tier_tagat_project
    ''' Test 22 End '''

    ''' Test 23 '''
    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_tier_tagat_project(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2", tag_type='tier',
                      tag_value='myweb', tag_obj_name='project', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_tier_tagat_project
    ''' Test 23 End '''

    ''' Test 24 '''
    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_tier_tagat_vn(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2",
                      tag_type='tier', tag_value='myweb',
                      tag_obj_name='vn', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_tier_tagat_vn

    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_deployment_tagat_vn(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2",
                      tag_type='deployment', tag_value='hr',
                      tag_obj_name='vn', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_deployment_tagat_vn

    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_site_tagat_vn(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2",
                      tag_type='site', tag_value='BLR',
                      tag_obj_name='vn', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_site_tagat_vn

    @preposttest_wrapper
    def test_inter_vn_inter_compute_tag_label_tagat_vn(self):
        self.run_test(vn1_name='vn1', vn2_name="vn2",
                      tag_type='label', tag_value='MYVN',
                      tag_obj_name='vn', inter_compute=True)
    # end test_inter_vn_inter_compute_tag_label_tagat_vn
    ''' Test 24 End '''

    ''' Test case 28 '''
    @preposttest_wrapper
    def test_policy_at_firewall_and_network_level(self):
        """Verify firewall-policy action takes effect and network policy can still deny."""
        policy_fix, pod1, pod2, fwp_obj_uuid, project_name = self.run_test(vn1_name='vn11',
            vn2_name="vn22", tag_type='tier', tag_value='myweb11', tag2_value='myweb22',
            tag_obj_name='vmi', cleanup=False)
        #import pdb;pdb.set_trace()
        fwr_uuid = fwp_obj_uuid.get_firewall_rule_refs()[0]['uuid']
        rule_obj = self.vnc_h.firewall_rule_read(id=fwr_uuid)
        rule_obj.set_action_list(ActionListType(host_based_service=True,
                                                simple_action="deny"))
        self.vnc_h.firewall_rule_update(rule_obj)
        # Expect ping to fail, as fw policy is set to deny
        assert pod1.ping_with_certainty(pod2.pod_ip, expectation=False)
        self.vnc_h.update_firewall_rule(uuid=fwr_uuid, action='pass')
        assert pod1.ping_with_certainty(pod2.pod_ip, expectation=True, count='5', hbf_enabled=True)
        policy_entries = policy_fix.get_entries()
        policy_id = policy_fix.get_id()
        policy_entries.policy_rule[0].action_list.simple_action = 'deny'
        p_rules = policy_entries
        policy_fix.update_policy(policy_id, p_rules)
        # Expect ping to fail as network policy action is deny
        assert pod1.ping_with_certainty(pod2.pod_ip, expectation=False)
        #self.perform_cleanup(pod1)
        #self.perform_cleanup(pod2)
    # end test_policy_at_firewall_network_level
    ''' Test 28 End '''

    @preposttest_wrapper
    def intra_vn_tag_tier_tagat_project(self):
        self.run_test(vn1_name='vn1', tag_type='tier',
                      tag_value='myweb', tag_obj_name='project')
    # end intra_vn_tag_tier_tagat_project

    @preposttest_wrapper
    def test_tag_at_vmi_intra_vn(self):
        self.run_test(vn1_name='vn1', tag_type='tier',
                      tag_value='myweb', tag_obj_name='vmi')
    # end test_tag_at_vmi_intra_vn

    @preposttest_wrapper
    def test_tag_at_vmi_inter_vn(self):
        '''
        Test ping between 2 PODs
        '''
        self.run_test(vn1_name='vn1',
                      vn2_name="vn2",
                      tag_type='tier',
                      tag_value='myweb',
                      tag_obj_name='vmi')
    # end test_tag_at_vmi_inter_vn

    ''' Test 29 '''
    @preposttest_wrapper
    def test_fwp_tag_priority_order_vmi_vn(self):
        """Verify VMI-level tier tags take priority over VN-level tags in FW policy."""
        policy_fix, pod1, pod2, fwp_obj, project_name = self.run_test(vn1_name='vn1',
            vn2_name="vn2",
            tag_type='tier',
            tag_value='myweb1',
            tag_obj_name='vn', tag2_value='myweb2', cleanup=False)
        # Create application tag
        # Create tag
        tag_type = 'tier'
        tag_value1 = 'myweb3'
        tag_value2 = 'myweb4'
        fq_name1 = ['default-domain', project_name,
                    '%s=%s' % (tag_type, tag_value1)]
        fq_name2 = ['default-domain', project_name,
                    '%s=%s' % (tag_type, tag_value2)]
        tier_tag_1 = self.create_tag(fq_name=fq_name1,
                                     tag_type=tag_type, tag_value=tag_value1, parent_type='project')
        tier_tag_2 = self.create_tag(fq_name=fq_name2,
                                     tag_type=tag_type, tag_value=tag_value2, parent_type='project')
        tag_obj1 = self.read_virtual_machine_interface(id=pod1.vmi_objs[0].uuid)
        tag_obj2 = self.read_virtual_machine_interface(id=pod2.vmi_objs[0].uuid)
        self.set_tag(tag_type=tag_type, tag_value=tag_value1,
                     obj=tag_obj1)
        self.set_tag(tag_type=tag_type, tag_value=tag_value2,
                     obj=tag_obj2)
        self.set_tag(tag_type='application', tag_value='myk8s',
                     obj=tag_obj1)
        self.set_tag(tag_type='application', tag_value='myk8s',
                     obj=tag_obj2)
        site_ep1 = {'tags': ['%s=%s' % (tag_type, tag_value1)]}
        site_ep2 = {'tags': ['%s=%s' % (tag_type, tag_value2)]}
        fwr_fqname = ['default-domain', project_name, 'my_fwr_tier']
        fwr_uuid = self.vnc_h.create_firewall_rule(fq_name=fwr_fqname,
                                                   parent_type='project', service_groups=[], protocol='icmp',
                                                   source=site_ep1, destination=site_ep2, action='deny',
                                                   direction="<>")
        rule_obj = self.vnc_h.firewall_rule_read(id=fwr_uuid)
        #rule_obj.set_action_list(ActionListType(host_based_service=True,simple_action="deny"))
        #self.vnc_h.firewall_rule_update(rule_obj)
        '''
        self.addCleanup(self.vnc_h.unset_tag,
            tag_type=tag_type, obj=tag_obj1)
        self.addCleanup(self.vnc_h.unset_tag,
            tag_type=tag_type, obj=tag_obj2)
        self.addCleanup(self.vnc_h.unset_tag,
            tag_type='application', obj=tag_obj1)
        self.addCleanup(self.vnc_h.unset_tag,
            tag_type='application', obj=tag_obj2)
        '''
        self.addCleanup(self.vnc_h.delete_tag, id=tier_tag_1)
        self.addCleanup(self.vnc_h.delete_tag, id=tier_tag_2)
        self.addCleanup(self.vnc_h.delete_firewall_rule, id=fwr_uuid)
        # To fw policy add the rule to deny traffic at vmi
        rules = [{'uuid': fwr_uuid, 'seq_no': 21}]
        self.vnc_h.add_firewall_rules(fwp_obj.uuid, rules)
        # Expect ping to fail as tier tag vmis drop traffic
        assert pod1.ping_with_certainty(pod2.pod_ip, expectation=False)
        # Reset fw rule to allow traffic for tier tag expect ping to pass
        rule_obj.set_action_list(ActionListType(host_based_service=True, simple_action="pass"))
        self.vnc_h.firewall_rule_update(rule_obj)
        assert pod1.ping_with_certainty(pod2.pod_ip, expectation=True, count='5', hbf_enabled=True)
        self.vnc_h.remove_firewall_rules(fwp_obj.uuid, rules)
        self.vnc_h.unset_tag(tag_type=tag_type, obj=tag_obj1)
        self.vnc_h.unset_tag(tag_type=tag_type, obj=tag_obj2)
        self.vnc_h.unset_tag(tag_type='application', obj=tag_obj1)
        self.vnc_h.unset_tag(tag_type='application', obj=tag_obj2)
    # end test_ping_inter_vn
    ''' Test case 29 End '''
| StarcoderdataPython |
69076 | import numpy as np
class user:
    """A network user that plans a channel and attempts to transmit on it."""

    def __init__(self):
        # -1 marks "no channel chosen yet".
        self.planned_channel = -1
        self.transmission_success = False
        print('user creation success')

    def choose_channel(self, method, num_channels):
        """Pick a channel index in [0, num_channels) using the given strategy.

        Only 'uniform' (uniform random choice) is currently supported; any
        other method leaves planned_channel unchanged.
        """
        if method == 'uniform':
            self.planned_channel = np.random.randint(0, num_channels)
            print('planned channel', self.planned_channel)

    def transmit(self, occupied_channels):
        """Attempt transmission; fails on collision, otherwise claims the channel.

        occupied_channels -- a mutable set of channel indices already in use;
        it is updated in place on a successful transmission.
        """
        if self.planned_channel in occupied_channels:
            self.transmission_success = False
            # BUGFIX: message previously read 'trasmission'.
            print('transmission is failed')
        else:
            occupied_channels.add(self.planned_channel)
            self.transmission_success = True
            # BUGFIX: message previously read 'trasmission'.
            print('transmission is successful')

    def reset(self):
        """Return to the initial (no channel planned, no success) state."""
        self.planned_channel = -1
        self.transmission_success = False
class net:
    """A network of users contending for num_channels shared channels."""

    def __init__(self, num_channels, num_users, num_transmissions):
        self.num_transmissions = num_transmissions
        self.num_channels = num_channels
        # Channels claimed by successful transmissions so far.
        self.occupied_channels = set()
        self.users = [user() for _ in range(num_users)]

    def reset(self):
        """Free all channels and reset every user's state."""
        self.occupied_channels = set()
        # Renamed the loop variable: it previously shadowed the `user` class.
        for member in self.users:
            member.reset()

    def step(self):
        """Run num_transmissions rounds; each user picks a channel and transmits."""
        for _ in range(self.num_transmissions):
            for member in self.users:
                member.choose_channel('uniform', self.num_channels)
                member.transmit(self.occupied_channels)
def main():
    """Small demo: three users share two channels for ten transmission rounds."""
    demo = net(2, 3, 10)
    print('Transmission state for user # 0',
          demo.users[0].transmission_success)
    demo.step()
    print(demo.occupied_channels)
    demo.reset()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8020385 | from django.apps import AppConfig
class TgadmincoreConfig(AppConfig):
    """Django application configuration for the 'tgadmincore' app."""
    # Dotted path / app label Django uses to register this application.
    name = 'tgadmincore'
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.