text stringlengths 38 1.54M |
|---|
Python 3.9.1 (tags/v3.9.1:1e5d33e, Dec 7 2020, 17:08:21) [MSC v.1927 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # Number System Conversion
>>>
>>> bin(25)
'0b11001'
>>> # bin converts decimal to binary system
>>> # This is how you convert
>>> # Divide 25 by 2 = 12 with 1 remaining
>>> # Divide 12 by 2 = 6 with 0 remaining
>>> # Divide 6 by 2 = 3 with 0 remaining
>>> # Divide 3 by 2 = 1 with 1 remaining
>>>
>>> # the "0b" prefix marks binary format: 11001 on its own could be read as a decimal number, but the leading "0b" tells the reader it is binary
>>>
>>> 0b0101
5
>>>
>>>
>>> # Octal System (base 8: uses only the digits 0,1,2,3,4,5,6,7)
>>> oct(25)
'0o31'
>>> # the "0o" prefix indicates that it is octal format
>>>
>>>
>>> # Hexadecimal System (base 16 system 0-9 then a-f)
>>> hex(25)
'0x19'
>>> # "0x" prefix indicates a hexadecimal number
>>> hew(10)
Traceback (most recent call last):
File "<pyshell#23>", line 1, in <module>
hew(10)
NameError: name 'hew' is not defined
>>> hex(10)
'0xa'
>>> 0xf
15
>>> 2**0
1
>>> |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.api import ExponentialSmoothing, SimpleExpSmoothing, Holt
from sklearn.metrics import mean_squared_error
from math import sqrt

# Holt-Winters (triple exponential smoothing) forecast of monthly revenue.
# Reads the first 37 rows of the authorization data, fits on the first 25
# rows, and evaluates the forecast on the remaining rows.
df = pd.read_csv('CreditCardAuthorization.csv', nrows=37)

# Train/test split.
# BUG FIX: the original used test=df[26:], which silently dropped row 25
# (df[0:25] covers rows 0-24, so the test set must start at index 25).
# .copy() avoids SettingWithCopy warnings on the column writes below.
train = df[0:25].copy()
test = df[25:].copy()

# Index every frame by a datetime parsed from the Year column ('%Y-%m').
df.Timestamp = pd.to_datetime(df.Year, format='%Y-%m')
df.index = df.Timestamp
train.Timestamp = pd.to_datetime(train.Year, format='%Y-%m')
train.index = train.Timestamp
test.Timestamp = pd.to_datetime(test.Year, format='%Y-%m')
test.index = test.Timestamp

# Fit additive-trend / additive-seasonality Holt-Winters with a 12-month
# seasonal cycle, then forecast one value per test row.
y_hat_avg = test.copy()
fit1 = ExponentialSmoothing(np.asarray(train['Revenue']), seasonal_periods=12, trend='add', seasonal='add').fit()
y_hat_avg['Holt_Winter'] = fit1.forecast(len(test))

# Plot actuals against the forecast.
plt.figure(figsize=(16, 8))
plt.plot(train['Revenue'], label='Train')
plt.plot(test['Revenue'], label='Test')
plt.plot(y_hat_avg['Holt_Winter'], label='Holt_Winter')
plt.legend(loc='best')
plt.show()

# Report forecast error on the held-out rows.
rms = sqrt(mean_squared_error(test.Revenue, y_hat_avg.Holt_Winter))
print('Root Mean Square: %s' % rms)
|
def openOrSenior(data):
    """Classify each [age, handicap] pair: "Senior" when age >= 55 and
    handicap > 7, otherwise "Open". Returns one label per member, in order."""
    categories = []
    for age, handicap in data:
        if age >= 55 and handicap > 7:
            categories.append("Senior")
        else:
            categories.append("Open")
    return categories
|
# Problem prompt:
# Reverse the digits of an integer, returning 0 when the reversed value
# falls outside the signed 32-bit integer range.
def reverse(num):
    """Return num with its decimal digits reversed, preserving sign.

    Returns 0 if the reversed value overflows a signed 32-bit integer,
    i.e. falls outside [-2**31, 2**31 - 1].
    """
    sign = -1 if num < 0 else 1
    result = sign * int(str(abs(num))[::-1])
    # BUG FIX: the lower bound is -2**31 (-2147483648), not -2147483647.
    if result > 2**31 - 1 or result < -2**31:
        return 0
    return result

# test cases (BUG FIX: converted Python 2 print statements to Python 3 calls)
print(reverse(0))
print(reverse(-63482))
print(reverse(112345123452345))
print(reverse(-2147483648))
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*
# Raspberry Pi photo booth: waits for a push button on GPIO 18, shows a 3-2-1
# countdown, takes a picture with raspistill and displays it fullscreen.
import RPi.GPIO as GPIO
import time
from datetime import datetime
from PIL import Image
import pygame
from pygame.locals import *
import os

# Button wired to BCM pin 18, active-low (internal pull-up enabled).
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Fullscreen pygame surface used both for messages and for showing photos.
pygame.init()
screen = pygame.display.set_mode((0,0),pygame.FULLSCREEN)
width, height = screen.get_size()

def takepic(imageName):
    # Take a photo (note: pick the line matching your installation by
    # removing the leading # from the appropriate command).
    # command = "sudo raspistill -t 1000 -w 960 -h 720 -o "+ imageName +" -q 80" # take a photo
    # command = "sudo raspistill -t 1000 -w 960 -h 720 -o "+ imageName +" -rot 90 -q 80" # take a photo rotated 90 degrees
    command = "sudo raspistill -t 1000 -w 960 -h 720 -o "+ imageName +" -rot 180 -q 80" # take a photo rotated 180 degrees
    # command = "sudo raspistill -t 1000 -w 960 -h 720 -o "+ imageName +" -rot 270 -q 80" # take a photo rotated 270 degrees
    os.system(command)

def loadpic(imageName):
    # Display imageName scaled to fill the screen.
    print("loading image: " + imageName)
    background = pygame.image.load(imageName);
    background.convert_alpha()
    background = pygame.transform.scale(background,(width,height))
    screen.blit(background,(0,0),(0,0,width,height))
    pygame.display.flip()

def minuterie():
    # On-screen 3-2-1 countdown before the shot ("souriez" = "smile").
    writemessage(" 3")
    time.sleep(1)
    writemessage(" 2")
    time.sleep(1)
    writemessage(" 1")
    time.sleep(1)
    writemessage("souriez")

def writemessage(message):
    # Display a large message on a black background.
    screen.fill(pygame.Color(0,0,0))
    font = pygame.font.SysFont("verdana", 250, bold=1)
    textsurface = font.render(message, 1, pygame.Color(255,255,255))
    screen.blit(textsurface,(35,40))
    pygame.display.update()

def writemessagetransparent(message):
    # Display a message while keeping the current background visible.
    font = pygame.font.SysFont("verdana", 50, bold=1)
    textsurface = font.render(message, 1, pygame.Color(255,255,255))
    screen.blit(textsurface,(35,40))
    pygame.display.update()

# Create the photo folder on the Desktop if it does not exist yet, and open
# its permissions so photos can be deleted by any user.
if (os.path.isdir("/home/pi/Desktop/photos") == False):
    os.mkdir("/home/pi/Desktop/photos")
    os.chmod("/home/pi/Desktop/photos",0o777)

while True : # loop until interrupted
    try:
        print ("\n attente boucle")
        # Wait for a button press (falling edge, active-low).
        GPIO.wait_for_edge(18, GPIO.FALLING)
        # Button pressed: run the countdown.
        minuterie()
        # Build the photo name from day_month_hour_min_sec.
        date_today = datetime.now()
        nom_image = date_today.strftime('%d_%m_%H_%M_%S')
        # Take the photo...
        chemin_photo = '/home/pi/Desktop/photos/'+nom_image+'.jpeg'
        takepic(chemin_photo)
        # ...display it...
        loadpic(chemin_photo)
        # ...and overlay a message on top of it.
        writemessagetransparent("et voila...")
        # If the button is still held down (input reads 0), exit the loop.
        if (GPIO.input(18) == 0):
            print("bouton appuye, je dois sortir")
            break
    except KeyboardInterrupt:
        # NOTE: re-raising skips GPIO.cleanup() below; cleanup only runs on a
        # normal exit via the long-press break above.
        print ('sortie du programme!')
        raise
GPIO.cleanup() # reset GPIO state on normal exit
import os
import numpy as np
import pandas as pd
import argparse
import progressbar
# Command-line interface: --path points at the directory holding the eICU CSVs.
parser = argparse.ArgumentParser(description='Code to preprocess data from the eICU database')
parser.add_argument('--path', help='Path to eICU database', required=True, type=str)
args = parser.parse_args()
assert len(args.path) > 0, 'Empty path'

# Progress-bar widgets ('StayID' is a dynamic message slot).
widgets = [
    progressbar.ETA(),
    progressbar.Bar(),
    ' ', progressbar.DynamicMessage('StayID')
]

# Typical values for imputation, from Benchmarking ML algorithms paper.
impute_values = {'Eyes': 4, 'GCS Total': 15, 'Heart Rate': 86, 'Motor': 6, 'Invasive BP Diastolic': 56,
                 'Invasive BP Systolic': 118, 'O2 Saturation': 98, 'Respiratory Rate': 19,
                 'Verbal': 5, 'glucose': 128, 'admissionweight': 81, 'Temperature (C)': 36,
                 'admissionheight': 170, "MAP (mmHg)": 77, "pH": 7.4, "FiO2": 0.21}

# Log file recording row counts after each filtering step.
# NOTE(review): assumes a 'temp' directory already exists in the working
# directory — confirm, or create it before opening.
logfile = open(os.path.join('temp', 'preprocess.log'), 'w')
# Read patients.csv (one row per ICU stay).
patients = pd.read_csv(os.path.join(args.path, 'patient.csv.gz'), compression='gzip')
logfile.write("patients has {} records\n".format(patients.shape[0]))
# Only choose relevant columns
patients = patients[['patientunitstayid', 'gender', 'age', 'ethnicity', 'apacheadmissiondx', 'admissionheight', 'admissionweight', 'dischargeweight', 'hospitaladmitoffset', 'hospitaldischargeoffset', 'unitdischargeoffset', 'uniquepid', 'hospitaldischargestatus', 'unitdischargestatus']]
# Filter patients by age: drop the censored '> 89' label, then keep adults 18-89.
patients = patients.loc[patients['age'] != '> 89']
patients = patients.astype({'age': 'float'})
patients = patients.loc[(patients['age'] >= 18) & (patients['age'] <= 89)]
logfile.write("patients has {} records after filtering by age\n".format(patients.shape[0]))
# Filter patients by number of stays: keep only patients with a single ICU stay.
id_counts = patients.groupby(by='uniquepid').count()
single_visit_ids = id_counts[id_counts['patientunitstayid'] == 1].index
patients = patients.loc[patients['uniquepid'].isin(single_visit_ids)]
logfile.write("patients has {} records after filtering by number of stays\n".format(patients.shape[0]))
# Filter patients by gender and encode it (isin over the dict checks its keys).
gender_map = {'Female': 1, 'Male': 2}
patients = patients.loc[patients['gender'].isin(gender_map)] # Removes records having unknown gender
patients['gender'] = patients['gender'].map(gender_map)
logfile.write("patients has {} records after filtering by gender\n".format(patients.shape[0]))
# Filter patients by discharge status and encode it as 0 (alive) / 1 (expired).
discharge_map = {'Alive': 0, 'Expired': 1}
patients = patients.loc[patients['hospitaldischargestatus'].isin(discharge_map)]
patients = patients.loc[patients['unitdischargestatus'].isin(discharge_map)]
patients['hospitaldischargestatus'] = patients['hospitaldischargestatus'].map(discharge_map)
patients['unitdischargestatus'] = patients['unitdischargestatus'].map(discharge_map)
logfile.write("patients has {} records after filtering by discharge status\n".format(patients.shape[0]))
# Convert ethnicity to numbers; missing/unknown values map to 0.
ethnicity_map = {'Asian': 1, 'African American': 2, 'Caucasian': 3, 'Hispanic': 4, 'Native American': 5, 'NaN': 0, '': 0}
patients.update({'ethnicity': patients['ethnicity'].fillna('').apply(lambda s: ethnicity_map[s] if s in ethnicity_map else ethnicity_map[''])})
logfile.write("patients has {} records after filtering by ethnicity\n".format(patients.shape[0]))
# Convert diagnoses to numbers by factorizing the distinct admission diagnoses.
patients['apacheadmissiondx'].fillna('nodx', inplace=True)
dx_vals, dx_keys = pd.factorize(patients['apacheadmissiondx'].unique())
apacheadmissiondx_map = dict(zip(dx_keys, dx_vals))
patients['apacheadmissiondx'] = patients['apacheadmissiondx'].map(apacheadmissiondx_map)
logfile.write("patients has {} records after filtering by diagnosis\n".format(patients.shape[0]))
# Using the average of admission and discharge weight wherever possible
patients.loc[patients['dischargeweight'].notnull(), 'admissionweight'] = 0.5*(patients['admissionweight'] + patients['dischargeweight'])
# Clip values to plausible ranges (height in cm, weight in kg).
patient_features = ['admissionheight', 'admissionweight']
patient_feature_ranges = [(100, 240), (30, 250)]
for feature, (minval, maxval) in zip(patient_features, patient_feature_ranges):
    # NOTE(review): Series.clip(..., inplace=True) on a column selection may not
    # write back to the frame under pandas copy-on-write — verify pandas version.
    patients[feature].clip(minval, maxval, inplace=True)
# Drop unnecessary columns
patients.drop(columns=['dischargeweight', 'hospitaladmitoffset', 'hospitaldischargeoffset'], inplace=True)
# Stay ids retained here are used to filter the other eICU tables below.
stayids = patients['patientunitstayid']
patients.to_csv(os.path.join(args.path, 'patient_features.csv.gz'), compression='gzip')
del patients
'''
patients = pd.read_csv(os.path.join(args.path, 'patient_features.csv.gz'), compression='gzip')
stayids = patients['patientunitstayid']
del patients
'''
# Read nurseCharting.csv (long/key-value format: one vital sign per row).
nursingchart = pd.read_csv(os.path.join(args.path, 'nurseCharting.csv'))
logfile.write('Loaded nurseCharting\n')
# Drop unnecessary columns
nursingchart.drop(['nursingchartentryoffset', 'nursingchartcelltypecat'], axis=1, inplace=True)
# Only keep rows belonging to the selected stays
nursingchart = nursingchart[nursingchart['patientunitstayid'].isin(stayids)]
# Rename columns for convenience
nursingchart.rename(index=str, columns={"nursingchartoffset": "offset",
                                        "nursingchartcelltypevalname": "itemname",
                                        "nursingchartcelltypevallabel": "itemlabel",
                                        "nursingchartvalue": "itemvalue"}, inplace=True)
logfile.write('Renamed nurseCharting columns\n')
# Select features of interest and keys
nursingchart_featurelabels = ['Heart Rate', 'MAP (mmHg)', 'Arterial Line MAP (mmHg)']
nursingchart_featurenames = ['Non-Invasive BP Systolic', 'Invasive BP Systolic', 'Non-Invasive BP Diastolic', 'Invasive BP Diastolic', 'GCS Total', 'Verbal', 'Eyes', 'Motor', 'O2 Saturation', 'Respiratory Rate', 'Temperature (F)']
nursingchart = nursingchart[(nursingchart.itemlabel.isin(nursingchart_featurelabels)) | (nursingchart.itemname.isin(nursingchart_featurenames))]
logfile.write('Selected rows containing features of interest from nurseCharting\n')
# Unify the different itemname/itemlabel spellings down to single feature names.
nursingchart.loc[nursingchart['itemname'] == 'Value', 'itemname'] = nursingchart['itemlabel']  # replace the generic 'Value' name with the label
nursingchart.loc[nursingchart['itemname'] == 'Non-Invasive BP Systolic', 'itemname'] = 'Invasive BP Systolic'  # unify non-invasive with invasive systolic
nursingchart.loc[nursingchart['itemname'] == 'Non-Invasive BP Diastolic', 'itemname'] = 'Invasive BP Diastolic'  # unify non-invasive with invasive diastolic
nursingchart.loc[nursingchart['itemlabel'] == 'Arterial Line MAP (mmHg)', 'itemname'] = 'MAP (mmHg)'  # unify arterial-line MAP and MAP
logfile.write('Unified features in nurseCharting\n')
# Drop item label after unifying names
nursingchart.drop(['itemlabel', 'nursingchartid'], axis=1, inplace=True)
# Converting key-value pairs to new columns (wide format, first value per cell)
nursingchart = nursingchart.pivot_table(index=['patientunitstayid', 'offset'], columns='itemname', values='itemvalue', aggfunc='first').reset_index()
logfile.write('Converted key-value pairs to columns in nurseCharting\n')
# BUG FIX: Series.map(dict) sends every value NOT in the dict to NaN, which
# would wipe out all numeric GCS totals. Use replace() so only the
# non-numeric sentinel string becomes NaN.
nursingchart['GCS Total'] = nursingchart['GCS Total'].replace({'Unable to score due to medication': np.nan})
logfile.write('Cleaned GCS Total values in nurseCharting\n')
# Cast table to float
nursingchart = nursingchart.astype('float')
# Convert Fahrenheit to Celsius
nursingchart['Temperature (F)'] = (nursingchart['Temperature (F)'] - 32)*(5/9)
nursingchart.rename(index=str, columns={'Temperature (F)': 'Temperature (C)'}, inplace=True)
logfile.write('Converted Fahrenheit to Celsius in nurseCharting\n')
# Clip values to plausible ranges
nursingchart_features = ['Invasive BP Diastolic', 'Invasive BP Systolic', 'Heart Rate', 'MAP (mmHg)', 'GCS Total', 'Verbal', 'Eyes', 'Motor', 'O2 Saturation', 'Respiratory Rate', 'Temperature (C)']
nursingchart_feature_ranges = [(0, 375), (0, 375), (0, 350), (14, 330), (2, 16), (1, 5), (0, 5), (0, 6), (0, 100), (0, 100), (26, 45)]
for feature, (minval, maxval) in zip(nursingchart_features, nursingchart_feature_ranges):
    nursingchart[feature].clip(minval, maxval, inplace=True)
# Bin offsets (minutes) into hours
nursingchart['offset'] = (nursingchart['offset']/60).astype('int')
# Impute values within each (stay, hour) bin with the bin's column means.
# BUG FIX: the original discarded the groupby().apply() result, so this
# imputation step was a no-op; fillna with a group-wise transform('mean')
# keeps the frame's shape and index.
nursingchart = nursingchart.fillna(nursingchart.groupby(['patientunitstayid', 'offset']).transform('mean'))
# For each offset, only choose last value.
nursingchart.drop_duplicates(['patientunitstayid', 'offset'], keep='last', inplace=True)
# Impute remaining missing values with "typical values"
nursingchart.fillna(value=impute_values, inplace=True)
logfile.write('Binned and imputed nurseCharting features\n')
nursingchart.to_csv(os.path.join(args.path, 'nursingchart_features.csv.gz'), compression='gzip')
logfile.write('Wrote nurseCharting features to CSV\n')
del nursingchart
# Read lab.csv.gz (long/key-value format: one lab result per row).
lab = pd.read_csv(os.path.join(args.path, 'lab.csv.gz'), compression='gzip')
logfile.write('Loaded lab\n')
# Only select relevant columns.
# BUG FIX: the original read `lab.[[...]]`, which is a SyntaxError.
lab = lab[['patientunitstayid', 'labresultoffset', 'labname', 'labresult']]
# Only keep rows belonging to the selected stays
lab = lab[lab['patientunitstayid'].isin(stayids)]
# Rename columns for convenience
lab.rename(index=str, columns={"labresultoffset": "offset",
                               "labname": "itemname",
                               "labresult": "itemvalue"}, inplace=True)
logfile.write('Renamed lab columns\n')
# Select features of interest and keys
lab_featurenames = ['glucose', 'bedside glucose', 'pH', 'FiO2']
lab = lab[(lab.itemname.isin(lab_featurenames))]
logfile.write('Selected rows of interest from lab\n')
# Unify bedside glucose and glucose
lab.loc[lab['itemname'] == 'bedside glucose', 'itemname'] = 'glucose'
logfile.write('Unified features in lab\n')
# Convert key-value pairs to new columns (wide format, first value per cell)
lab = lab.pivot_table(index=['patientunitstayid', 'offset'], columns='itemname', values='itemvalue', aggfunc='first').reset_index()
logfile.write('Converted key-value pairs to columns in lab\n')
# Casting columns to float
lab = lab.astype('float')
# FiO2 is recorded as a percentage; convert to a fraction (matches the 0.21
# typical value in impute_values).
lab['FiO2'] = lab['FiO2']/100
# Clip values to plausible ranges.
# BUG FIX: the FiO2 clip range must be a fraction (0.15, 1.10) because the
# clip runs AFTER the /100 conversion above; the original (15, 110) range
# would clamp every FiO2 value up to 15.
lab_features = ['glucose', 'pH', 'FiO2']
lab_feature_ranges = [(33, 1200), (6.3, 10), (0.15, 1.10)]
for feature, (minval, maxval) in zip(lab_features, lab_feature_ranges):
    lab[feature].clip(minval, maxval, inplace=True)
# Bin offsets (minutes) into hours
lab['offset'] = (lab['offset']/60).astype('int')
# Impute values within each (stay, hour) bin with the bin's column means.
# BUG FIX: the original discarded the groupby().apply() result (a no-op).
lab = lab.fillna(lab.groupby(['patientunitstayid', 'offset']).transform('mean'))
# For each offset, only choose last value.
lab.drop_duplicates(['patientunitstayid', 'offset'], keep='last', inplace=True)
# Impute remaining missing values with "typical values"
lab.fillna(value=impute_values, inplace=True)
logfile.write('Binned and imputed features from lab\n')
lab.to_csv(os.path.join(args.path, 'lab_features.csv.gz'), compression='gzip')
logfile.write('Wrote lab features to CSV\n')
del lab
# Combining all features: reload the three intermediate tables and merge them.
patients = pd.read_csv(os.path.join(args.path, 'patient_features.csv.gz'), compression='gzip')
nursingchart = pd.read_csv(os.path.join(args.path, 'nursingchart_features.csv.gz'), compression='gzip')
lab = pd.read_csv(os.path.join(args.path, 'lab_features.csv.gz'), compression='gzip')
# BUG FIX: the original referenced undefined names `nc` and `res` (NameError);
# they are the nursingchart and all_features frames respectively.
temp = pd.merge(nursingchart, lab, how='outer', on=['patientunitstayid', 'offset']).sort_values(by=['patientunitstayid', 'offset'])
all_features = pd.merge(temp, patients, how='outer', on='patientunitstayid').sort_values(by=['patientunitstayid', 'offset'])
# Impute missing values with "typical values"
all_features.fillna(value=impute_values, inplace=True)
# Filter stays by number of records: keep stays with 15-200 hourly records.
all_features = all_features.groupby('patientunitstayid').filter(lambda x: (x.shape[0] >= 15 and x.shape[0] <= 200))
# Compute remaining length of stay in days: unitdischargeoffset is in minutes
# (/1440 -> days) and offset is in hours (/24 -> days).
all_features['rlos'] = all_features['unitdischargeoffset']/1440 - all_features['offset']/24
# Only choose records having positive offsets and RLOS
all_features = all_features[all_features['offset'] > 0]
all_features = all_features[(all_features['unitdischargeoffset'] > 0) & (all_features['rlos'] > 0)]
# Write features to CSV
all_features.to_csv(os.path.join(args.path, 'eicu_features.csv.gz'), compression='gzip')
logfile.write('Wrote all features to CSV\n')
logfile.close()
|
from cotton.scm import Git
from fabric import api as fab
from fabric.api import env
class BroadGit(Git):
    """Git SCM wrapper for the Broad environment.

    Prefixes every remote git invocation so the 'Git-1.7' dotkit is loaded
    (putting git on PATH) and the umask is relaxed to 0002, keeping
    checkouts group-writable.
    """
    def git(self, *commands):
        # Stack two fabric prefixes around the base implementation: load the
        # Broad 'use' environment, then set the group-writable umask.
        with fab.prefix(". /broad/tools/scripts/useuse && use Git-1.7"):
            with fab.prefix("umask 0002"):
                super(BroadGit, self).git(*commands)
|
from matplotlib import pyplot
from openpyxl import load_workbook

# Plot columns C and D of the 'Data' sheet against column A.
wb = load_workbook('C:\\Users\\p.mykhailyk\\Seafile\\p4ne_training\\data_analysis_lab.xlsx')
sheet_data = wb['Data']

def getV(x):
    """Return the value stored in an openpyxl cell."""
    return x.value

def _column_values(letter):
    """Return the values of one column, skipping the header row."""
    return list(map(getV, sheet_data[letter][1:]))

# Cell values per column, header row excluded.
# (The original also built unused sheet_A/sheet_B cell lists; removed.)
A_V = _column_values('A')
B_V = _column_values('B')
C_V = _column_values('C')
D_V = _column_values('D')

pyplot.plot(A_V, C_V)
pyplot.plot(A_V, D_V)
pyplot.show()
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from Instanssi.kompomaatti.models import Entry

# URL spellings that must all resolve to the same YouTube video id.
VALID_YOUTUBE_URLS = [
    # must handle various protocols and hostnames in the video URL
    "http://www.youtube.com/v/asdf123456",
    "https://www.youtube.com/v/asdf123456/",
    "//www.youtube.com/v/asdf123456",
    "www.youtube.com/v/asdf123456",
    "youtube.com/v/asdf123456/",
    # must handle various other ways to define the video
    "www.youtube.com/watch?v=asdf123456",
    "http://youtu.be/asdf123456",
    "https://youtu.be/asdf123456/"
]

class KompomaattiTests(TestCase):
    """Tests for kompomaatti helpers on the Entry model."""
    def setUp(self):
        # No shared fixtures are needed for these tests.
        pass
    def test_youtube_urls(self):
        """Test YouTube video id extraction from URLs."""
        for url in VALID_YOUTUBE_URLS:
            self.assertEqual(Entry.youtube_url_to_id(url), "asdf123456",
                             msg="failing URL: %s" % url)
|
import sys # to accept the files from a command line argument as stream
import string # to use the punctuation to remove punctuations from the file
def guess(model, doc):
    """Guess the genre of a document from the summed polarity of its words.

    Args:
        model: tuple (pos_genre, neg_genre, bias, lexicon) where lexicon maps
            each word to its polarity. Only the genre names and the lexicon
            are consulted here.
        doc: the document, as a list of words.

    Returns:
        model[0] when the document's polarity sum is positive, model[1] when
        it is negative, and None when it is exactly zero.
    """
    lexicon = model[3]
    # Words missing from the lexicon contribute nothing to the score.
    score = sum(lexicon.get(word, 0) for word in doc)
    if score > 0:
        return model[0]
    if score < 0:
        return model[1]
    return None
def update_pos_lexico(doc, lex):
    """Increment the polarity of every word of a document in the lexicon.

    Words already in the lexicon have their polarity incremented by 1;
    unseen words are initialized to 1. The dict is modified in place.

    Args:
        doc: the document whose words' polarities are updated in the lexicon.
        lex: a dictionary mapping words to their polarity.

    Returns:
        The updated lexicon (the same dict object that was passed in).
    """
    # FIX: the original docstring opened with a stray '" '; the
    # membership-test branches are collapsed into a single dict.get().
    for word in doc:
        lex[word] = lex.get(word, 0) + 1
    return lex
def update_neg_lexico(doc, lex):
    """Decrement the polarity of every word of a document in the lexicon.

    Words already in the lexicon have their polarity decremented by 1;
    unseen words are initialized to -1. The dict is modified in place.

    Args:
        doc: the document whose words' polarities are updated in the lexicon.
        lex: a dictionary mapping words to their polarity.

    Returns:
        The updated lexicon (the same dict object that was passed in).
    """
    # FIX: the original docstring opened with a stray '" '; the
    # membership-test branches are collapsed into a single dict.get().
    for word in doc:
        lex[word] = lex.get(word, 0) - 1
    return lex
def train(pos_genre, neg_genre, training_data, n):
    """Train a perceptron-style polarity model on labelled documents.

    Runs n passes over the training data. Whenever guess() mislabels a
    document, the bias and the polarity of every word in that document are
    nudged towards the correct genre: +1 for pos_genre examples, -1 for
    neg_genre examples.

    Args:
        pos_genre: the first (positive-polarity) genre name.
        neg_genre: the second (negative-polarity) genre name.
        training_data: list of (genre, document) tuples, where each document
            is a list of words.
        n: number of training passes over the data.

    Returns:
        A model tuple (pos_genre, neg_genre, bias, lex).
    """
    bias = 0
    lex = {}
    model = (pos_genre, neg_genre, bias, lex)
    for _ in range(n):
        for genre, doc in training_data:
            predicted = guess(model, doc)
            # Correct predictions leave the model untouched.
            if predicted == genre:
                continue
            if genre == pos_genre:
                bias += 1
                # Updates lex in place; update() with the returned dict is a
                # harmless no-op kept for parity with the original contract.
                lex.update(update_pos_lexico(doc, lex))
            elif genre == neg_genre:
                bias -= 1
                lex.update(update_neg_lexico(doc, lex))
            # Rebuild the tuple so it carries the current bias value.
            model = (pos_genre, neg_genre, bias, lex)
    return model
def test(model, testing_data):
    """Evaluate how often guess() labels the test documents correctly.

    Args:
        model: tuple (pos_genre, neg_genre, bias, lexicon) as accepted by
            guess().
        testing_data: list of (genre, document) tuples, where each document
            is a list of words.

    Returns:
        Tuple (first_correct, first_total, second_correct, second_total):
        correct guesses and item counts for the first and second genre.
    """
    # FIX: the original also unpacked model[2]/model[3] into unused locals.
    pos_genre = model[0]
    neg_genre = model[1]
    first_correct = first_total = second_correct = second_total = 0
    for genre, doc in testing_data:
        result = guess(model, doc)
        if genre == pos_genre:
            first_total += 1
            if result == genre:
                first_correct += 1
        if genre == neg_genre:
            second_total += 1
            if result == genre:
                second_correct += 1
    return (
        first_correct,
        first_total,
        second_correct,
        second_total,
    )
def split_doc(lines):
    """Extract lowercase, non-punctuation, non-proper-noun words from tagged lines.

    Each input line is expected to look like "<genre>\t<word|TAG word|TAG ...>".

    NOTE(review): `words` is created once, so the SAME list object is appended
    to `filtered_lines` for every line — all entries alias one cumulative word
    list. Confirm this is intended rather than one word list per line.
    """
    words = []
    filtered_lines = []
    for line in lines:
        for temp in line.split("\t")[1].split(" "):
            sp = temp.split("|")
            # Since '' together can't be detected as punctuation, we replace
            # one of the quotes; the same for the backtick pair ``.
            sp[0] = sp[0].replace("'", "").replace("`", "")
            # NOTE(review): sp[1] raises IndexError for tokens without a "|"
            # separator — verify the corpus format guarantees one per token.
            if sp[0].islower() and sp[0] not in string.punctuation and not sp[1].startswith("NP"):
                # We could drop this check to keep repeated words, but
                # deduplicating keeps the documents small.
                if sp[0] not in words:
                    words.append(sp[0])
        filtered_lines.append(words)
    return filtered_lines
def test_data(*files):
    """
    Train and evaluate the classifier on real tagged corpus files.

    For each file, the genre name is read from the first tab-separated field
    of its first line; the file pointer is then rewound so the first 60 lines
    feed training and the following 15 lines feed testing.

    Args:
        *files: paths of the corpus files passed on the command line
            (the genres of the first two files become the model's genres).

    Returns:
        The tuple produced by test(): correct guesses and item totals for
        both genres.
    """
    training_data = []
    testing_data = []
    for i in range(len(files)):
        with open(files[i], "r") as my_file:
            # Peek at the genre label, then rewind to the start of the file.
            pos = my_file.tell()
            genre = my_file.readline().split("\t")[0]
            my_file.seek(pos)
            # Read the first 60 lines for training.
            lines = [next(my_file) for n in range(60)]
            splited_doc = split_doc(lines)
            for line in splited_doc:
                training_data.append((genre, line))
            # Read the next 15 lines for testing.
            lines = [next(my_file) for n in range(15)]
            splited_doc = split_doc(lines)
            for line in splited_doc:
                testing_data.append((genre, line))
    # Train the model on the training lines (10 passes)...
    train_result = train(training_data[0][0], training_data[1][0], training_data, 10)
    # ...and evaluate it on the held-out testing lines.
    test_result = test(train_result, testing_data)
    return test_result
def main():
    """Demo driver: exercises guess(), train() and test() on toy documents,
    then (optionally) on two tagged corpus files given on the command line.
    """
    doc1 = ["thus", "starts", "a", "scientific", "document"]
    doc2 = ["the", "president", "spoke", "to", "a", "neighboring", "country"]
    doc3 = ["thus", "spoke", "the", "president"]
    # Hand-built lexicon: positive polarity -> academic, negative -> newspaper.
    lex = {"thus": 2, "scientific": 1, "spoke": -1, "president": -1, "country": -1}
    model = ("academic", "newspaper", 0, lex)
    training_data = [("academic", doc1), ("newspaper", doc2)]
    # 1 Classifying text genre with a polarity lexicon
    print(guess(model, doc1))
    print(guess(model, doc2))
    print(guess(model, doc3))
    # #2 Learning a polarity lexicon from annotated examples
    print(train("academic", "newspaper", training_data, 5))
    # #3 Evaluating the model
    testing_data = [("academic", doc1), ("newspaper", doc2), ("newspaper", doc3)]
    # Call test() function using a model accepted by the guess() function.
    print("\nTest result using the given model:", test(model, testing_data))
    # Call test() function using train() function
    print("\nTest result using model returned from train() function:", test(train("academic", "newspaper", training_data, 5), testing_data))
    # 4 Using the real data from the files (expects exactly two file paths).
    if len(sys.argv) == 3:
        print("\nTest result using real data from the files:", test_data(sys.argv[1], sys.argv[2]))

if __name__ == "__main__":
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=W0102
from __future__ import (division, absolute_import, print_function,
unicode_literals)
# memoization
# python object, keys will be the arg to fn, value will be the resturn value
def fib(n, memo=None):
    """Return the n-th Fibonacci number (fib(0)=0, fib(1)=fib(2)=1).

    FIX: the original used a mutable default argument (memo={}) shared across
    all calls, and its recursive form would exhaust the recursion limit on a
    fresh cache for large n. This iterative version needs no recursion; the
    memo parameter is kept for backward compatibility and, when given, is
    consulted first and updated with the result.
    """
    if memo is None:
        memo = {}
    if n in memo:
        return memo[n]
    if n == 0:
        return 0
    if n <= 2:
        return 1
    a, b = 1, 1  # fib(1), fib(2)
    for _ in range(3, n + 1):
        a, b = b, a + b
    memo[n] = b
    return b
def main():
    """Demo: print sample Fibonacci values, then the first 2000 of them."""
    # Expected values: fib(6)=8, fib(7)=13, fib(8)=21, fib(50)=12586269025.
    for sample in (6, 7, 8, 50):
        print(fib(sample))
    print('Hello!')
    print('The first 50 fibonacci numbers are:')
    print(','.join(str(fib(k)) for k in range(2000)))

if __name__ == '__main__':
    main()
|
import local_learning.models.loopy.factory.leaves as leaves
import local_learning.models.loopy.factory.operators as operators
import local_learning
# Module-level switch copied from the package: when truthy, the generated
# rule code drops into pdb when a rule raises.
debug_exceptions = local_learning.debug_exceptions
def indent_code_block(code_block):
    """Return code_block with every line indented by four spaces.

    Trailing newlines are not preserved (splitlines drops them).
    """
    indented_lines = []
    for line in code_block.splitlines():
        indented_lines.append('    ' + line)
    return '\n'.join(indented_lines)
class Renderer:
    def __init__(self, harness):
        """Capture the harness plus the pieces of it the render methods read."""
        self.harness = harness
        self.ruleset = harness.ruleset  # rules, filters and conditionals to render
        self.model = harness.model
        self.node_memory_size = harness.node_memory_size
        self.edge_memory_size = harness.edge_memory_size
    def render(self):
        """Return the complete generated source: ruleset code, then model code,
        separated by two blank lines."""
        ruleset_code = self.render_ruleset()
        model_code = self.render_model()
        return ruleset_code + '\n\n\n' + model_code
    def render_ruleset(self):
        """Render the ruleset as two generated functions: initialize_rule and
        step_rule, separated by two blank lines."""
        initialize_rule_code = self.render_initialize_rule_code()
        step_rule_code = self.render_step_rule_code()
        code = initialize_rule_code + '\n\n\n' + step_rule_code
        return code
    def render_initialize_rule_code(self):
        """Render the source of the generated `initialize_rule` function.

        The emitted function allocates a fresh node_write_buffer and applies
        every initialize rule to it inside a try/except that re-raises.
        NOTE(review): the template indentation below was reconstructed from a
        whitespace-mangled source — verify the generated code indents cleanly.
        """
        method_header = 'def initialize_rule(node_memory_size, edge_memory_size, edges):'
        # One strided view per edge-memory slot: slot_edge_i aliases every
        # edge's i-th float, stored after the node's own memory.
        edge_slot_assignments = '\n'.join([
            'slot_edge_{i}=node_write_buffer[{node_memory_size}+{i}::{edge_memory_size}]'.format(
                i=i,
                edge_memory_size=self.edge_memory_size,
                node_memory_size=self.node_memory_size
            ) for i in range(self.edge_memory_size)])
        # One generated line per initialize rule in the ruleset.
        ruleset_generated_code = '\n'.join(self.render_rule(initialize_rule) for initialize_rule in self.ruleset.initialize_rules)
        # stripped so the template indentation below applies uniformly
        method_body = edge_slot_assignments.strip() + '\n' + ruleset_generated_code.strip()
        method_contents = '''node_write_buffer = np.zeros(node_memory_size + edge_memory_size * edges)
try:
    {method_body}
except Exception as e:
    {debug_exceptions_pdb}
    raise
return node_write_buffer'''.format(method_body=indent_code_block(method_body).strip(), debug_exceptions_pdb=('import pdb ; pdb.set_trace()' if debug_exceptions else ''))
        return '{method_header}\n{method_contents}'.format(method_header=method_header, method_contents=indent_code_block(method_contents))
    def render_step_rule_code(self):
        """Render the source of the generated `step_rule` function.

        The emitted function copies the read buffer into the write buffer,
        precomputes filter masks and conditional flags, then applies every
        step rule inside a try/except that logs and re-raises.
        NOTE(review): the template indentation below was reconstructed from a
        whitespace-mangled source — verify the generated code indents cleanly.
        """
        method_header = 'def step_rule(node_read_buffer, node_write_buffer, node_memory_size, edge_memory_size, edges):'
        # One strided view per edge-memory slot (same layout as initialize_rule).
        edge_slot_assignments = '\n'.join([
            'slot_edge_{i}=node_write_buffer[{node_memory_size}+{i}::{edge_memory_size}]'.format(
                i=i,
                edge_memory_size=self.edge_memory_size,
                node_memory_size=self.node_memory_size
            ) for i in range(self.edge_memory_size)])
        # Boolean masks, one per ruleset filter (expression > 0.0).
        filter_initialization = '\n'.join([
            'slot_filter_{i}={expression}>0.0'.format(
                i=i, expression=self.render_expression_tree(filter_i)
            ) for i, filter_i in enumerate(self.ruleset.filters)
        ])
        # Scalar flags, one per ruleset conditional.
        conditional_initialization = '\n'.join([
            'slot_conditional_{i}=bool({expression}>0.0)'.format(
                i=i, expression=self.render_expression_tree(conditional_i)
            ) for i, conditional_i in enumerate(self.ruleset.conditionals)
        ])
        # Flatten the nested step-rule lists into one generated line per rule.
        ruleset_generated_code = '\n'.join(self.render_rule(step_rule_line) for step_rule_line in sum(self.ruleset.step_rules, []))
        # stripped so we can indent it nicely below
        method_body = edge_slot_assignments.strip() + '\n' + filter_initialization.strip() + '\n' + conditional_initialization.strip() + '\n' + ruleset_generated_code.strip()
        method_contents = '''node_write_buffer[:] = node_read_buffer
try:
    {method_body}
except Exception as e:
    logger.error(e, exc_info=True)
    {debug_exceptions_pdb}
    raise'''.format(method_body=indent_code_block(method_body).strip(), debug_exceptions_pdb=('import pdb ; pdb.set_trace()' if debug_exceptions else ''))
        return '{method_header}\n{method_contents}'.format(method_header=method_header, method_contents=indent_code_block(method_contents))
def render_rule(self, rule):
rendered_code = ''
# all of this should fit on a single line; to keep life simple
if rule.slot_conditional:
conditional_index = self.ruleset.conditionals.index(rule.slot_conditional)
rendered_code += 'if slot_conditional_{}:'.format(conditional_index)
if rule.slot_type == 'vector':
if rule.slot_filter is not None:
rendered_code += '{slot_value}[slot_filter_{slot_filter_i}]='.format(slot_value=rule.slot_value, slot_filter_i=self.ruleset.filters.index(rule.slot_filter))
else:
rendered_code += '{slot_value}[:]='.format(slot_value=rule.slot_value)
else:
rendered_code += '{slot_value}='.format(slot_value=rule.slot_value)
rendered_code += self.render_expression_tree(rule.expression_tree)
return rendered_code
    def render_expression_tree(self, expression_tree):
        """Recursively render an expression tree into a Python expression string,
        inserting filter-adaptation calls wherever a node's slot filter differs
        from its parent's so that vector lengths line up in the generated code.
        """
        if len(expression_tree.children) == 0:
            # leaf node: the operator renders directly (constant/slot reference)
            rendered_expression = operators.render(expression_tree.operator)
        else:
            rendered_expression = '{operator}({args})'.format(operator=operators.render(expression_tree.operator), args=', '.join(self.render_expression_tree(child) for child in expression_tree.children))
        # floats carry no filter, so no length adaptation is ever needed
        if expression_tree.slot_type == 'float':
            return rendered_expression
        node_slot_filter = expression_tree.slot_filter
        # root vector expressions must be expanded to the full edge count
        if expression_tree.slot_type == 'vector' and expression_tree.parent == None:
            if node_slot_filter is not None:
                node_slot_filter_i = self.ruleset.filters.index(node_slot_filter)
                return 'operators.ensure_vector({}, edges, slot_filter_{})'.format(rendered_expression, node_slot_filter_i)
            else:
                return 'operators.ensure_vector({}, edges, None)'.format(rendered_expression)
        parent_slot_filter = expression_tree.parent.slot_filter
        # same filter on both sides (including both None): lengths already agree
        if parent_slot_filter == node_slot_filter:
            return rendered_expression
        if parent_slot_filter is None and node_slot_filter is not None:
            # child is filtered, parent is not: expand back to full length
            node_slot_filter_i = self.ruleset.filters.index(node_slot_filter)
            return 'operators.undo_filter({expression}, slot_filter_{i})'.format(expression=rendered_expression, i=node_slot_filter_i)
        if parent_slot_filter is not None and node_slot_filter is None:
            # parent is filtered, child is not: narrow to the parent's mask
            parent_slot_filter_i = self.ruleset.filters.index(parent_slot_filter)
            return 'operators.apply_filter({expression}, slot_filter_{i})'.format(expression=rendered_expression, i=parent_slot_filter_i)
        if parent_slot_filter is not None and node_slot_filter is not None:
            # both filtered but with different masks: expand, then re-narrow
            parent_slot_filter_i = self.ruleset.filters.index(parent_slot_filter)
            node_slot_filter_i = self.ruleset.filters.index(node_slot_filter)
            return 'operators.apply_filter(operators.undo_filter({expression}, slot_filter_{node_i}), slot_filter_{parent_i})'.format(expression=rendered_expression, node_i=node_slot_filter_i, parent_i=parent_slot_filter_i)
def render_model(self):
model = self.model
header = model.header
methods = model.methods
format_arguments = {
'edge_memory_size': self.edge_memory_size,
'node_memory_size': self.node_memory_size,
'adjacency_dict': model.adjacency_dict,
'NODE_SIGNAL_MEMORY_INDEX': 0,
'NODE_SIGNAL_JUST_SENT_INDEX': 1,
'NODE_ERROR_JUST_SENT_INDEX': 2,
'EDGE_SIGNAL_INDEX': 0,
'EDGE_HAS_SIGNAL_INDEX': 1,
'EDGE_ERROR_INDEX': 2,
'EDGE_HAS_ERROR_INDEX': 3,
'EDGE_WEIGHT_INDEX': 4
}
rendered_methods = '\n\n'.join([indent_code_block(method.format(**format_arguments).strip()) for method in methods])
code = '{header}\n{rendered_methods}'.format(header=header, rendered_methods=rendered_methods)
return code
|
from __future__ import print_function
import io
import os
# Concatenate every file under ./clean into a single ./data.txt.
# Remove stale output first, but don't crash on the very first run when the
# file does not exist yet (the original os.remove raised FileNotFoundError).
if os.path.exists('./data.txt'):
    os.remove('./data.txt')
# Open the output once, outside the loop, instead of reopening it per file.
with io.open('./data.txt', 'a') as out_file:
    for entry in os.scandir('./clean'):
        # entry is an os.DirEntry: pass entry.path explicitly (passing the
        # entry itself relies on the Python 3.6+ fspath protocol, which
        # conflicts with this file's print_function py2-compat intent).
        # NOTE(review): encoding is left at the platform default as before —
        # confirm the cleaned files share one encoding.
        with io.open(entry.path, 'r') as f:
            out_file.write(f.read())
|
# Dictionary basics walkthrough: membership, length, insertion, and the
# various ways of removing entries. Each section rebuilds the dict so the
# examples stay independent of one another.

# Membership tests check keys.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
if "model" in thisdict:
    print("YES, It's in thisdict")

# len() counts key/value pairs.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
print(len(thisdict))

# Assigning to a new key inserts it.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
thisdict["color"] = "red"
print(thisdict)

# pop() removes a specific key.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
thisdict.pop("model")
print(thisdict)

# popitem() removes the last-inserted pair.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
thisdict.popitem()
print(thisdict)

# del with a key removes that entry.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
del thisdict["model"]
print(thisdict)

# clear() empties the dict in place.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
thisdict.clear()
print(thisdict)

# del on the bare name unbinds the variable entirely. The original script
# then printed thisdict and crashed with NameError; demonstrate the effect
# safely instead of crashing.
thisdict = {
    "brand": "ford",
    "model": "mustang",
    "year": 1966}
del thisdict
try:
    print(thisdict)
except NameError as err:
    print("thisdict is deleted:", err)
|
#!/usr/bin/python3
import numpy as np
class ScrapBooker:
    """Numpy array scrapbooking helpers: crop, thin, juxtapose and mosaic."""

    @staticmethod
    def crop(array, dimensions, position=(0, 0)):
        """Crop `array` to `dimensions` starting at `position`.

        Returns a numpy slice (a view, not a copy). Dimensions larger than
        the array are clamped, with a warning printed.

        NOTE(review): dimensions[k] is clamped against array.shape[k], yet the
        slice below uses d[1] for rows and d[0] for columns — confirm the
        intended (x, y) vs (row, col) convention for non-square arrays.
        """
        p = position
        # clamp each requested extent to what the array actually has
        d = tuple(i if i < j else j for i, j in zip(dimensions, array.shape))
        # The original tested `dimensions > d` (whole-tuple comparison) inside
        # an any(); comparing the clamped result directly is equivalent for
        # tuples and also works when `dimensions` is a list.
        if d != tuple(dimensions):
            print("Dimensions out of bounds. Cropped to max size.")
        return array[p[1]: p[1] + d[1], p[0]: p[0] + d[0]]

    @staticmethod
    def thin(array, n, axis):
        """Delete every n-th slice (indices 0, n, 2n, ...) along axis 0 or 1.

        Assumes a 3-D array (e.g. an RGB image): axis=1 is handled by
        transposing, thinning along axis 0, then transposing back.
        """
        if axis == 1:
            array = array.transpose((1, 0, 2))
        kept = np.array([row for i, row in enumerate(array) if i % n != 0])
        return kept if axis == 0 else kept.transpose((1, 0, 2))

    @staticmethod
    def juxtapose(array, n, axis):
        """Return `array` repeated n times along `axis` (n <= 1: unchanged)."""
        result = array
        for _ in range(n - 1):
            result = np.concatenate((result, array), axis)
        return result

    def mosaic(self, array, dimensions):
        """Tile `array` dimensions[0] times along axis 0 and dimensions[1]
        times along axis 1."""
        mos = self.juxtapose(array, dimensions[0], 0)
        return self.juxtapose(mos, dimensions[1], 1)
|
class Gel:
    """The Gel object defines a swelling gel.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import material
        mdb.models[name].materials[name].gel
        import odbMaterial
        session.odbs[name].materials[name].gel

    The table data for this object are:

    - Radius of gel particles when completely dry, radry.
    - Fully swollen radius of gel particles, raf.
    - Number of gel particles per unit volume, ka.
    - Relaxation time constant for long-term swelling of gel particles, τ1.

    The corresponding analysis keywords are:

    - GEL
    """

    def __init__(self, table: tuple):
        """This method creates a Gel object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            mdb.models[name].materials[name].Gel
            session.odbs[name].materials[name].Gel

        Parameters
        ----------
        table
            A sequence of sequences of Floats specifying the items described
            in the class notes above.

        Returns
        -------
        A Gel object.
        """
        # Stub: actual construction is performed by the host application's
        # kernel; this class only documents the interface.
        pass

    def setValues(self):
        """This method modifies the Gel object."""
        # Stub: see note in __init__.
        pass
|
from sklearn.base import BaseEstimator, TransformerMixin
import numpy as np
import pandas as pd
class first_sums(BaseEstimator, TransformerMixin):
    """Pipeline step that appends cumulative season-to-date statistics.

    For every (season, team) pair, each team's home and away results are
    stacked, sorted by week, and cumulatively summed; the running totals are
    merged back onto the fixture rows as *_home / *_away columns, plus
    home-only (ht_*) and away-only (at_*) cumulative columns.
    """

    def __init__(self):
        pass

    def fit(self, X, y=None):
        """No fitting required; returns self (scikit-learn transformer API)."""
        return self

    def transform(self, X, y=None):
        """Return a copy of X with cumulative win/draw/lose/goal columns added.

        The raw per-match result columns are dropped from the output.
        """
        df = X.copy()
        # Collect per-(season, team) cumulative frames, then concatenate once.
        # DataFrame.append was removed in pandas 2.0 and repeated appends were
        # quadratic anyway, so pd.concat over collected parts is used instead.
        home_parts = []
        away_parts = []
        for year in df['season'].unique():
            for team in df['Home'].unique():
                tmp_1 = df[(df['Home'] == team) & (df['season'] == year)].copy()
                tmp_1 = tmp_1[['season', 'Wk', 'Home', 'home_win',
                               'home_draw', 'home_lose', 'goals_home', 'goals_away']]
                tmp_1.rename(columns={'Home': 'team', 'home_win': 'win', 'home_draw': 'draw',
                                      'home_lose': 'lose', 'goals_home': 'goals_for', 'goals_away': 'goals_against'},
                             inplace=True)
                tmp_1['home'] = 1
                tmp_2 = df[(df['Away'] == team) & (df['season'] == year)].copy()
                tmp_2 = tmp_2[['season', 'Wk', 'Away', 'away_win',
                               'away_draw', 'away_lose', 'goals_away', 'goals_home']]
                tmp_2.rename(columns={'Away': 'team', 'away_win': 'win', 'away_draw': 'draw',
                                      'away_lose': 'lose', 'goals_away': 'goals_for', 'goals_home': 'goals_against'},
                             inplace=True)
                tmp_2['home'] = 0
                tmp = pd.concat([tmp_1, tmp_2])
                tmp.sort_values(['season', 'Wk'], inplace=True)
                # running totals over the team's whole season (home + away)
                tmp[['win', 'draw', 'lose', 'goals_for', 'goals_against']] = tmp[[
                    'win', 'draw', 'lose', 'goals_for', 'goals_against']].transform('cumsum')
                home_parts.append(tmp[tmp['home'] == 1].drop(columns='home'))
                away_parts.append(tmp[tmp['home'] == 0].drop(columns='home'))
        if not home_parts and not away_parts:
            # nothing to aggregate (empty input): return the copy unchanged
            return df
        home = pd.concat(home_parts)
        away = pd.concat(away_parts)
        # merge the running totals back onto the fixture rows by index
        home.columns = [col + '_home' for col in home.columns]
        df = df.merge(home.drop(columns=['season_home', 'Wk_home', 'team_home']),
                      left_index=True,
                      right_index=True)
        away.columns = [col + '_away' for col in away.columns]
        df = df.merge(away.drop(columns=['season_away', 'Wk_away', 'team_away']),
                      left_index=True,
                      right_index=True)
        # home-venue-only and away-venue-only running totals.
        # NOTE(review): the at_* block sums goals_home into at_home_goals_for —
        # confirm whether goals_away/goals_home should be swapped there.
        df[['ht_home_win', 'ht_home_draw', 'ht_home_lose', 'ht_home_goals_for', 'ht_home_goals_against']] = \
            df.groupby(['season', 'Home'])[['home_win', 'home_draw', 'home_lose', 'goals_home', 'goals_away']].transform(
                'cumsum')
        df[['at_home_win', 'at_home_draw', 'at_home_lose', 'at_home_goals_for', 'at_home_goals_against']] = \
            df.groupby(['season', 'Away'])[['away_win', 'away_draw', 'away_lose', 'goals_home', 'goals_away']].transform(
                'cumsum')
        df.drop(columns=['home_win', 'home_draw', 'home_lose', 'goals_home', 'goals_away', 'away_win', 'away_draw',
                         'away_lose'], inplace=True)
        return df
|
#class Solution:
#    def beautySum(self, s: str) -> int:
def beautySum(s):
    """Return the sum of 'beauty' over all substrings of s, where beauty is
    (most frequent character count) - (least frequent character count).
    Substrings shorter than 3 characters always have beauty 0.
    """
    total = 0
    length = len(s)
    for start in range(length):
        counts = {}
        for end in range(start, length):
            ch = s[end]
            counts[ch] = counts.get(ch, 0) + 1
            # length-1 and length-2 substrings contribute 0, so skip them
            if end - start >= 2:
                total += max(counts.values()) - min(counts.values())
    return total
def test_case(k, s, expected):
    """Run beautySum on s and print whether the answer matches expected."""
    print(f"Test case {k:02d}")
    ans = beautySum(s)
    if ans != expected:
        print(f"Incorrect. ans = {ans}, expected = {expected}")
        return
    print("Correct.")

if __name__ == "__main__":
    # worked examples (LeetCode 1781)
    test_case(1, "aabcb", 5)
    test_case(2, "aabcbaa", 17)
|
import unittest
from ho600_ltd_libraries.utils.tests import *
# Discover and run every test pulled in by the wildcard import above.
if __name__ == '__main__':
    unittest.main()
|
import matplotlib.pyplot as plt
import pygad as pg
import numpy as np
import h5py
import caesar
# Galaxy selection thresholds: log sSFR above ssfr_lim and more than n_lim
# gas particles.
ssfr_lim = -1.5
n_lim = 1000
# Simulation identifiers used to build the snapshot/catalogue paths.
model = 'm50n512'
snap = '151'
wind = 's50j7k'
plot_dir = './all_pygad_plots/'
# Image half-size in units of the stellar half-mass radius.
factor = 10.
# Per-particle-type gravitational softening lengths (comoving kpc/h).
# NOTE(review): presumably ordered by particle type with only one non-zero
# entry — confirm against the simulation's type convention.
softening = pg.UnitArr([0., 0., 0., 0., 0.25, 0.], 'ckpc h_0**-1')
# Axis index permutations for the three orthogonal projections.
xaxis = [0, 1, 2]
yaxis = [1, 2, 0]
zaxis = [2, 0, 1]
# NOTE(review): this hand-picked list is dead — it is overwritten by the
# mask-based selection below.
gals = [20, 138, 230]
data_dir = '/home/rad/data/'+model+'/'+wind+'/'
sim = caesar.load(data_dir+'Groups/'+model+'_'+snap+'.hdf5')
# stellar mass maps
# Per-central-galaxy properties from the caesar catalogue.
gal_n = np.array([len(i.glist) for i in sim.central_galaxies])
gal_rad = np.array([i.radii['stellar_half_mass'].in_units('kpc') for i in sim.central_galaxies])
gal_sm = np.array([i.masses['stellar'].in_units('Msun') for i in sim.central_galaxies])
gal_sfr = np.array([i.sfr.in_units('Msun/Gyr') for i in sim.central_galaxies])
gal_ssfr = np.log10(gal_sfr / gal_sm)
# Select star-forming, well-resolved centrals (both masks must hold).
ssfr_mask = gal_ssfr > ssfr_lim
n_mask = gal_n > n_lim
gals = np.arange(len(sim.central_galaxies))[(ssfr_mask*n_mask)]
# Load the particle snapshot and derive per-particle HI/H2 masses.
s = pg.Snap(data_dir+'snap_'+model+'_'+snap+'.hdf5')
h = s.cosmology.h()
z = s.redshift
bh_pos = s.bh['pos'].in_units_of('kpc')
abundance_h1 = s.gas['NeutralHydrogenAbundance']
abundance_h2 = s.gas['fh2']
mass = s.gas['mass'].in_units_of('Msol')
s.gas['mass_h1'] = abundance_h1*mass
s.gas['mass_h2'] = abundance_h2*mass
# For each selected galaxy: take a ball of gas around its (first) black hole
# and render three orthogonal HI mass projections into one figure.
for i in gals:
    # centre on the galaxy's first black hole particle
    pos = bh_pos[sim.central_galaxies[i].bhlist[0]]
    # ball slightly larger than the plotted region to avoid edge artefacts
    radius = str(round((factor+2)*gal_rad[i], 2)) + ' kpc'
    ball = s[pg.BallMask(radius, center=pos)]
    #extent = pg.UnitArr([[pos[0] - factor*gal_rad[i], pos[0] + factor*gal_rad[i]], [pos[1] - factor*gal_rad[i], pos[1] + factor*gal_rad[i]]], 'kpc')
    #args_sm = dict(cmap='magma', fontsize=8, Npx=256, softening=softening, extent=extent, vlim=[10.**-3.5, 10.**-0.1])
    #args_h1 = dict(cmap='magma', fontsize=8, Npx=256, extent=extent)
    args_sm = dict(cmap='magma', fontsize=8, Npx=1024, softening=softening, vlim=[10.**-3.5, 10.**-0.1])
    args_h1 = dict(cmap='magma', fontsize=8, Npx=1024, vlim=[4.e3, 1.e9], cbartitle='')
    fig, axes = plt.subplots(1,3, figsize=(12,6))
    # one panel per projection (xy, yz, zx)
    for j in range(len(xaxis)):
        pg.plotting.image(ball.gas, qty='mass_h1', xaxis=xaxis[j], yaxis=yaxis[j], ax=axes[j], **args_h1)
    plt.savefig(plot_dir+'gal_'+str(i)+'_h1_pygad.png')
    plt.clf()
    # Disabled variant: smaller-ball H2 maps.
    """
    radius = str(round((factor)*gal_rad[i], 2)) + ' kpc'
    ball = s[pg.BallMask(radius, center=pos)]
    args_h1 = dict(cmap='magma', fontsize=8, Npx=1024, vlim=[10.**3., 10.**9.])
    fig, axes = plt.subplots(1,3, figsize=(12,6))
    for j in range(len(xaxis)):
        pg.plotting.image(ball.gas, qty='mass_h2', xaxis=xaxis[j], yaxis=yaxis[j], ax=axes[j], **args_h1)
    plt.savefig(plot_dir+'gal_'+str(i)+'_h2_small_pygad.png')
    plt.clf()
    """
# Disabled variant: HI column-density maps in cm units.
"""
# HI column density
gal_rad = np.array([i.radii['stellar_half_mass'].in_units('cm') for i in sim.central_galaxies])
s = pg.Snap(data_dir+'snap_'+model+'_'+snap+'.hdf5')
h = s.cosmology.h()
z = s.redshift
abundance_h1 = s.gas['NeutralHydrogenAbundance']
mass = s.gas['mass'].in_units_of('Msol')
s.gas['mass_h1'] = abundance_h1*mass
s.gas['N_HI'] = s.gas['mass_h1'].in_units_of('1e+30 kg') / pg.cosmology.m_p.in_units_of('kg')
bh_pos = s.bh['pos'].in_units_of('cm')
s['pos'].convert_to('cm')
for i in gals:
    pos = bh_pos[sim.central_galaxies[i].bhlist[0]]
    radius = str(round(factor*gal_rad[i], 2)) + ' cm'
    ball = s[pg.BallMask(radius, center=pos)]
    extent = pg.UnitQty(gal_rad[i]*factor, 'cm')
    extent = [[pg.UnitQty(pos[0] - factor*gal_rad[i], 'cm'), pg.UnitQty(pos[0] + factor*gal_rad[i], 'cm')],
              [pg.UnitQty(pos[1] - factor*gal_rad[i], 'cm'), pg.UnitQty(pos[1] + factor*gal_rad[i], 'cm')]]
    args = dict(cmap='magma', fontsize=8, Npx=256)
    ball.gas['pos'].convert_to('cm')
    ball.gas['hsml'].convert_to('cm')
    pg.plotting.image(ball.gas, qty='N_HI', xaxis=0, yaxis=1, **args)
    plt.savefig(plot_dir+'gal_'+str(i)+'_HI_pygad.png')
    plt.clf()
"""
|
# Make a calculator that does +, -, *, and /, however, alerting if the special "ZeroDivisionError" or any errors occur.
import sys
def InputFunction():
    """Prompt for two numbers and an operation, then dispatch to the matching
    arithmetic function. Unknown operations re-prompt; any input error (e.g.
    non-numeric text) is routed to ErrorHandle.
    """
    try:
        FirstNumber = float(input("What is the first number? "))
        SecondNumber = float(input("What is the second number? "))
        Operation = input("What is the operation? +, -, *, or / ")
        if Operation == "/":
            Divide(FirstNumber, SecondNumber)
        elif Operation == "+":
            Add(FirstNumber, SecondNumber)
        elif Operation == "*":
            Multiply(FirstNumber, SecondNumber)
        elif Operation == "-":
            Subtract(FirstNumber, SecondNumber)
        else:
            print("We're sorry, however, your command cannot be recognized. Please look at the operation syntax on the prompt.")
            InputFunction()
    except Exception:
        # was a bare except:, which also swallowed SystemExit and
        # KeyboardInterrupt — catch only real errors so Ctrl-C still exits
        ErrorName = sys.exc_info()[0]
        ErrorHandle(ErrorName)
def Add(FirstNumber, SecondNumber):
    """Print the sum of the two numbers, then offer another round."""
    try:
        AddResult = FirstNumber + SecondNumber
        print(AddResult)
    except Exception:
        # narrowed from a bare except: so Ctrl-C is not swallowed
        ErrorName = sys.exc_info()[0]
        ErrorHandle(ErrorName)
    AskContinue()
def Subtract(FirstNumber, SecondNumber):
    """Print the difference of the two numbers, then offer another round."""
    try:
        SubtractResults = FirstNumber - SecondNumber
        print(SubtractResults)
    except Exception:
        # narrowed from a bare except: so Ctrl-C is not swallowed
        ErrorName = sys.exc_info()[0]
        ErrorHandle(ErrorName)
    AskContinue()
def Multiply(FirstNumber, SecondNumber):
    """Print the product of the two numbers, then offer another round."""
    try:
        MultiplyResults = FirstNumber * SecondNumber
        print(MultiplyResults)
    except Exception:
        # narrowed from a bare except: so Ctrl-C is not swallowed
        ErrorName = sys.exc_info()[0]
        ErrorHandle(ErrorName)
    AskContinue()
def Divide(FirstNumber, SecondNumber):
    """Print FirstNumber / SecondNumber. Division by zero gets a friendly
    explanation and restarts the prompt; other errors go to ErrorHandle."""
    try:
        DivisionResult = FirstNumber / SecondNumber
        print(DivisionResult)
    except ZeroDivisionError:
        ErrorName = sys.exc_info()[0]
        print("We're sorry for the error. This error will occur when you try to divide a number by zero.")
        print(ErrorName)
        print("Please try again.")
        InputFunction()
        # return here: the original fell through to AskContinue() as well,
        # double-prompting the user after a zero-division retry
        return
    except Exception:
        # narrowed from a bare except: so Ctrl-C is not swallowed
        # (also dropped the unused `Flag` variable from the success path)
        ErrorName = sys.exc_info()[0]
        ErrorHandle(ErrorName)
    AskContinue()
def AskContinue():
    """Ask whether to run another calculation; any case-insensitive 'yes'
    restarts the prompt, anything else thanks the user and stops."""
    try:
        Continue = input("Do you want to continue? ")
        ContinueString = Continue.upper()
        if ContinueString == "YES":
            InputFunction()
        else:
            print("Thank you!")
    except Exception:
        # narrowed from a bare except: so Ctrl-C is not swallowed
        ErrorName = sys.exc_info()[0]
        ErrorHandle(ErrorName)
def ErrorHandle(ErrorName):
    """Report an unexpected error class to the user and restart the prompt.

    ErrorName is the exception class (sys.exc_info()[0]) from the caller.
    """
    print("We're sorry, an error has occurred.")
    print("Please use this error name when contacting support or troubleshooting: ")
    print(ErrorName)
    print("Please try again.")
    # NOTE(review): restarting via recursion grows the call stack on every
    # error; a loop in InputFunction would be safer — confirm before changing.
    InputFunction()
# Entry point: greet the user and start the prompt cycle.
print("Welcome to the calculator!")
InputFunction()
|
# -*- coding: utf-8 -*-
"""WSGI server."""
import argparse
import sys
from flask import Flask, request, jsonify
from flask_cors import CORS
from werkzeug.exceptions import BadRequest, NotFound, InternalServerError
from .columns import list_columns, update_column
from .datasets import list_datasets, create_dataset, get_dataset
from .samples import init_datasets
# Flask application and its route handlers. Each handler is a thin JSON
# wrapper around the corresponding datasets/columns service function.
app = Flask(__name__)

@app.route("/", methods=["GET"])
def ping():
    """Handles GET requests to / (health check)."""
    return "pong"

@app.route("/datasets", methods=["GET"])
def handle_list_datasets():
    """Handles GET requests to /datasets."""
    return jsonify(list_datasets())

@app.route("/datasets", methods=["POST"])
def handle_post_datasets():
    """Handles POST requests to /datasets (expects uploaded files)."""
    return jsonify(create_dataset(request.files))

@app.route("/datasets/<name>", methods=["GET"])
def handle_get_dataset(name):
    """Handles GET requests to /datasets/<name>."""
    return jsonify(get_dataset(name))

@app.route("/datasets/<dataset>/columns", methods=["GET"])
def handle_list_columns(dataset):
    """Handles GET requests to /datasets/<dataset>/columns."""
    return jsonify(list_columns(dataset))

@app.route("/datasets/<dataset>/columns/<column>", methods=["PATCH"])
def handle_patch_column(dataset, column):
    """Handles PATCH requests to /datasets/<dataset>/columns/<column>."""
    # NOTE(review): request.get_json() can return None when no JSON body is
    # sent — confirm callers always send a JSON payload.
    featuretype = request.get_json().get("featuretype")
    return jsonify(update_column(dataset, column, featuretype))

@app.errorhandler(BadRequest)
@app.errorhandler(NotFound)
@app.errorhandler(InternalServerError)
def handle_errors(e):
    """Handles exceptions raised by the API, returning {message} + HTTP code."""
    return jsonify({"message": e.description}), e.code
def parse_args(args):
    """Takes argv and parses API options.

    Parameters
    ----------
    args : list of str
        Argument vector, e.g. sys.argv[1:].

    Returns
    -------
    argparse.Namespace
        Parsed options: port, enable_cors, debug, samples_config.
    """
    parser = argparse.ArgumentParser(
        description="Datasets API"
    )
    parser.add_argument(
        "--port", type=int, default=8080, help="Port for HTTP server (default: 8080)"
    )
    # action="count" leaves the attribute None (falsy) when the flag is
    # absent; callers only truth-test these values.
    # Added the missing help text for consistency with --debug.
    parser.add_argument(
        "--enable-cors", action="count", help="Enable CORS"
    )
    parser.add_argument(
        "--debug", action="count", help="Enable debug"
    )
    parser.add_argument(
        "--samples-config", help="Path to sample datasets config file."
    )
    return parser.parse_args(args)
# Script entry point: parse CLI options, optionally enable CORS and install
# sample datasets, then start the development server.
if __name__ == "__main__":
    args = parse_args(sys.argv[1:])
    # Enable CORS if required
    if args.enable_cors:
        CORS(app)
    # Install sample datasets if required
    if args.samples_config:
        init_datasets(args.samples_config)
    # args.debug is an int count or None; Flask treats any truthy value as on
    app.run(host="0.0.0.0", port=args.port, debug=args.debug)
|
"""MixtapeServeur URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.conf import settings
from django.contrib import admin
from django.shortcuts import render_to_response
from . import views
# Route table: admin site, the three app URLconfs, and the monitor view at /.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    url(r'^music/', include('MixtapeServeur.apps.music.urls')),
    url(r'^station/', include('MixtapeServeur.apps.station.urls')),
    url(r'^MixTapeUser/', include('MixtapeServeur.apps.mixtapeUser.urls')),
    url(r'^$', views.monitor),
]
# debug toolbar for dev (only mounted when DEBUG is on and the app installed)
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
    import debug_toolbar
    urlpatterns += [
        url(r'^__debug__/', include(debug_toolbar.urls)),
    ]
|
# Copyright (c) SkyTruth
# Author: Egil Moeller <egil@skytruth.org>
# Parts of the code reused from loaddata.py and dumpdata.py from Django
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.db import router, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
import django.db.transaction
import os
import sys
from optparse import make_option
from django.conf import settings
from django.core.management.color import no_style
from django.db import (connections, router, transaction, DEFAULT_DB_ALIAS,
IntegrityError, DatabaseError)
from django.db.models import get_apps
from django.utils.encoding import force_text
from django.utils._os import upath
from itertools import product
from optparse import make_option
class Command(BaseCommand):
    # Management command: stream every model row from one configured database
    # into another, in natural-key dependency order, inside one transaction.
    # NOTE: this file targets Python 2 / old Django APIs (print statement,
    # optparse option_list, transaction.managed) — keep that in mind.
    option_list = BaseCommand.option_list + (
        make_option('--source', action='store', dest='source',
            default="default", help='Nominates a specific database to load from. Defaults to the "default" database.'),
        make_option('--destination', action='store', dest='destination',
            default="destination", help='Nominates a specific database to copy to. Defaults to the "destination" database.'),
        make_option('-e', '--exclude', dest='exclude',action='append', default=[],
            help='An appname or appname.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).'),
        make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
            help='Use natural keys if they are available.'),
        make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
            help="Use Django's base manager to dump all models stored in the database, including those that would otherwise be filtered or modified by a custom manager."),
        make_option('--ignorenonexistent', '-i', action='store_true', dest='ignore',
            default=False, help='Ignores entries in the serialized data for fields'
            ' that do not currently exist on the model.'),
    )
    help = ("Copies model data from a one database to another (using each model's default manager unless --all is "
        "specified).")
    args = '[appname appname.ModelName ...]'

    def handle(self, *args, **kwargs):
        # Thin wrapper that prints any failure plus its traceback
        # (Python 2 "except Exception, e" / print-statement syntax).
        try:
            return self.handle2(*args, **kwargs)
        except Exception, e:
            print e
            import traceback
            traceback.print_exc()

    def handle2(self, *app_labels, **options):
        """Copy rows for the requested apps/models from --source to --destination."""
        from django.db.models import get_app, get_apps, get_model

        source = options.get('source')
        destination = options.get('destination')
        excludes = options.get('exclude')
        show_traceback = options.get('traceback')
        use_natural_keys = options.get('use_natural_keys')
        use_base_manager = options.get('use_base_manager')
        verbosity = int(options.get('verbosity'))

        # Resolve --exclude entries into sets of excluded apps and models.
        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                app_label, model_name = exclude.split('.', 1)
                model_obj = get_model(app_label, model_name)
                if not model_obj:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model_obj)
            else:
                try:
                    app_obj = get_app(exclude)
                    excluded_apps.add(app_obj)
                except ImproperlyConfigured:
                    raise CommandError('Unknown app in excludes: %s' % exclude)

        # Build app_list mapping app -> model list (None means "all models").
        if len(app_labels) == 0:
            app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
        else:
            app_list = SortedDict()
            for label in app_labels:
                try:
                    app_label, model_label = label.split('.')
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    model = get_model(app_label, model_label)
                    if model is None:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
                    if app in app_list.keys():
                        if app_list[app] and model not in app_list[app]:
                            app_list[app].append(model)
                    else:
                        app_list[app] = [model]
                except ValueError:
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    app_list[app] = None

        def get_objects():
            # Collate the objects to be serialized.
            for model in sort_dependencies(app_list.items()):
                if model in excluded_models:
                    continue
                if not model._meta.proxy and router.allow_syncdb(source, model):
                    if use_base_manager:
                        objects = model._base_manager
                    else:
                        objects = model._default_manager
                    for obj in objects.using(source).\
                            order_by(model._meta.pk.name).iterator():
                        yield obj

        connection = connections[destination]
        cursor = connection.cursor()

        # Running totals for the summary printed at the end.
        self.loaded_object_count = 0
        self.fixture_object_count = 0
        self.models = set()

        # Run the whole copy inside one managed transaction on the destination.
        transaction.commit_unless_managed(using=destination)
        transaction.enter_transaction_management(using=destination)
        transaction.managed(True, using=destination)
        try:
            # Constraint checks are disabled so rows can be inserted out of
            # FK order; constraints are verified manually afterwards.
            with connection.constraint_checks_disabled():
                for obj in get_objects():
                    self.fixture_object_count += 1
                    self.models.add(obj.__class__)
                    try:
                        obj.save(using=destination, force_insert = True)
                        self.loaded_object_count += 1
                        sys.stdout.write(".")
                        sys.stdout.flush()
                    except (DatabaseError, IntegrityError) as e:
                        # annotate the error with which object failed
                        e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
                                'app_label': obj._meta.app_label,
                                'object_name': obj._meta.object_name,
                                'pk': obj.pk,
                                'error_msg': force_text(e)
                            },)
                        raise
            # Since we disabled constraint checks, we must manually check for
            # any invalid keys that might have been added
            table_names = [model._meta.db_table for model in self.models]
            try:
                connection.check_constraints(table_names=table_names)
            except Exception as e:
                e.args = ("Problem installing fixtures: %s" % e,)
                raise
        except (SystemExit, KeyboardInterrupt):
            raise
        except Exception as e:
            # roll back everything copied so far on any failure
            transaction.rollback(using=destination)
            transaction.leave_transaction_management(using=destination)
            raise

        # If we found even one object in a fixture, we need to reset the
        # database sequences.
        if self.loaded_object_count > 0:
            sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
            if sequence_sql:
                if verbosity >= 2:
                    self.stdout.write("Resetting sequences\n")
                for line in sequence_sql:
                    cursor.execute(line)

        transaction.commit(using=destination)
        transaction.leave_transaction_management(using=destination)

        if verbosity >= 1:
            if self.fixture_object_count == self.loaded_object_count:
                self.stdout.write("Installed %d object(s)" % (self.loaded_object_count,))
            else:
                self.stdout.write("Installed %d object(s) (of %d)" % (
                    self.loaded_object_count, self.fixture_object_count))

        # Close the DB connection. This is required as a workaround for an
        # edge case in MySQL: if the same connection is used to
        # create tables, load data, and query, the query can return
        # incorrect results. See Django #7572, MySQL #37735.
        connection.close()
def sort_dependencies(app_list):
    """Sort a list of app,modellist pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has it's dependencies serialized first.

    Raises CommandError if the dependency graph contains a cycle.
    """
    from django.db.models import get_model, get_models
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app, model_list in app_list:
        # model_list of None means "all models of this app"
        if model_list is None:
            model_list = get_models(app)
        for model in model_list:
            models.add(model)
            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [get_model(*d.split('.')) for d in deps]
            else:
                deps = []
            # Now add a dependency for any FK or M2M relation with
            # a model that defines a natural key
            for field in model._meta.fields:
                if hasattr(field.rel, 'to'):
                    rel_model = field.rel.to
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            for field in model._meta.many_to_many:
                rel_model = field.rel.to
                if hasattr(rel_model, 'natural_key') and rel_model != model:
                    deps.append(rel_model)
            model_dependencies.append((model, deps))
    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()
            # If all of the models in the dependency list are either already
            # on the final model list, or not on the original serialization list,
            # then we've found another model with all it's dependencies satisfied.
            found = True
            for candidate in ((d not in models or d in model_list) for d in deps):
                if not candidate:
                    found = False
            if found:
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            # no model could be promoted this pass: circular dependency
            raise CommandError("Can't resolve dependencies for %s in serialized app list." %
                ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
                for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
            )
        model_dependencies = skipped
    return model_list
|
import sys
from string import punctuation
def sort(tup):
    # Sort key: the count (second element) of a (vowel, count) pair.
    return tup[1]

# Running tally for each vowel seen on stdin.
vowels = {
    'a': 0,
    'e': 0,
    'i': 0,
    'o': 0,
    'u': 0
}

# Count vowels word by word, stripping surrounding punctuation and case.
for line in sys.stdin:
    line = line.strip().split()
    for word in line:
        word = word.strip(punctuation).lower()
        if not word:
            continue
        for letter in word:
            if letter in vowels:
                vowels[letter] += 1

# Width (in digits) of the largest count, for right-aligned output.
len_largest_int = 0
for n in vowels.values():
    if len(str(n)) > len_largest_int:
        len_largest_int = len(str(n))

# Print vowels by count, descending; the nested format field pads each
# count to the widest width.
for v in sorted(vowels.items(), key = sort, reverse = True):
    print ('{:s} : {:{:d}d}'.format(v[0], v[1], len_largest_int))
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 18:39:16 2019
@author: Alex
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.tree import DecisionTreeClassifier
import graphviz
from sklearn.tree import export_graphviz
# Load the dataset
credito = pd.read_csv("Credit.csv")
# Predictor columns (first 20)
previsores = credito.iloc[:,0:20].values
# Target class (column 20)
classe = credito.iloc[:,20].values
# Integer-encode the categorical columns before fitting the model.
# The original encoded column 13 from column 14's values (copy-paste bug);
# looping over the categorical column indices encodes each from itself.
labelencoder = LabelEncoder()
for col in (0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 18, 19):
    previsores[:, col] = labelencoder.fit_transform(previsores[:, col])
# Train/test split (70/30, fixed seed for reproducibility)
X_treinamento, X_teste, y_treinamento, y_teste = train_test_split(previsores,
                                                                  classe,
                                                                  test_size = 0.3,
                                                                  random_state = 0)
# Build and fit the decision tree model
modelo = DecisionTreeClassifier()
modelo.fit(X_treinamento, y_treinamento)
# Export the fitted tree for visualization with graphviz
export_graphviz(modelo, out_file = 'tree.dot')
# Predict on the held-out set
previsoes = modelo.predict(X_teste)
# Model accuracy metrics
confusao = confusion_matrix(y_teste, previsoes)
acuracia = accuracy_score(y_teste, previsoes)
erro = 1 - acuracia
|
# This implementation of Lanczos interpolation is just too slow to run
# import matplotlib
# matplotlib.use('Agg')
import sys
import os
from glob import glob
import numpy as np
import fitsio
import astropy.io.fits as fits
import fitsio
from desitarget.targetmask import desi_mask
from desitarget.geomask import match
# from raichoorlib import get_line_list
# import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import MultipleLocator
import time
from astropy.table import Table, vstack, hstack, join
from scipy.ndimage import gaussian_filter1d
time_start = time.time()
def lanczos_kernal(x, a):
    """Lanczos window kernel: sinc(x) * sinc(x/a) inside (-a, a), 0 outside."""
    if not (-a < x < a):
        return 0
    return np.sinc(x) * np.sinc(x / a)
def lanczoc_interp(x, s, a):
    """Lanczos-interpolate the sample array *s* at fractional index *x*.

    Sums s[ii] * kernel(x - ii) over the 2*a integer indices around x,
    skipping indices that fall outside the array.
    """
    base = int(np.floor(x))
    limit = len(s)
    total = 0.
    for ii in range(base - a + 1, base + a + 1):
        if 0 <= ii < limit:
            total += s[ii] * lanczos_kernal(x - ii, a)
    return total
# Rest-frame wavelength grid: 1800-7017 Angstrom in 0.1 A steps.
rfwmin, rfwmax, delta = 1800., 7017., 0.1
rfws = np.round(np.arange(rfwmin, rfwmax + delta, delta), 2)
# LRG targets selected for stacking (columns used below: TILEID, PETAL_LOC,
# TARGETID, Z, thrunight).
d = Table(fitsio.read('/global/cfs/cdirs/desi/users/rongpu/tmp/lrgs_for_stacking.fits'))
# ###################################
# np.random.seed(718751)
# idx = np.random.choice(len(d), size=1000, replace=False)
# d = d[idx]
# ###################################
# Accumulators over the rest-frame grid: number of contributing spectra,
# inverse-variance-weighted flux sums, and the corresponding ivar sums.
n = np.zeros(len(rfws), dtype=int)
fl, fl_smooth, iv, iv_smooth = np.zeros(len(rfws)), np.zeros(len(rfws)), np.zeros(len(rfws)), np.zeros(len(rfws))
#
tileids = np.unique(d['TILEID'])
for index, tileid in enumerate(tileids):
    # targetid and zspec
    sel0 = d['TILEID']==tileid
    thrunight = d['thrunight'][sel0][0]
    petals = np.unique(d['PETAL_LOC'][sel0])
    print(index, '/', len(tileids), sel0.sum())
    for petal in petals:
        sel = (d['TILEID']==tileid) & (d['PETAL_LOC']==petal)
        tids = d['TARGETID'][sel]
        zs = d['Z'][sel]
        fn = '/global/cfs/cdirs/desi/spectro/redux/everest/tiles/cumulative/{}/{}/coadd-{}-{}-thru{}.fits'.format(tileid, thrunight, petal, tileid, thrunight)
        h = fits.open(fn)
        # match our targets to rows of the coadd fibermap
        ii, iisp = match(tids, h['FIBERMAP'].data['TARGETID'])
        # looping on the cameras
        for camera in ['B', 'R', 'Z']:
            # reading
            wsp = h['{}_WAVELENGTH'.format(camera)].data
            flsp = h['{}_FLUX'.format(camera)].data
            ivsp = h['{}_IVAR'.format(camera)].data
            msk = h['{}_MASK'.format(camera)].data
            # masked pixels get zero inverse variance (zero weight)
            ivsp[msk!=0] = 0.
            # looping through each spectrum...
            # interpolating to rest-frame wavelengths
            for i, isp in zip(ii, iisp):
                # split each spectrum into a smooth continuum and residuals
                flsp_smooth = gaussian_filter1d(flsp[isp], 150)
                flsp_desmooth = flsp[isp] - flsp_smooth
                ivsp_smooth = gaussian_filter1d(ivsp[isp], 150)
                # fractional pixel index of each rest-frame wavelength
                ixx = np.interp(rfws, wsp / (1 + zs[i]), np.arange(len(wsp)))
                rfflsp, rfivsp = np.zeros(len(rfws)), np.zeros(len(rfws))
                # NOTE(review): this inner ``index`` shadows the tile-loop
                # ``index`` above; harmless today (the outer one is only used
                # in the progress print) but worth renaming.
                for index in range(len(rfflsp)):
                    rfflsp[index] = lanczoc_interp(ixx[index], flsp_desmooth, 3)
                    rfivsp[index] = lanczoc_interp(ixx[index], ivsp[isp], 3)
                # rfflsp = np.interp(rfws, wsp / (1 + zs[i]), flsp_desmooth, left=0, right=0)
                # rfivsp = np.interp(rfws, wsp / (1 + zs[i]), ivsp[isp], left=0, right=0)
                # inverse-variance-weighted accumulation
                fl += rfflsp * rfivsp
                iv += rfivsp
                rfflsp_smooth = np.interp(rfws, wsp / (1 + zs[i]), flsp_smooth, left=0, right=0)
                rfivsp_smooth = np.interp(rfws, wsp / (1 + zs[i]), ivsp_smooth, left=0, right=0)
                fl_smooth += rfflsp_smooth * rfivsp_smooth
                iv_smooth += rfivsp_smooth
                n[rfivsp > 0] += 1
#
# Normalise the weighted sums wherever any weight was accumulated.
sel = iv > 0
fl[sel] /= iv[sel]
fl_smooth[sel] /= iv_smooth[sel]
#
# Write the stacked spectrum out as a FITS binary table.
cols = []
cols += [fits.Column(name='wavelength', format='E', array=rfws)]
cols += [fits.Column(name='flux', format='E', array=fl)]
cols += [fits.Column(name='flux_smooth', format='E', array=fl_smooth)]
cols += [fits.Column(name='nspec', format='K', array=n)]
h = fits.BinTableHDU.from_columns(fits.ColDefs(cols))
h.writeto('/global/cfs/cdirs/desi/users/rongpu/tmp/lrg_stacked_spectra_lanczos.fits', overwrite=True)
print(time.strftime("%H:%M:%S", time.gmtime(time.time() - time_start)))
|
import sys
from os import path
from importlib import import_module


def getSettings(environment: str, defaultConfig: dict):
    '''
    Import the environment-specific config module and return its settings.

    @environment: name of the config module under <project>/config/env
    @defaultConfig: dictionary of fallback application settings
    @return: the module's ``config`` dict, or ``defaultConfig`` when the
             module cannot be imported or has no ``config`` attribute
    '''
    try:
        # Make <project-root>/config/env importable, then load the module.
        sys.path.append(path.dirname(path.dirname(path.abspath(__file__))) + '/config/env')
        module = import_module(environment)
        return module.config
    except (ImportError, AttributeError):
        # BUG FIX: a bare ``except:`` also swallowed SystemExit,
        # KeyboardInterrupt and genuine programming errors; only the
        # expected import/attribute failures fall back to the default now.
        return defaultConfig
|
# Generated by Django 2.2.7 on 2019-11-25 10:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``visible`` boolean flag (default False) to the ``hipeac`` model."""

    dependencies = [
        ("hipeac", "0046_hipeac_hipeacpartner"),
    ]

    operations = [
        migrations.AddField(model_name="hipeac", name="visible", field=models.BooleanField(default=False),),
    ]
|
__author__ = 'lotso'
from scrapy import cmdline
# cmdline.execute("scrapy crawl asusMBspider -o asusMB.json".split())
# Launch an interactive scrapy shell against the ASUS motherboard listing page
# (the commented line above runs the spider proper and dumps JSON instead).
cmdline.execute("scrapy shell http://www.asus.com/Motherboards/Intel_Platform_Products/".split())
|
# Read a string and report whether it is a palindrome.
print("Enter the string:")
string = str(input())
print("You entered:", string)
l = len(string)
palin = True
# Compare each character with its mirror; checking the first half suffices.
# BUG FIXES: the original assigned a dead variable ``pal`` on matches, had an
# unreachable ``i == l`` branch, scanned the whole string (reporting each
# mismatch twice), and printed the mirror index as ``l - i`` although it
# compared against ``l - 1 - i``.
for i in range(l // 2):
    if string[i] != string[l - 1 - i]:
        palin = False
        print("mismatch at", i, l - 1 - i)
if not palin:
    print("it is not a palindrome")
else:
    print("Palindrome")
# This file loads all the configfiles from ~/.config/qutebrowser/config.d/
## Documentation:
##   qute://help/configuring.html
##   qute://help/settings.html
import os

# ``config`` is injected into this file's namespace by qutebrowser at load
# time; the self-assignment only silences "undefined name" linters.
config = config  # noqa

CONF_DIR = os.path.expanduser("~/.config/qutebrowser/config.d/")
# Source every *.py fragment found in the drop-in directory.
# NOTE(review): this raises FileNotFoundError when CONF_DIR does not exist.
for file in os.listdir(CONF_DIR):
    if file.endswith(".py"):
        config.source(os.path.join(CONF_DIR, file))
|
from urllib2 import urlopen
from json import load,dumps
import re
from random import sample
from multiprocessing import Pool
def splitpgraph(pgraph):
    """Split a paragraph into sentences at 'word of >= 5 letters, dot, space'."""
    return re.split(r'\w{5,}\. ', pgraph)
def getFacts(subject, num_titles=3, num_sentences=27):
    """Fetch section titles and sentence snippets for *subject* from the
    Wikipedia API (Python 2 code: urllib2/xrange).

    Returns (titles, text): *num_titles* section headings and exactly
    *num_sentences* randomly sampled sentences (padded with filler when the
    article is too short).
    """
    subject = re.sub(' ','_',subject) # put input into wiki format i.e. 'albert einstein' --> 'Albert_Einstein'
    # Ordered wikitext-to-plaintext substitutions: unwrap links, strip
    # templates/markup, collapse whitespace.
    translate = [
        (r'\[\[(?:[^|\]]*\|)?([^\]]+)\]\]',r'\1'),
        ('<.*>',''),
        ('\{\{ *.* *\}\}',''),
        ('\n',' '),
        ('\'\'+',''),
        ('\|.*\|',''),
        ('\[\[.*\]\]',''),
        ('\[\[.*\]\]',''),
        (' \* ',''),
        ('File:.*\|',''),
        (' ',' '),
        ('\w*\|\w*',''),
        ('\w*\}\}',''),
        ('[%${}]',''),
    ]
    def getTitle(section, text):
        # Extract the first "== Heading ==" from a section's wikitext.
        regex1 = r"\=\=(.*?)\=\="
        header = re.search(regex1, text)
        return header.group(1)
    def getText(section):
        # Download one section's wikitext; retry with Title/lower casing
        # when the page is not found under the given casing.
        url = 'http://en.wikipedia.org/w/api.php?action=query&prop=revisions&rvprop=content&rvsection=%d&titles=%s&format=json&redirects=true'
        article = load(urlopen(url % (section, subject)))
        if not 'query' in article:
            article = load(urlopen(url % (section, subject.title())))
        if not 'query' in article:
            article = load(urlopen(url % (section, subject.lower())))
        articleID = article['query']['pages'].keys()
        articleText = article['query']['pages'][articleID[0]]['revisions'][0]['*']
        for substitution in translate:
            articleText = re.sub(substitution[0], substitution[1],articleText)
        return articleText
    n = num_sentences/num_titles
    titles = [getTitle(i, getText(i)) for i in xrange(1, num_titles+1)]
    text = []
    section = 1
    # Accumulate sentences section by section until we have enough; a
    # KeyError from the API means the article ran out of sections.
    while len(text) < num_sentences:
        try:
            text += splitpgraph(getText(section))
            section += 1
        except KeyError:
            break
    # Pad with filler when the article was too short.
    while len(text) < num_sentences:
        text += ['james']
    text = sample(text, num_sentences)
    for i in xrange(len(text)):
        text[i] = re.sub('\=*.*?\=*','',text[i]) # now that titles have been extracted, get rid of remaining subtitles
    for i in xrange(len(titles)):
        titles[i] = re.sub('=','',titles[i]) # get rid of stray =
    return titles, text
if __name__ == '__main__':
    # Interactive entry point: fetch and print titles and facts (Python 2).
    subject = raw_input('topic: ')
    titles, text = getFacts(subject)
    for i, title in enumerate(titles):
        print i, title
    for i, t in enumerate(text):
        print i, t
|
from django.shortcuts import render, render_to_response
from decimal import Decimal
# Create your views here.
def home_view(request):
    """Read ``num_a``, ``function`` and ``num_b`` from the query string,
    evaluate the expression and render the result into base.html."""
    num_a = request.GET.get("num_a")
    function = request.GET.get("function")
    num_b = request.GET.get("num_b")
    # NOTE(review): the GET values are None when the parameters are missing
    # (e.g. first page load), which makes perform_function raise TypeError —
    # presumably the form always submits all three; confirm against template.
    context = perform_function(num_a, function, num_b)
    return render_to_response(template_name='base.html', context=context)
def perform_function(num_a, function, num_b):
    """Apply the operator named by *function* to the two string operands.

    Operands are parsed as floats when either contains a '.', otherwise as
    ints. Unknown operators produce an apologetic message. Returns a context
    dict with a single human-readable "response" string.
    """
    caster = float if ("." in num_a or "." in num_b) else int
    a = caster(num_a)
    b = caster(num_b)
    operations = {
        "+": lambda: a + b,
        "-": lambda: a - b,
        "*": lambda: a * b,
        "/": lambda: a / b,
        "**": lambda: a ** b,
    }
    if function in operations:
        answer = operations[function]()
    else:
        answer = "I think you entered something wrong..."
    return {"response": "{} {} {} = {}".format(num_a, function, num_b, answer)}
|
import tornado.web
import json
import logging
from Crypto.CommonEvclide import CommonEvclide
async def Evclide(a, b):
    # Thin awaitable wrapper around the project's CommonEvclide routine.
    # NOTE(review): CommonEvclide looks synchronous; this only makes the call
    # awaitable, it does not move the work off the event loop.
    return CommonEvclide(a, b)
class EvclideHandler(tornado.web.RequestHandler):
    """POST handler: reads JSON {'k1': ..., 'k2': ...}, runs the Euclid
    routine on the two integers and replies with two result components."""
    async def post(self):
        body = self.request.body.decode("UTF8")
        message = json.loads(body)
        logging.info("Got message %r" % message)
        gr = await Evclide(int(message['k1']), int(message['k2']))
        # presumably gr is a 3-sequence (e.g. gcd, x, y) and indices 1 and 2
        # are the coefficients -- TODO confirm against CommonEvclide.
        data = {
            'k1': str(gr[1]),
            'k2': str(gr[2])
        }
        self.write(data)
if __name__ == "__main__":
    # Module is only meant to be imported by the tornado app; nothing to run.
    pass
#! /usr/bin/env python
# encoding:utf-8
def merge(list1, list2):
    """Pair up the two sequences into a dict: keys from list1, values from list2.

    Extra elements in the longer sequence are ignored (zip semantics).
    """
    return {key: value for key, value in zip(list1, list2)}
if __name__ == "__main__":
    # Demo: zip two parallel lists into a dict (Python 2 print statement).
    list1 = [1,2,3]
    list2 = ['abc','def','ghi']
    print merge(list1,list2)
|
"""
Simple task list.
"""
def new(tasklist, task):
    """Append *task* to the end of *tasklist* (mutates the list in place)."""
    tasklist += [task]
def remove_by_num(tasklist, tasknum):
    """Delete the 1-based *tasknum*-th entry; out-of-range numbers are ignored."""
    if 0 < tasknum <= len(tasklist):
        del tasklist[tasknum - 1]
def remove_by_name(tasklist, taskname):
    """Remove the first occurrence of *taskname*; absent names are ignored."""
    try:
        tasklist.remove(taskname)
    except ValueError:
        pass
def printlist(tasklist):
    """Print the task list, one numbered line per task, between rules."""
    print("========================")
    for number, task in enumerate(tasklist, start=1):
        print(number, task)
    print("========================")
def run():
    """Demo: grow a task list, then remove entries by number and by name."""
    tasks = []
    new(tasks, 'Teach Class')
    printlist(tasks)
    for label in ('Buy some ties', 'Learn Python'):
        new(tasks, label)
    printlist(tasks)
    new(tasks, 'Build new task list')
    printlist(tasks)
    remove_by_num(tasks, 1)
    printlist(tasks)
    remove_by_num(tasks, 2)
    printlist(tasks)
    remove_by_name(tasks, 'Buy some ties')
    printlist(tasks)


run()
|
import io
import sys
def parse_group(stream):
    """Read one blank-line-terminated group of lines from *stream* and count
    the characters that appear on every line of the group.

    Returns the count, or None when the stream is already exhausted.

    BUG FIX: the original returned None on EOF even when lines had already
    been read, silently dropping the final group of a file that does not end
    with a blank line. (Also renamed locals that shadowed the builtins
    ``str`` and ``sum``.)
    """
    counts = 256 * [0]
    lines = 0
    while True:
        line = stream.readline()
        if line == "":
            if lines == 0:
                return None
            break  # EOF also terminates the final group
        if line == "\n":
            break
        for ch in line.strip():
            counts[ord(ch)] += 1
        lines += 1
    # A character answered by everyone appears exactly once on each line.
    return sum(1 for c in counts if c == lines)
# Driver: total the per-group counts over the file named on the command line.
# NOTE(review): ``sum`` shadows the builtin here; consider renaming.
sum = 0
stream = open(sys.argv[1])
while True:
    count = parse_group(stream)
    if count == None: break
    sum += count
print(sum)
|
#
# [273] Integer to English Words
#
# https://leetcode.com/problems/integer-to-english-words/description/
#
# algorithms
# Hard (22.95%)
# Total Accepted: 66.7K
# Total Submissions: 289.2K
# Testcase Example: '123'
#
# Convert a non-negative integer to its english words representation. Given
# input is guaranteed to be less than 231 - 1.
#
# Example 1:
#
#
# Input: 123
# Output: "One Hundred Twenty Three"
#
#
# Example 2:
#
#
# Input: 12345
# Output: "Twelve Thousand Three Hundred Forty Five"
#
# Example 3:
#
#
# Input: 1234567
# Output: "One Million Two Hundred Thirty Four Thousand Five Hundred Sixty
# Seven"
#
#
# Example 4:
#
#
# Input: 1234567891
# Output: "One Billion Two Hundred Thirty Four Million Five Hundred Sixty Seven
# Thousand Eight Hundred Ninety One"
#
#
#
class Solution:
    # Words for the single digits 1-9 (0 is handled as "Zero" / empty group).
    single_digit = {1:"One", 2:"Two", 3:"Three", 4:"Four", 5:"Five"
                    , 6:"Six", 7:"Seven", 8:"Eight", 9:"Nine"}
    # Words for 10-19, plus — keyed by the *tens digit* 2-9 — the tens words.
    double_digit = {10:"Ten", 11:"Eleven", 12:"Twelve", 13:"Thirteen", 14:"Fourteen", 15:"Fifteen"
                    , 16:"Sixteen", 17:"Seventeen", 18:"Eighteen", 19:"Nineteen", 2:"Twenty"
                    , 3:"Thirty", 4:"Forty", 5:"Fifty", 6:"Sixty", 7:"Seventy", 8:"Eighty"
                    , 9:"Ninety"}
    def numberToWords(self, num):
        """
        Convert a non-negative integer to its English words representation.
        :type num: int
        :rtype: str
        """
        if num == 0:
            return "Zero"
        result = ""
        i = 0
        # Consume three digits (one scale group) at a time, lowest first;
        # i tracks the scale: 0 units, 1 Thousand, 2 Million, 3 Billion.
        while num > 0:
            tmp_result = self.threeDigitNumberToWords(num % 1000)
            if i == 1 and tmp_result != "":
                tmp_result = "{} Thousand ".format(tmp_result)
            elif i == 2 and tmp_result != "":
                tmp_result = "{} Million ".format(tmp_result)
            elif i == 3 and tmp_result != "":
                tmp_result = "{} Billion ".format(tmp_result)
            result = tmp_result + result
            num = int(num / 1000)
            i += 1
        # Trim a stray leading/trailing space left by the scale-word joins.
        if result[0] == " ":
            result = result[1:]
        if result[-1] == " ":
            result = result[:-1]
        return result
    def threeDigitNumberToWords(self, num):
        # Convert 0-999 to words; returns "" for 0 (empty scale group).
        result = ""
        if num > 99:
            result += "{} {}".format(self.single_digit[int(num / 100)], "Hundred")
            num = num % 100
        if num > 9 and num < 20:
            # 10-19 have unique names; trim and return early.
            result += " {}".format(self.double_digit[num])
            if len(result) > 0 and result[0] == " ":
                result = result[1:]
            if len(result) > 0 and result[-1] == " ":
                result = result[:-1]
            return result
        elif num >= 20:
            # Tens word is looked up by the tens digit (keys 2-9 above).
            result += " {}".format(self.double_digit[int(num / 10)])
            num = num % 10
        if num != 0:
            result += " {}".format(self.single_digit[num])
        if len(result) > 0 and result[0] == " ":
            result = result[1:]
        if len(result) > 0 and result[-1] == " ":
            result = result[:-1]
        return result
# Smoke test: expect "One Billion Two Hundred Thirty Four Million Five
# Hundred Sixty Seven Thousand Eight Hundred Ninety One".
sol = Solution()
print(sol.numberToWords(1234567891))
|
'''
终止程序并给出错误信息
'''
import sys
if __name__ == "__main__":
    # Report the failure on stderr, then exit with status code 1.
    sys.stderr.write("It failed!\n")
    raise SystemExit(1)
# NOTE(review): unreachable when run as a script — SystemExit is raised above;
# it only executes when the module is imported.
print("hhaha")
import random
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import sys
import copy
def cost_calculation(A, B):
    """Euclidean distance between the 2-D points *A* and *B*."""
    ax, ay = list(A)[0], list(A)[1]
    bx, by = list(B)[0], list(B)[1]
    return ((ax - bx) ** 2 + (ay - by) ** 2) ** .5
def a_star_search(ocean_edges, start, end):
    """Greedy best-first walk from *start* to *end* over *ocean_edges*.

    ocean_edges maps node id -> {'coordinates', 'neighbours', 'cost'}.
    Returns (reachable, visited):
      reachable -- False when *end* could not be reached from *start*
      visited   -- the sequence of nodes walked (always ends with *end*)

    BUG FIXES vs. the original:
      * uses the *end* parameter instead of the global ``end_node``
      * removed the leftover debug ``input()`` pause that blocked each step
      * the heuristic now measures from the actual neighbour node to the
        goal (the index into the neighbour list was used as a node id)
      * ``delete_edge`` is initialised, so ``start == end`` cannot raise
    """
    costs_so_far = 0
    dead_end = []
    visited = [start]
    delete_edge = True
    while visited[-1] != end:
        current = visited[-1]
        neighbours = ocean_edges[current]['neighbours']
        costs = [None] * len(neighbours)
        for idx, neighbour in enumerate(neighbours):
            if neighbour not in visited + dead_end:
                step_cost = ocean_edges[current]['cost'][idx]
                # A*-style estimate: straight-line distance to the goal.
                estimated_cost = cost_calculation(
                    ocean_edges[neighbour]['coordinates'], ocean_edges[end]['coordinates']
                )
                costs[idx] = step_cost + estimated_cost
        if [x for x in costs if x]:
            mincosts = costs.index(min(x for x in costs if x is not None))
            visited.append(neighbours[mincosts])
            costs_so_far += ocean_edges[current]['cost'][mincosts]
            delete_edge = True
        else:  # no further point is reachable from current point, back-track
            print('dead end')
            dead_end.append(current)
            print(visited)
            if len(visited) == 1:
                # Back at the start with nowhere to go: end is unreachable.
                delete_edge = False
                visited[-1] = end
            else:
                del visited[-1]
    return delete_edge, visited
# Five demo nodes with 2-D coordinates and a (directed) adjacency list.
nodes = list(range(0, 5))
coordinates = {
    0: (0, 3),
    1: (1, 2),
    2: (5, 5),
    3: (4, 3),
    4: (2, 5)
}
neighbours = {
    0: [1, 3],
    1: [0, 2, 3, 4],
    2: [1],
    3: [4, 1],
    4: [1, 3]
}
# Build the graph: per node, its coordinates, neighbour ids and edge lengths.
ocean_edges = {}
for key in nodes:
    ocean_edges[key] = {
        'coordinates': coordinates[key],
        'neighbours': neighbours[key],
        'cost': list(map(lambda v: cost_calculation(coordinates[v], coordinates[key]), neighbours[key]))
    }
print(ocean_edges[0]['neighbours'])
fig, ax = plt.subplots(3, 3)
#ax.get_yaxis().set_visible(True)
#ax.get_xaxis().set_visible(True)
#is_all_none = lambda L: not len(filter(lambda e: not e is None, L))
# Plot the full graph in the top-left panel (nodes red, edges as arrows).
for k in ocean_edges.keys():
    ax[0, 0].scatter(list(ocean_edges[k]['coordinates'])[0], list(ocean_edges[k]['coordinates'])[1], marker='o', color='red')
    ax[0, 0].annotate(str(k), ocean_edges[k]['coordinates'])
    for n in ocean_edges[k]['neighbours']:
        ax[0, 0].arrow(
            list(ocean_edges[k]['coordinates'])[0],
            list(ocean_edges[k]['coordinates'])[1],
            (list(ocean_edges[n]['coordinates'])[0] - list(ocean_edges[k]['coordinates'])[0]) / 1.2,
            (list(ocean_edges[n]['coordinates'])[1] - list(ocean_edges[k]['coordinates'])[1]) / 1.2,
            color='blue', head_width=0.1
        )
# For each candidate edge: remove it, re-run the search, restore the graph
# when removal makes the end node unreachable, and plot the resulting graph
# in the next panel of the 3x3 grid (xx/yy give the panel coordinates).
invalid_edges = [(0, 3), (1, 4), (4, 1), (1, 2)]
xx = [0, 0, 1, 1, 1, 2, 2, 2]
yy = [1, 2, 0, 1, 2, 0, 1, 2]
c = 0
for i in invalid_edges:
    start_node = list(i)[0]
    end_node = list(i)[1]
    # keep a backup so the deletion can be undone
    ocean_edges2 = copy.deepcopy(ocean_edges)
    index = ocean_edges[start_node]['neighbours'].index(end_node)
    #print('index', ocean_edges.neigbors[start_node].index(end_node))
    del ocean_edges[start_node]['neighbours'][index]
    del ocean_edges[start_node]['cost'][index]
    a = a_star_search(ocean_edges, start_node, end_node)
    if not list(a)[0]:
        print('not reachable')
        ocean_edges = copy.deepcopy(ocean_edges2)
        # dont delete edge
    for k in ocean_edges.keys():
        ax[xx[c], yy[c]].scatter(list(ocean_edges[k]['coordinates'])[0], list(ocean_edges[k]['coordinates'])[1], marker='o', color='red')
        ax[xx[c], yy[c]].annotate(str(k), ocean_edges[k]['coordinates'])
        for n in ocean_edges[k]['neighbours']:
            ax[xx[c], yy[c]].arrow(
                list(ocean_edges[k]['coordinates'])[0],
                list(ocean_edges[k]['coordinates'])[1],
                (list(ocean_edges[n]['coordinates'])[0]-list(ocean_edges[k]['coordinates'])[0])/1.2,
                (list(ocean_edges[n]['coordinates'])[1]-list(ocean_edges[k]['coordinates'])[1])/1.2,
                color='blue', head_width=0.1
            )
    c += 1
    print('for edge ', i, ': ', a)
plt.show()
|
from sysconfig import get_platform
from setuptools import setup, Extension
# Long description for PyPI comes straight from the README.
with open('README.rst') as fd:
    long_description = fd.read()
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Environment :: Win32 (MS Windows)',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: Microsoft :: Windows',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',
    'Programming Language :: Python :: 3 :: Only',
    'Programming Language :: Python :: Implementation :: CPython']
# Windows system libraries the C extension links against.
libs = ['user32', 'shell32', 'comctl32', 'gdi32']
# mingw builds link the versioned python library; MSVC links the stable ABI.
# NOTE(review): 'python3.6m' hard-codes one minor version while the
# classifiers also advertise 3.7 — confirm for mingw builds on other versions.
if get_platform().startswith('mingw'):
    libs.append('python3.6m')
else:
    libs.append('python3')
# The native notification extension module (notify._notify).
_notify = Extension(
    'notify._notify',
    sources=['src/_notify.c'],
    libraries=libs)
setup(
    name='winnotify',
    version='0.1.0',
    description='C extension to show native Windows notifications.',
    long_description=long_description,
    url='https://github.com/OzymandiasTheGreat/winnotify',
    author='Tomas Ravinskas',
    author_email='tomas.rav@gmail.com',
    classifiers=classifiers,
    packages=['notify'],
    ext_modules=[_notify])
|
# Scratch demo of basic set operations.
s = {1, 2, 3}
p = {4, 5, 6}
print(p)
# (the elements of s could equally be added one at a time with s.add)
s1 = s.union({1, 2, 3})
print(s, s1)
print(s.isdisjoint(p))
s.union()  # no-op: union() without arguments just copies, result discarded
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from future import standard_library
standard_library.install_aliases()
import sys
from nose.tools import assert_equal
from openpyxl import load_workbook
from desktop.lib.export_csvxls import create_generator, make_response
if sys.version_info[0] > 2:
from io import BytesIO as string_io
else:
from cStringIO import StringIO as string_io
def content_generator(header, data):
    """Yield a single (header, rows) sheet for the export generators."""
    yield (header, data)
def test_export_csv():
    """CSV export: plain rows, embedded commas (quoted), NULLs, URLs, and
    non-ASCII filename encoding for both generic and Firefox user agents."""
    headers = ["x", "y"]
    data = [ ["1", "2"], ["3", "4"], ["5,6", "7"], [None, None], ["http://gethue.com", "http://gethue.com"] ]
    # Check CSV
    generator = create_generator(content_generator(headers, data), "csv")
    response = make_response(generator, "csv", "foo")
    assert_equal("application/csv", response["content-type"])
    content = b''.join(response.streaming_content)
    assert_equal(b'x,y\r\n1,2\r\n3,4\r\n"5,6",7\r\nNULL,NULL\r\nhttp://gethue.com,http://gethue.com\r\n', content)
    assert_equal('attachment; filename="foo.csv"', response["content-disposition"])
    # Check non-ASCII for any browser except FF or no browser info:
    # the filename is percent-encoded into a plain ``filename=`` parameter.
    generator = create_generator(content_generator(headers, data), "csv")
    response = make_response(generator, "csv", u'gんtbhんjk?¥n')
    assert_equal("application/csv", response["content-type"])
    content = b''.join(response.streaming_content)
    assert_equal(b'x,y\r\n1,2\r\n3,4\r\n"5,6",7\r\nNULL,NULL\r\nhttp://gethue.com,http://gethue.com\r\n', content)
    assert_equal('attachment; filename="g%E3%82%93tbh%E3%82%93jk%EF%BC%9F%EF%BF%A5n.csv"', response["content-disposition"])
    # Check non-ASCII for FF browser: Firefox gets the RFC 5987 ``filename*=``
    # form instead.
    generator = create_generator(content_generator(headers, data), "csv")
    response = make_response(
        generator, "csv", u'gんtbhんjk?¥n',
        user_agent='Mozilla / 5.0(Macintosh; Intel Mac OS X 10.12;rv:59.0) Gecko / 20100101 Firefox / 59.0)'
    )
    assert_equal("application/csv", response["content-type"])
    content = b''.join(response.streaming_content)
    assert_equal(b'x,y\r\n1,2\r\n3,4\r\n"5,6",7\r\nNULL,NULL\r\nhttp://gethue.com,http://gethue.com\r\n', content)
    assert_equal(
        'attachment; filename*="g%E3%82%93tbh%E3%82%93jk%EF%BC%9F%EF%BF%A5n.csv"',
        response["content-disposition"]
    )
def test_export_xls():
    """XLSX export: NULL cells become the string "NULL" and URLs become
    =HYPERLINK(...) formulas; also checks content type and filename."""
    headers = ["x", "y"]
    data = [["1", "2"], ["3", "4"], ["5,6", "7"], [None, None], ["http://gethue.com", "http://gethue.com"]]
    sheet = [headers] + data
    # Check XLS
    generator = create_generator(content_generator(headers, data), "xls")
    response = make_response(generator, "xls", "foo")
    assert_equal("application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", response["content-type"])
    # Expected sheet after export transformations (None -> "NULL", URL -> formula).
    expected_data = [[cell is not None and cell.replace("http://gethue.com", '=HYPERLINK("http://gethue.com")') or "NULL" for cell in row] for row in sheet]
    sheet_data = _read_xls_sheet_data(response)
    assert_equal(expected_data, sheet_data)
    assert_equal('attachment; filename="foo.xlsx"', response["content-disposition"])
def _read_xls_sheet_data(response):
    # Load the XLSX bytes from the response into openpyxl and return the
    # active sheet as a list of per-row cell-value lists.
    content = bytes(response.content)
    data = string_io()
    data.write(content)
    wb = load_workbook(filename=data, read_only=True)
    ws = wb.active
    return [[cell.value if cell else cell for cell in row] for row in ws.rows]
|
from flask import (
Flask, Response, render_template, request, g, abort, make_response
)
from mock import patch, MagicMock
from openc2 import Command, Response as OpenC2Response
from libcloud.compute.base import NodeImage, NodeSize, Node
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import itertools
import json
import traceback
from frontend import _seropenc2, _deseropenc2, _instcmds
from frontend import CREATE, START, STOP, DELETE, NewContextAWS
app = Flask(__name__)
import logging
#app.logger.setLevel(logging.DEBUG)
# Cloud backend is chosen at import time: GCE when True, EC2 otherwise.
# Credentials are read from local dot-files; node-creation defaults differ
# per provider.
if True:
    # GCE
    provider = Provider.GCE
    gcpkey = '.gcp.json'
    with open(gcpkey) as fp:
        email = json.load(fp)['client_email']
    driverargs = (email, gcpkey)
    driverkwargs = dict(project='openc2-cloud-261123', region='us-west-1')
    createnodekwargs = dict(location='us-central1-a', size='f1-micro')
    # freebsd-12-0-release-amd64
else:
    # EC2
    access_key, secret_key = open('.keys').read().split()
    provider = Provider.EC2
    driverargs = (access_key, secret_key)
    driverkwargs = dict(region='us-west-2')
    # EC2 create_node wants a size object; a mock carrying the id suffices.
    sizeobj = MagicMock()
    sizeobj.id = 't2.nano'
    createnodekwargs = dict(size=sizeobj)
def genresp(oc2resp, command_id):
    '''Build a Flask Response carrying the serialised OpenC2 *oc2resp*,
    echoing *command_id* in the X-Request-ID header.'''
    # Encode explicitly: Flask's automatic encoding is undocumented.
    payload = _seropenc2(oc2resp).encode('utf-8')
    return Response(
        response=payload,
        status=oc2resp.status,
        headers={'X-Request-ID': command_id},
        mimetype='application/openc2-rsp+json;version=1.0',
    )
class CommandFailure(Exception):
    """An OpenC2 command could not be carried out.

    Instances carry the failing command, a human-readable message, the
    request's command id, and the HTTP status to report (class default 400).
    """

    status_code = 400

    def __init__(self, cmd, msg, command_id, status_code=None):
        self.cmd = cmd
        self.msg = msg
        self.command_id = command_id
        # Only override the class-level default when a code was supplied.
        if status_code is not None:
            self.status_code = status_code
@app.errorhandler(CommandFailure)
def handle_commandfailure(err):
    """Convert an uncaught CommandFailure into an OpenC2 error response."""
    resp = OpenC2Response(status=err.status_code, status_text=err.msg)
    return genresp(resp, err.command_id)
# Generator of unique default instance names: openc2test-1, openc2test-2, ...
nameiter = ('openc2test-%d' % i for i in itertools.count(1))
@app.route('/', methods=['GET', 'POST'])
@app.route('/ec2', methods=['GET', 'POST'])
def ec2route():
    """Single OpenC2 endpoint: create/start/stop/delete/query cloud nodes.

    The request body is a serialised OpenC2 Command; the X-Request-ID header
    is mandatory and echoed back on every response. Backend errors surface
    as CommandFailure, handled by handle_commandfailure above.
    """
    app.logger.debug('received msg: %s' % repr(request.data))
    try:
        cmdid = request.headers['X-Request-ID']
    except KeyError:
        # No command id: reply with a plain-text 400 rather than OpenC2.
        resp = make_response('missing X-Request-ID header'.encode('us-ascii'), 400)
        resp.charset = 'us-ascii'
        resp.mimetype = 'text/plain'
        return resp
    req = _deseropenc2(request.data)
    ncawsargs = {}
    status = 200
    clddrv = get_clouddriver()
    try:
        # Most actions address an existing instance by name.
        if hasattr(req.target, 'instance'):
            inst = req.target.instance
        if request.method == 'POST' and req.action == CREATE:
            ami = req.target['image']
            # libcloud wants an image object; a mock carrying the id suffices.
            img = MagicMock()
            img.id = ami
            # Use the requested instance name, or generate a unique one.
            try:
                inst = req.target.instance
            except AttributeError:
                inst = next(nameiter)
            r = clddrv.create_node(image=img,
                name=inst, **createnodekwargs)
            inst = r.name
            app.logger.debug('started ami %s, instance id: %s' % (ami, inst))
            res = inst
            ncawsargs['instance'] = inst
        elif request.method == 'POST' and req.action == START:
            get_node(inst).start()
            res = ''
        elif request.method == 'POST' and req.action == STOP:
            if not get_node(inst).stop_node():
                raise RuntimeError(
                    'unable to stop instance: %s' % repr(inst))
            res = ''
        elif request.method == 'POST' and req.action == DELETE:
            get_node(inst).destroy()
            res = ''
        elif request.method in ('GET', 'POST') and req.action == 'query':
            insts = [ x for x in clddrv.list_nodes() if
                x.name == inst ]
            if insts:
                res = str(insts[0].state)
            else:
                res = 'instance not found'
                status = 404
        else:
            raise Exception('unhandled request')
    except Exception as e:
        # Any backend failure becomes an OpenC2 error response (400).
        app.logger.debug('generic failure: %s' % repr(e))
        app.logger.debug(traceback.format_exc())
        raise CommandFailure(req, repr(e), cmdid)
    # CREATE reports the new instance name back in the results payload.
    if ncawsargs:
        kwargs = dict(results=NewContextAWS(**ncawsargs))
    else:
        kwargs = {}
    resp = OpenC2Response(status=status, status_text=res, **kwargs)
    app.logger.debug('replied msg: %s' % repr(_seropenc2(resp)))
    resp = make_response(_seropenc2(resp))
    # Copy over the command id from the request
    resp.headers['X-Request-ID'] = request.headers['X-Request-ID']
    return resp
def get_node(instname):
    """Return the first cloud node whose name equals *instname*.

    Raises IndexError when no node with that name exists.
    """
    nodes = get_clouddriver().list_nodes()
    return [node for node in nodes if node.name == instname][0]
def get_clouddriver():
    """Return the libcloud driver, creating it once per Flask app context.

    The instance is cached on ``g`` so repeated calls within a request
    reuse the same driver object.
    """
    if not hasattr(g, 'driver'):
        driver_cls = get_driver(provider)
        g.driver = driver_cls(*driverargs, **driverkwargs)
    return g.driver
import unittest
from libcloud.compute.drivers.dummy import DummyNodeDriver
from libcloud.compute.base import Node
from libcloud.compute.types import NodeState
class BetterDummyNodeDriver(DummyNodeDriver):
    """DummyNodeDriver extended with create/stop/start behaviour the tests need."""
    def __init__(self, *args, **kwargs):
        # Per-driver counter for unique node ids/addresses.
        self._numiter = itertools.count(1)
        return super(BetterDummyNodeDriver, self).__init__(*args, **kwargs)
    def create_node(self, **kwargs):
        # Honour the requested name and size (the stock dummy driver ignores
        # both); everything else is fixed test data.
        num = next(self._numiter)
        sizename = kwargs.pop('size', 'defsize')
        name = kwargs.pop('name', 'dummy-%d' % (num))
        n = Node(id=num,
            name=name,
            state=NodeState.RUNNING,
            public_ips=['127.0.0.%d' % (num)],
            private_ips=[],
            driver=self,
            size=NodeSize(id='s1', name=sizename, ram=2048,
                disk=160, bandwidth=None, price=0.0,
                driver=self),
            image=NodeImage(id='i2', name='image', driver=self),
            extra={'foo': 'bar'})
        self.nl.append(n)
        return n
    def stop_node(self, node):
        node.state = NodeState.STOPPED
        return True
    def start_node(self, node):
        node.state = NodeState.RUNNING
        return True
def _selfpatch(name):
return patch('%s.%s' % (__name__, name))
class BackendTests(unittest.TestCase):
def setUp(self):
self.test_client = app.test_client(self)
def test_genresp(self):
res = 'soijef'
cmdid = 'weoiudf'
resp = OpenC2Response(status=400)
# that a generated response
r = genresp(resp, command_id=cmdid)
# has the passed in status code
self.assertEqual(r.status_code, 400)
# has the correct mime-type
self.assertEqual(r.content_type, 'application/openc2-rsp+json;version=1.0')
# has the correct body
self.assertEqual(r.data, _seropenc2(resp).encode('utf-8'))
# and the command id in the header
self.assertEqual(r.headers['X-Request-ID'], cmdid)
# that a generated response
resp = OpenC2Response(status=200)
r = genresp(resp, cmdid)
# has the passed status code
self.assertEqual(r.status_code, 200)
def test_cmdfailure(self):
cmduuid = 'weoiud'
ami = 'owiejp'
failmsg = 'this is a failure message'
cmd = Command(action=CREATE, target=NewContextAWS(image=ami))
oc2resp = OpenC2Response(status=500, status_text=failmsg)
# that a constructed CommandFailure
failure = CommandFailure(cmd, failmsg, cmduuid, 500)
# when handled
r = handle_commandfailure(failure)
# has the correct status code
self.assertEqual(r.status_code, 500)
# has the correct mime-type
self.assertEqual(r.content_type, 'application/openc2-rsp+json;version=1.0')
# has the correct body
self.assertEqual(r.data, _seropenc2(oc2resp).encode('utf-8'))
# and the command id in the header
self.assertEqual(r.headers['X-Request-ID'], cmduuid)
# that a constructed CommandFailure
failure = CommandFailure(cmd, failmsg, cmduuid, 500)
# when handled
r = handle_commandfailure(failure)
# has the correct status code
self.assertEqual(r.status_code, 500)
@_selfpatch('get_driver')
@_selfpatch('open')
def test_getclouddriver(self, op, drvmock):
with app.app_context():
# That the client object gets returned
self.assertIs(get_clouddriver(), drvmock()())
# that the class for the correct provider was obtained
drvmock.assert_any_call(provider)
# and that the driver was created with the correct arguments
drvmock().assert_any_call(*driverargs, **driverkwargs)
# reset provider class mock
drvmock().reset_mock()
# and does no additional calls
drvmock().assert_not_called()
# that a second call returns the same object
self.assertIs(get_clouddriver(), drvmock()())
def test_nocmdid(self):
# That a request w/o a command id
response = self.test_client.post('/ec2', data='bogus')
# that it fails
self.assertEqual(response.status_code, 400)
# that it says why
self.assertEqual(response.headers['content-type'], 'text/plain; charset=us-ascii')
# that it says why
self.assertEqual(response.data, 'missing X-Request-ID header'.encode('utf-8'))
@_selfpatch('nameiter')
@_selfpatch('get_clouddriver')
def test_create(self, drvmock, nameiter):
cmduuid = 'someuuid'
ami = 'Ubuntu 9.10'
instname = 'somename'
# that the name is return by nameiter
nameiter.__next__.return_value = instname
cmd = Command(action=CREATE, target=NewContextAWS(image=ami))
# Note that 0, creates two nodes, not zero, so create one instead
dnd = BetterDummyNodeDriver(1)
dnd.list_nodes()[0].destroy()
self.assertEqual(len(dnd.list_nodes()), 0)
drvmock.return_value = dnd
# That a request to create a command
response = self.test_client.post('/ec2', data=_seropenc2(cmd),
headers={ 'X-Request-ID': cmduuid })
# Is successful
self.assertEqual(response.status_code, 200)
# and returns a valid OpenC2 response
dcmd = _deseropenc2(response.data)
# that the status is correct
self.assertEqual(dcmd.status, 200)
# and that the image was run
self.assertEqual(len(dnd.list_nodes()), 1)
# and has the correct instance id
node = dnd.list_nodes()[0]
runinstid = node.name
self.assertEqual(runinstid, instname)
self.assertEqual(dcmd.results['instance'], runinstid)
# and was launched w/ the correct size
self.assertEqual(node.size.name, createnodekwargs['size'])
# and has the same command id
self.assertEqual(response.headers['X-Request-ID'], cmduuid)
# clean up previously launched instance
dnd.list_nodes()[0].destroy()
# That a request to create a command w/ instance name
instname = 'anotherinstancename'
cmd = Command(action=CREATE, target=NewContextAWS(image=ami,
instance=instname))
response = self.test_client.post('/ec2', data=_seropenc2(cmd),
headers={ 'X-Request-ID': cmduuid })
# Is successful
self.assertEqual(response.status_code, 200)
# and returns a valid OpenC2 response
dcmd = _deseropenc2(response.data)
# that the status is correct
self.assertEqual(dcmd.status, 200)
# and that the image was run
self.assertEqual(len(dnd.list_nodes()), 1)
# and has the correct instance id
node = dnd.list_nodes()[0]
runinstid = node.name
self.assertEqual(runinstid, instname)
self.assertEqual(dcmd.results['instance'], runinstid)
# That when we get the same command as a get request
response = self.test_client.get('/ec2', data=_seropenc2(cmd),
headers={ 'X-Request-ID': cmduuid })
# that it fails
self.assertEqual(response.status_code, 400)
@_selfpatch('get_clouddriver')
def test_query(self, drvmock):
    """Query must report node state via GET and reject POST."""
    req_id = 'someuuid'
    driver = BetterDummyNodeDriver(1)
    drvmock.return_value = driver
    # Query the single pre-existing node by its name.
    existing = driver.list_nodes()[0]
    target_name = existing.name
    query_cmd = Command(action='query',
                        target=NewContextAWS(instance=target_name))
    resp = self.test_client.get('/ec2', data=_seropenc2(query_cmd),
                                headers={ 'X-Request-ID': req_id })
    # Transport succeeds and the OpenC2 body mirrors the node state.
    self.assertEqual(resp.status_code, 200)
    openc2_resp = _deseropenc2(resp.data)
    self.assertEqual(openc2_resp.status_text, existing.state)
    # The request id round-trips in the response headers.
    self.assertEqual(resp.headers['X-Request-ID'], req_id)
    # Remove the node so the same query now targets nothing.
    driver.list_nodes()[0].destroy()
    resp = self.test_client.get('/ec2', data=_seropenc2(query_cmd),
                                headers={ 'X-Request-ID': req_id })
    # Transport-level success, but the OpenC2 status reports 404.
    self.assertEqual(resp.status_code, 200)
    openc2_resp = _deseropenc2(resp.data)
    self.assertEqual(openc2_resp.status, 404)
    self.assertEqual(openc2_resp.status_text, 'instance not found')
    # Queries must arrive as GET; a POST of the same command is rejected.
    resp = self.test_client.post('/ec2',
                                 data=_seropenc2(query_cmd))
    self.assertEqual(resp.status_code, 400)
@_selfpatch('get_clouddriver')
def test_start(self, drvmock):
    """START via POST must transition a stopped node back to RUNNING."""
    req_id = 'someuuid'
    driver = BetterDummyNodeDriver(1)
    drvmock.return_value = driver
    # Stop the pre-existing node so START has something to do.
    target_name = driver.list_nodes()[0].name
    node = driver.list_nodes()[0]
    node.stop_node()
    self.assertEqual(node.state, NodeState.STOPPED)
    start_cmd = Command(action=START,
                        target=NewContextAWS(instance=target_name))
    resp = self.test_client.post('/ec2', data=_seropenc2(start_cmd),
                                 headers={ 'X-Request-ID': req_id })
    # Transport succeeds and the body parses as OpenC2.
    self.assertEqual(resp.status_code, 200)
    openc2_resp = _deseropenc2(resp.data)
    # The request id round-trips and the node is running again.
    self.assertEqual(resp.headers['X-Request-ID'], req_id)
    self.assertEqual(node.state, NodeState.RUNNING)
    # START must arrive as POST; the GET form is rejected.
    resp = self.test_client.get('/ec2', data=_seropenc2(start_cmd),
                                headers={ 'X-Request-ID': req_id })
    self.assertEqual(resp.status_code, 400)
@_selfpatch('get_clouddriver')
def test_stop(self, drvmock):
    """STOP via POST must stop a running node; driver errors map to 400."""
    req_id = 'someuuid'
    driver = BetterDummyNodeDriver(1)
    drvmock.return_value = driver
    # The pre-existing node starts out running.
    target_name = driver.list_nodes()[0].name
    node = driver.list_nodes()[0]
    self.assertEqual(node.state, NodeState.RUNNING)
    stop_cmd = Command(allow_custom=True, action=STOP,
                       target=NewContextAWS(instance=target_name))
    resp = self.test_client.post('/ec2', data=_seropenc2(stop_cmd),
                                 headers={ 'X-Request-ID': req_id })
    # Transport succeeds and the body parses as OpenC2.
    self.assertEqual(resp.status_code, 200)
    openc2_resp = _deseropenc2(resp.data)
    # The request id round-trips and the node is now stopped.
    self.assertEqual(resp.headers['X-Request-ID'], req_id)
    self.assertEqual(node.state, NodeState.STOPPED)
    # STOP must arrive as POST; the GET form is rejected.
    resp = self.test_client.get('/ec2', data=_seropenc2(stop_cmd),
                                headers={ 'X-Request-ID': req_id })
    self.assertEqual(resp.status_code, 400)
    with patch.object(driver, 'stop_node') as stop_mock:
        # A driver-level stop failure must surface as an HTTP 400 ...
        failing_cmd = Command(action=STOP,
                              target=NewContextAWS(instance=target_name))
        stop_mock.return_value = False
        resp = self.test_client.post('/ec2', data=_seropenc2(failing_cmd),
                                     headers={ 'X-Request-ID': req_id })
        self.assertEqual(resp.status_code, 400)
        # ... carrying an OpenC2 error body and the same request id.
        err = _deseropenc2(resp.data)
        self.assertEqual(err.status, 400)
        self.assertEqual(resp.headers['X-Request-ID'], req_id)
@_selfpatch('get_clouddriver')
def test_delete(self, drvmock):
    """DELETE via POST must terminate the instance."""
    req_id = 'someuuid'
    driver = BetterDummyNodeDriver(1)
    drvmock.return_value = driver
    # Target the single pre-existing node.
    target_name = driver.list_nodes()[0].name
    node = driver.list_nodes()[0]
    delete_cmd = Command(action=DELETE,
                         target=NewContextAWS(instance=target_name))
    resp = self.test_client.post('/ec2', data=_seropenc2(delete_cmd),
                                 headers={ 'X-Request-ID': req_id })
    # Transport succeeds and the body parses as OpenC2.
    self.assertEqual(resp.status_code, 200)
    openc2_resp = _deseropenc2(resp.data)
    # The request id round-trips and the node is terminated.
    self.assertEqual(resp.headers['X-Request-ID'], req_id)
    self.assertEqual(node.state, NodeState.TERMINATED)
    # DELETE must arrive as POST; the GET form is rejected.
    resp = self.test_client.get('/ec2', data=_seropenc2(delete_cmd),
                                headers={ 'X-Request-ID': req_id })
    self.assertEqual(resp.status_code, 400)
|
from django.core.management.base import BaseCommand
from PiControl.models import Schedule
import rollbar
from django.conf import settings
class Command(BaseCommand):
    """Management command (cron entry point): run every active Schedule once."""
    help = 'cron for schedule'

    def handle(self, *args, **options):
        # Report runtime errors to Rollbar with the token from settings.
        rollbar.init(settings.ROLLBAR['access_token'])
        # Only schedules flagged active are executed.
        schedules = Schedule.objects.filter(active=True)
        for schedule in schedules:
            schedule.activate()
        self.stdout.write(self.style.SUCCESS('Success'))
"for"
for i in ["dubian","gomez",3]:
print("hola",end=" ")
for i in "cosas que pasan ":
print("hola",end=" ")
email = False
for i in "dubian@cosasquepasan":
if i == "@":
email=True
if email:
print("es correcto")
else:
print("no es correcto")
email2 = True
mi_i = input("introduce tu email: ")
for i in mi_i:
if i == "@":
email2=True
if email2:
print("es correcto")
else:
print("no es correcto")v
email2 = 0
mi_i = input("introduce tu email: ")
for i in mi_i:
if i == "@" or i ==".":
email2+=1
if email2==1:
print("es correcto")
else:
print("no es correcto")
"range"
for i in range(5):
print(f"valor de la variable {i}")
for i in range(5,50):
print(f"valor de la variable {i}")
for i in range(5,50,3):
print(f"valor de la variable {i}")
len("juan")
v = False
e = input("introduce tu email: ")
for i in range(len(e)):
if e[i]=="@":
v=True
if v:
print("bien")
else:
print("mal")
len(e)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class AutoItem(scrapy.Item):
    """Scraped car-listing record: one Field per attribute extracted
    from a dealer listing page."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    # Listing price and odometer reading.
    price = scrapy.Field()
    mileage = scrapy.Field()
    # Appearance.
    exterior = scrapy.Field()
    interior = scrapy.Field()
    color = scrapy.Field()
    # Vehicle classification and drivetrain details.
    car_type = scrapy.Field()
    body_style = scrapy.Field()
    fuel_type = scrapy.Field()
    stock = scrapy.Field()
    vin = scrapy.Field()
    engine = scrapy.Field()
    transmission = scrapy.Field()
    drivetrain = scrapy.Field()
    doors = scrapy.Field()
    # Listing metadata and seller contact info.
    title = scrapy.Field()
    dealer = scrapy.Field()
    dealer_url = scrapy.Field()
    phone = scrapy.Field()
    images = scrapy.Field()
    # Fuel economy figures.
    city_mpg = scrapy.Field()
    highway_mpg = scrapy.Field()
    url = scrapy.Field()
|
# Rewrite TestData.properties in place, pinning the connect-browser URL.
path = r'C:\Users\admin\Desktop\TestData.properties'

# Read every line first, then rewrite the file replacing only the target key.
with open(path, 'r', encoding='UTF-8') as f:
    li = f.readlines()

target_key = 'en_us.common.common.connectbrowser.url.value'
with open(path, 'w', encoding='UTF-8') as w:
    for line in li:
        if line.startswith(target_key):
            # NOTE(review): 'https:/' has a single slash -- confirm intended.
            w.write('en_us.common.common.connectbrowser.url.value = https:/12.12.12.12/ui/\n')
        else:
            w.write(line)
|
import tensorflow as tf
import model.layer as layers
import util.operation as op
from model.attr_net import Attr_Net
from model.senti_net import Senti_Net
class Joint_Net(object):
    """Joint attribute + sentiment network: builds a single TF graph that
    trains Attr_Net and Senti_Net on a shared review embedding."""

    def __init__(self, config):
        self.graph = tf.Graph()
        self.config = config
        self.A_Net = Attr_Net(self.config)
        self.S_Net = Senti_Net(self.config)

    def build_graph(self):
        """Construct the joint graph and return it.

        Creates the inputs, the shared embedding lookup, both
        sub-networks, the summed loss, and two alternative update ops:
        `optimizer` (unclipped) and `g_updates` (gradient-clipped).
        """
        if self.config['rand_seed'] is not None:
            rand_seed = self.config['rand_seed']
            tf.set_random_seed(rand_seed)
            print('set tf random seed: %s' % self.config['rand_seed'])
        with self.graph.as_default():
            self.review, self.attr_label, self.senti_label, self.is_training, self.table = layers.joint_net_input(self.config)
            # convert table to variable
            table_v = tf.Variable(self.table, name='table')
            # review_embed : (batch, rev_len, sent_len, emb_dim)
            review_embed = tf.nn.embedding_lookup(table_v, self.review)
            # rev_len:(batch,)
            # sent_len:(batch, rev)
            rev_len, sent_len = op.generate_mask(self.config, self.review)
            with tf.variable_scope('attr_net', reuse=tf.AUTO_REUSE):
                self.A_Net.build(review_embed, rev_len, sent_len, self.attr_label)
            with tf.variable_scope('senti_net', reuse=tf.AUTO_REUSE):
                self.S_Net.build(review_embed, rev_len, sent_len, self.A_Net.doc_att, self.attr_label, self.senti_label)
            # Joint objective: sum of both sub-network losses.
            self.joint_loss = self.A_Net.loss + self.S_Net.loss
            tv = tf.trainable_variables()
            for v in tv:
                print(v)
            self.global_step = tf.Variable(0, trainable=False)
            initial_learning_rate = self.config['learning_rate']
            # Step-wise exponential LR decay: x0.9 every 300 steps.
            self.learning_rate = tf.train.exponential_decay(
                initial_learning_rate,
                global_step=self.global_step,
                decay_steps=300,
                decay_rate=0.9,
                staircase=True)
            Optimizer = tf.train.AdamOptimizer(self.learning_rate)
            # NOTE: `optimizer` and `g_updates` are two distinct update
            # ops sharing global_step; running both would double-step.
            self.optimizer = Optimizer.minimize(
                self.joint_loss,
                global_step=self.global_step)
            self.init = tf.global_variables_initializer()
            self.saver = tf.train.Saver(max_to_keep=self.config["max_to_keep"])
            self.all_variables = tf.global_variables()
            self.grads_and_vars = Optimizer.compute_gradients(self.joint_loss)
            for grad, var in self.grads_and_vars:
                if grad is None:
                    print(var)
            # BUG FIX: skip None gradients -- tf.clip_by_value(None, ...)
            # raises, and the diagnostic loop above shows None gradients
            # are expected for some variables.
            self.capped_gvs = [(tf.clip_by_value(grad, -1, 1), var)
                               for grad, var in self.grads_and_vars
                               if grad is not None]
            self.g_updates = Optimizer.apply_gradients(
                self.capped_gvs,
                global_step=self.global_step)
        return self.graph
from cryptography.fernet import Fernet

# Generate a fresh Fernet key and persist it so the same key can be
# reused to decrypt data across runs.
key = Fernet.generate_key()
# Use a context manager so the handle is closed even if the write fails.
with open("encryption_key.txt", 'wb') as key_file:
    key_file.write(key)
# Build one bag-of-words count vector per review line.
#   sorted.txt    : vocabulary, first whitespace-separated token per line
#   processed.tsv : tab-separated reviews; tokens occupy columns 2..len-2
#   vectors.tsv   : output, tab-separated counts followed by column 1 of
#                   the input row (presumably the label -- confirm)

# Load the vocabulary (first token of each line).
vec = []
with open("sorted.txt", "r") as infile:
    for line in infile:
        words = line.split()
        vec.append(words[0])

dic = {}
with open("processed.tsv", "r") as infile, open("vectors.tsv", "w") as outfile:
    count = 1
    for line in infile:
        # Reset every vocabulary count for this review.
        for entry in vec:
            dic[entry] = 0
        rev = line.split("\t")
        # Count vocabulary hits among this row's tokens.
        for item in range(2, len(rev) - 1):
            word = rev[item]
            if word in dic:
                dic[word] += 1
        # BUG FIX: `out` was created once outside the loop, so every
        # output row replayed all previous rows' counts (rows kept
        # growing).  Rebuild the row for each input line instead.
        out = []
        for key in dic:
            out.append(dic[key])
        for value in out:
            outfile.write(str(value) + '\t')
        outfile.write(rev[1] + "\n")
        count += 1
        if (count % 1000) == 0:
            print("Processed line: " + str(count))
|
from ... import api as Z
class Layer(object):
    """
    Object that pairs a tensor transformation with the state it uses.

    Holds the input/output signatures and the list of parameter tensors
    registered through `param`.
    """

    def __init__(self, x_sigs=None, y_sigs=None):
        """
        x_sigs -- optional list of input signatures; all spatial ndims
                  present must agree.
        y_sigs -- optional list of output signatures; defaults to x_sigs.
        """
        if x_sigs is not None:
            assert isinstance(x_sigs, list)
            # Validate that every signature with a known spatial ndim
            # agrees with the others.  (The original also stored the
            # shared ndim in a local that was never used afterwards --
            # only the asserts matter, so the dead code is dropped.)
            xsnds = [sig.spatial_ndim_or_none() for sig in x_sigs]
            xsnds = [ndim for ndim in xsnds if ndim is not None]
            if xsnds:
                assert len(set(xsnds)) == 1
        if y_sigs is None:
            y_sigs = x_sigs
        self._x_sigs = x_sigs
        self._y_sigs = y_sigs
        self._params = []

    def x_sigs(self):
        """Return the input signatures (or None)."""
        return self._x_sigs

    def y_sigs(self):
        """Return the output signatures (or None)."""
        return self._y_sigs

    def params(self):
        """Return the parameter tensors registered so far."""
        return self._params

    def param(self, x, learned=True):
        """Wrap `x` as a backend tensor.

        Learned tensors become variables and are tracked in `_params`;
        non-learned ones become constants.  None passes through.
        """
        if x is None:
            return None
        if learned:
            x = Z.variable(x)
            self._params.append(x)
        else:
            x = Z.constant(x)
        return x

    def forward(self, xx, is_training):
        """Map a list of inputs to a list of outputs (abstract)."""
        raise NotImplementedError
class XYLayer(Layer):
    """
    Layer that transforms one input into one output.
    """

    def __init__(self, x_sig, y_sig=None):
        # Wrap the single signatures into the list form Layer expects.
        y_sigs = None if y_sig is None else [y_sig]
        Layer.__init__(self, [x_sig], y_sigs)

    def forward_x_y(self, x, is_training):
        # Subclasses implement the single-input transformation here.
        raise NotImplementedError

    def forward(self, xx, is_training):
        # Unwrap the singleton input list, delegate, re-wrap the result.
        assert len(xx) == 1
        return [self.forward_x_y(xx[0], is_training)]
class XXYLayer(Layer):
    """
    Layer that transforms multiple inputs into one output.
    """

    def __init__(self, x_sigs, y_sig):
        Layer.__init__(self, x_sigs, [y_sig])

    def forward_xx_y(self, x, is_training):
        # Subclasses implement the multi-input transformation here.
        raise NotImplementedError

    def forward(self, xx, is_training):
        # Delegate and wrap the single output in a list.
        return [self.forward_xx_y(xx, is_training)]
|
import spotipy
import spotipy.util as util
def Spotify_Client(path):
    """Build an authenticated spotipy client from a 3-line credentials
    file (username, client id, client secret -- one value per line)."""
    with open(path, "r") as cred_file:
        username, client_id, client_secret = [entry.strip() for entry in cred_file]
    redirect_uri = "http://localhost:8000"
    scope = "playlist-read-private playlist-read-collaborative user-read-recently-played user-library-read user-top-read"
    auth_token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)
    return spotipy.Spotify(auth=auth_token)
from dal import autocomplete
from django.db.models import Q
from django.shortcuts import render
from django.utils import six
from django.utils.decorators import method_decorator
from django.views import generic
from django.views.decorators.cache import never_cache
from .models import DocumentTemplate
from .forms import MyCustomForm
class DocumentTemplateFormView(generic.FormView):
    """Render the document form page using MyCustomForm."""
    template_name = 'core/document_form.html'
    form_class = MyCustomForm
class DocumentTemplateAutocomplete(autocomplete.Select2QuerySetView):
    """Select2 autocomplete backed by DocumentTemplate, filtered by name."""

    @method_decorator(never_cache)
    def dispatch(self, request, *args, **kwargs):
        # never_cache: suggestion lists must not be cached by the browser.
        return super(DocumentTemplateAutocomplete, self).dispatch(request, *args, **kwargs)

    def get_queryset(self):
        # Apply a case-insensitive name filter only when the user typed
        # something.
        queryset = DocumentTemplate.objects.all()
        if not self.q:
            return queryset
        return queryset.filter(Q(name__icontains=self.q))

    def get_result_label(self, result):
        # six.text_type keeps the label py2/py3 compatible.
        return six.text_type(result.name)
|
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as charts
import numpy as numpy
import itertools as tools
def plot_learning_curves(train_losses, valid_losses, train_accuracies, valid_accuracies):
    """Plot loss and accuracy curves side by side; save Learning_Curve.png."""
    fig, axes = charts.subplots(1, 2, figsize=(20, 10))
    loss_ax = axes[0]
    acc_ax = axes[1]
    # Left panel: per-epoch training/validation loss.
    loss_ax.set_title('Loss Curves')
    loss_ax.plot(train_losses, 'C0', label='Training Loss')
    loss_ax.plot(valid_losses, 'C1', label='Validation Loss')
    loss_ax.legend(loc="upper right")
    loss_ax.set_xlabel("Epoch")
    loss_ax.set_ylabel("Loss")
    # Right panel: per-epoch training/validation accuracy.
    acc_ax.set_title('Accuracy Curves')
    acc_ax.plot(train_accuracies, 'C0', label='Training Accuracy')
    acc_ax.plot(valid_accuracies, 'C1', label='Validation Accuracy')
    acc_ax.legend(loc="upper left")
    acc_ax.set_xlabel("Epoch")
    acc_ax.set_ylabel("Accuracy")
    fig.savefig('Learning_Curve.png')
def plot_confusion_matrix(results, class_names):
    """Draw a row-normalized confusion matrix from (true, predicted)
    pairs and save it as Confusion_Matrix.png."""

    def _draw(cm, labels, chart_name='conf_matrix_internal_function'):
        # Normalize each row so cells show per-class proportions.
        cm = cm.astype('float') / cm.sum(axis=1)[:, numpy.newaxis]
        charts.imshow(cm, interpolation='nearest', cmap=charts.cm.Blues)
        charts.title(chart_name)
        charts.colorbar()
        tick_positions = numpy.arange(len(labels))
        charts.xticks(tick_positions, labels, rotation=45)
        charts.yticks(tick_positions, labels)
        # Annotate every cell, flipping text color on dark backgrounds.
        threshold = cm.max() / 2.
        for row, col in tools.product(range(cm.shape[0]), range(cm.shape[1])):
            cell_color = "white" if cm[row, col] > threshold else "black"
            charts.text(col, row, format(cm[row, col], '.2f'),
                        horizontalalignment="center", color=cell_color)
        charts.ylabel('True')
        charts.xlabel('Predicted')
        charts.tight_layout()

    true_y_label, pred_y_label = zip(*results)
    matrix = confusion_matrix(true_y_label, pred_y_label)
    numpy.set_printoptions(precision=2)
    charts.figure()
    _draw(matrix, labels=class_names, chart_name='Normalized Confusion Matrix')
    charts.savefig("Confusion_Matrix.png")
from rest_framework import mixins
from rest_framework.generics import GenericAPIView
# not used anymore. just use an existing API view and add the required mixin to add functionality
class CreateOrDestroyView(mixins.CreateModelMixin,
                          mixins.DestroyModelMixin,
                          GenericAPIView):
    """Generic DRF view exposing only POST (create) and DELETE (destroy)."""

    def post(self, request, *args, **kwargs):
        # Delegate creation to CreateModelMixin.
        return self.create(request, *args, **kwargs)

    def delete(self, request, *args, **kwargs):
        # Delegate deletion to DestroyModelMixin.
        return self.destroy(request, *args, **kwargs)
|
#!/usr/bin/python3
import os
import argparse
import time
import urllib.request
import email.mime.text
import socket
import getpass
import subprocess
def lookup_mac_vendor(mac_address):
    """Resolve a MAC address to its vendor name via api.macvendors.com.

    Retries up to 3 times, sleeping 1s after a failure.  Returns the
    vendor string, or "" when the MAC is empty/falsy or every attempt
    fails.
    """
    result = ""
    if mac_address:
        for i in range(3):
            vendor = None
            try:
                # Only first 3 octets are needed for manufacturer identification
                # The rest is ommited for privacy reasons
                response = urllib.request.urlopen("https://api.macvendors.com/" + mac_address[:8])
                vendor = response.read().decode(response.info().get_param('charset') or 'utf-8')
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.  Network/HTTP errors are
                # still retried after a short pause.
                time.sleep(1)
            if vendor:
                result = vendor
                break
    return(result)
def send_mail(vendor):
    # Compose a plain-text report of the new DHCP lease and hand it to
    # the local sendmail binary.  Lease details come from the module
    # global `options` (parsed CLI arguments).
    msg = email.mime.text.MIMEText(
        "IP: {ip}\n"
        "Static: {is_static}\n"
        "MAC: {mac}\n"
        "Vendor: {vendor}\n"
        "Host: {host}\n"
        "Static host name: {static}".format(
            ip=options.ip,
            is_static="yes" if options.is_static == "1" else "no",
            mac=options.mac,
            vendor=vendor,
            host=options.host_name,
            static=options.static_host_name
        )
    )
    # Sender is "<local user>@<hostname>"; recipient comes from --mail-to.
    msg['From'] = getpass.getuser() + "@" + socket.gethostname()
    msg['To'] = options.mail_to
    msg['Subject'] = "New DHCP lease for {} on {}".format(options.host_name, socket.gethostname())
    # -t: read recipients from the headers; -oi: don't treat "." as EOF.
    proc = subprocess.Popen(["/usr/sbin/sendmail", "-t", "-oi"], stdin=subprocess.PIPE)
    proc.communicate(bytes(msg.as_string(), "UTF-8"))
def forked_main():
    """Handle one DHCP lease: mail a report on first sight of a MAC
    address and record it in the known-MACs file."""
    vendor = lookup_mac_vendor(options.mac)
    # Check whether this MAC was already seen (the file may not exist yet).
    mac_is_known = False
    if options.mac:
        if os.path.isfile(options.known_macs_file):
            with open(options.known_macs_file) as f:
                for line in f:
                    if line.rstrip().upper() == options.mac.upper():
                        mac_is_known = True
    if not mac_is_known:
        send_mail(vendor)
        # BUG FIX: only record the MAC when it was not already known --
        # the original appended on every lease, so the known-MACs file
        # grew with duplicate entries indefinitely.
        if options.mac:
            with open(options.known_macs_file, "a") as f:
                f.write(options.mac + "\n")
# ---- CLI entry point ----
# Positional arguments mirror what a DHCP server passes to a lease hook.
parser = argparse.ArgumentParser(description="New DHCP lease event handler")
parser.add_argument("known_macs_file", help="Known MAC-address list file")
parser.add_argument("ip", nargs="?", default="", help="IP address")
parser.add_argument("mac", nargs="?", default="", help="MAC address")
parser.add_argument("host_name", nargs="?", default="", help="Name of the client")
parser.add_argument("static_host_name", nargs="?", default="", help="Matched static host name")
parser.add_argument("is_static", nargs="?", default="", help="IP is static (0 or 1)")
parser.add_argument('-n', '--no-fork', dest='no_fork', action='store_true',
                    default=False, help='Do not fork a child process')
parser.add_argument('-m', '--mail-to', dest='mail_to', default="root",
                    help='Mail message recipient (default: root)', metavar="mail")
options = parser.parse_args()
if options.no_fork:
    # Foreground mode: do the work in this process and echo the inputs.
    print("--no-fork option is specified. Running in foreground")
    print("IP: {}".format(options.ip))
    print("MAC: {}".format(options.mac))
    print("Mail to: {}".format(options.mail_to))
    forked_main()
elif os.fork() == 0:
    # The child does the slow vendor lookup + mail so the DHCP server's
    # hook returns immediately; the parent falls through and exits.
    forked_main()
    os._exit(0)
|
from enum import Enum
from typing import List
from pydantic import BaseModel
class MoveFileStrategy(str, Enum):
    """Strategy available for file move"""
    # Replace an existing file at the destination.
    OVERWRITE = 'overwrite'
    # Keep both files by renaming the moved one.
    RENAME = 'rename'
class MoveFileRequest(BaseModel):
    """Request data for file move"""
    # Source paths to move.
    files: List[str]
    # Target directory.
    destination: str
    # How to resolve name collisions at the destination.
    strategy: MoveFileStrategy
class ZipFileRequest(BaseModel):
    """Request data for zipping files"""
    # Paths to include in the archive.
    files: List[str]
    # Directory in which to create the archive.
    destination: str
    # Name of the resulting zip file.
    filename: str
|
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_bcrypt import Bcrypt
from sqlalchemy import text
import base64
import uuid
from datetime import datetime
from sqlalchemy import create_engine
from sqlalchemy.sql import text
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']='postgresql://postgres:123456@localhost:5432/ibanking'
app.config['SECRET_KEY']='secret'
db = SQLAlchemy(app)
bcrypt = Bcrypt(app)
conn_str = 'postgresql://postgres:123456@localhost:5432/ibanking'
engine = create_engine(conn_str, echo=False)
#################################################################################################
class Userz(db.Model):
    """Bank user (login identity); owner of zero or more Accounts."""
    user_id = db.Column(db.String, primary_key = True, nullable = False) # PK (uuid4 string)
    user_name = db.Column(db.String(25), nullable = False, unique = True)
    full_name = db.Column(db.String(45), nullable = False)
    email = db.Column(db.String(45), nullable = False, unique = True)
    password = db.Column(db.String, nullable = False) # bcrypt hash, never plaintext
    is_admin = db.Column(db.Boolean, default = False)
    user_acc = db.relationship('Account', backref = 'x', lazy = 'dynamic') # BR: Account.x -> owning Userz
class Branch(db.Model):
    """Bank branch; parent of accounts and transactions."""
    branch_number = db.Column(db.String(4), primary_key = True, nullable = False) # PK
    branch_name = db.Column(db.String(25), nullable = False, unique = True)
    branch_address = db.Column(db.String(25), nullable = False)
    branch_acc = db.relationship('Account', backref = 'y', lazy = 'dynamic') # BR: Account.y -> Branch
    branch_tra = db.relationship('Transaction', backref = 'yy', lazy = 'dynamic') # BR: Transaction.yy -> Branch
class Account(db.Model):
    """Bank account belonging to one user at one branch."""
    account_number = db.Column(db.Integer, primary_key = True, nullable = False, unique = True) # PK
    account_type = db.Column(db.String(25), nullable = False, default = "Regular")
    account_balance = db.Column(db.Integer, nullable = False) # integer currency units
    last_transaction = db.Column(db.DateTime, nullable = False)
    user_id = db.Column(db.String, db.ForeignKey('userz.user_id'), nullable = False) # FK
    branch_id = db.Column(db.String, db.ForeignKey('branch.branch_number'), nullable = False) # FK
    account_tra = db.relationship('Transaction', backref = 'z', lazy = 'dynamic') # BR: Transaction.z -> Account
class Transaction(db.Model):
    """Ledger entry for an account (type stored as a free-form string)."""
    transaction_id = db.Column(db.Integer, primary_key = True, index =True) # PK
    transaction_type = db.Column(db.String(25), nullable = False)
    transaction_date = db.Column(db.DateTime, nullable = False)
    # NOTE(review): "ammount" is misspelled but is the persisted column
    # name -- renaming would require a schema migration.
    transaction_ammount = db.Column(db.Integer, nullable = False)
    transaction_description = db.Column(db.String(255))
    transaction_sender = db.Column(db.Integer, nullable = True) # presumably the sender account number for transfers -- confirm
    transaction_sender_branch = db.Column(db.String(25), nullable = True)
    branch_id = db.Column(db.String, db.ForeignKey('branch.branch_number'), nullable = False) # FK
    account_id = db.Column(db.Integer, db.ForeignKey('account.account_number'), nullable = False) # FK
#################################################################################################
# Auth
def get_hash(password):
    # Hash a plaintext password with bcrypt; stored as a utf-8 string.
    return bcrypt.generate_password_hash(password).decode('utf-8')
def authz():
    """Decode the HTTP Basic Authorization header into [username, password].

    NOTE(review): raises (-> HTTP 500) when the header is missing or is
    not valid base64 -- callers assume a well-formed header.
    """
    token = request.headers.get('Authorization')
    token2 = token.replace("Basic ","")
    plain = base64.b64decode(token2).decode('utf-8')
    # BUG FIX: split only on the first ':' -- passwords may themselves
    # contain colons, which the unbounded split truncated.
    auth_data = plain.split(":", 1)
    return auth_data
def get_username(auth_data):
    """Return the username component of decoded Basic-auth data."""
    return auth_data[0]
def check_user(auth_data):
    """Return True when the username does NOT exist (callers respond 400).

    BUG FIX: the original returned False for a missing user and fell off
    the end (None) for an existing one -- both falsy, so the callers'
    "Username is not registered" branch could never fire and missing
    users crashed later in get_password instead.
    """
    user = Userz.query.filter_by(user_name=auth_data[0]).first()
    return user is None
def get_password(auth_data):
    # Validate the Basic-auth password against the stored bcrypt hash.
    # NOTE(review): this hashes the candidate password and then passes the
    # HASH to check_password_hash, which expects the PLAINTEXT candidate --
    # so the check always returns False.  Callers treat a truthy return as
    # "wrong password", so as written no password is ever rejected.  The
    # likely intended fix is
    #     return not bcrypt.check_password_hash(user.password, auth_data[1])
    # but that inverts this function's stated contract below -- confirm
    # with the callers before changing.
    user = Userz.query.filter_by(user_name=auth_data[0]).first()
    password = bcrypt.generate_password_hash(auth_data[1]).decode('utf-8')
    hashcheck = bcrypt.check_password_hash(user.password, password)
    return hashcheck #returns true if valid
def get_is_admin(auth_data):
    """Return True iff the authenticated user has the admin flag.

    BUG FIX: the original fell off the end (returning None) for
    non-admins; now always returns an explicit bool (same truthiness).
    """
    user = Userz.query.filter_by(user_name=auth_data[0]).first()
    return bool(user.is_admin)
#################################################################################################
# User
def get_userData(id):
    # Fetch a user by primary key, or abort with HTTP 404.
    return Userz.query.filter_by(user_id=id).first_or_404()
def return_user(u):
    """Serialize a Userz row into the API's user dict."""
    return {
        'User id': u.user_id,
        'Username': u.user_name,
        'Full name': u.full_name,
        'Email': u.email,
        'is admin': u.is_admin,
    }
#################################################################################################
# Branch
def get_branchData(id):
    # Fetch a branch by its branch number, or abort with HTTP 404.
    return Branch.query.filter_by(branch_number = id).first_or_404()
def return_branch(b):
    """Serialize a Branch row into the API's branch dict."""
    return {
        'Branch Number': b.branch_number,
        'Branch Name': b.branch_name,
        'Branch Address': b.branch_address,
    }
#################################################################################################
# Account
def get_accountData(id):
    # Fetch an account by its account number, or abort with HTTP 404.
    return Account.query.filter_by(account_number=id).first_or_404()
def return_account(a):
    """Serialize an Account row (with its owner backref `x` and branch
    backref `y`) into the API's account dict."""
    return {
        'Account Number': a.account_number,
        "Full Name": a.x.full_name,
        "Branch ID": a.y.branch_number,
        "Account Balance": a.account_balance,
        "Branch Name": a.y.branch_name,
        "Last Transaction": a.last_transaction,
    }
#################################################################################################
# Transaction
def get_transactionData(id):
    # Fetch a transaction by its id, or abort with HTTP 404.
    return Transaction.query.filter_by(transaction_id=id).first_or_404()
def return_transaction(t):
    """Serialize a Transaction row; resolves the related account so the
    owner's full name can be included."""
    acc = Account.query.filter_by(account_number=t.account_id).first()
    return {
        'Transaction ID': t.transaction_id,
        "Transaction Date": t.transaction_date,
        "Transaction Type": t.transaction_type,
        "Related account": t.account_id,
        "Full Name": acc.x.full_name,
    }
# def return_save(t):
# return {'Transaction ID': t.transaction_id, 'Transaction Date': t.transaction_date, 'Transaction Type': t.transaction_type,
# "Target Branch":"", "Target Account":"", "Transaction Ammount": t.transaction_ammount, "Account Balance":""}
# def return_transfer(t):
# return {'Transaction ID': t.transaction_id, 'Transaction Date': t.transaction_date, 'Transaction Type': t.transaction_type,
# "From":"" , "Target Branch":"", "Target Account":"", "Transaction Ammount": t.transaction_ammount, "Description": t.transaction_description }
# def return_withdraw(t):
# return {'Transaction ID': t.transaction_id, "Transaction Date": t.transaction_date, "Transaction Type": t.transaction_type,
# "Transaction Ammount": t.transaction_ammount, "Account Balance":""}
#################################################################################################
# Reporting
#################################################################################################
################################## ENDPOINT USERZ ###############################################
#################################################################################################
# make admin
@app.route('/admin/', methods = ["POST"])
def make_admin():
    # Create a user (optionally an admin) from the JSON body.
    # NOTE(review): this endpoint performs NO authentication and NO field
    # validation (unlike create_user) -- anyone who can reach it can
    # create an admin account.  Presumably a bootstrap endpoint; confirm
    # it is disabled or protected in production.
    data= request.get_json()
    hash = get_hash(data['password'])
    u = Userz(
        user_name= data['user_name'],
        user_id = str(uuid.uuid4()),
        full_name= data['full_name'],
        email= data['email'],
        is_admin= data.get('is_admin', False),
        password= hash
    )
    db.session.add(u)
    db.session.commit()
    return return_user(u), 201
# list all users (Admin)
@app.route('/users/', methods = ["GET"])
def get_users():
    """List every user (admin only, HTTP Basic auth)."""
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    # NOTE: 201 preserved from the original even though this is a read.
    return jsonify([return_user(user) for user in Userz.query.all()]), 201
# search user by id (Admin)
@app.route('/users/<id>/', methods = ["GET"])
def get_user(id):
    """Fetch a single user by id (admin only, HTTP Basic auth)."""
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    user = get_userData(id)
    return return_user(user), 201
# create new user (Admin)
@app.route('/users/', methods=['POST'])
def create_user():
    """Create a user from the JSON body (admin only, HTTP Basic auth).

    Required fields: user_name (>=4 chars), email (>=6), full_name,
    password (>=8).  Optional: is_admin (defaults to False).
    """
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    data = request.get_json()
    if not 'user_name' in data:
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'user_name is not given'
        }), 400
    if not 'email' in data:
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'email is not given'
        }), 400
    if not 'full_name' in data:
        # BUG FIX: the original reported 'user_name is not given' here.
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'full_name is not given'
        }), 400
    if not 'password' in data:
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'password is not given'
        }), 400
    if len(data['user_name']) < 4:
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username must contain a minimum of 4 characters'
        }), 400
    if len(data['email']) < 6:
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Email must contain a minimum of 6 characters'
        }), 400
    if len(data['password']) < 8:
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Password must contain at least 8 characters'
        }), 400
    hash = get_hash(data['password'])
    u = Userz(
        user_name= data['user_name'],
        full_name= data['full_name'],
        user_id = str(uuid.uuid4()),
        email= data['email'],
        is_admin= data.get('is_admin', False),
        password= hash
    )
    db.session.add(u)
    db.session.commit()
    return return_user(u), 201
# update / edit user data by id (Admin)
@app.route('/users/<id>/', methods=['PUT'])
def update_user(id):
    """Partially update a user (admin only); only supplied fields change."""
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    data = request.get_json()
    user = get_userData(id)
    if 'user_name' in data:
        user.user_name = data['user_name']
    if 'full_name' in data:
        user.full_name = data['full_name']
    if 'email' in data:
        user.email = data['email']
    # BUG FIX: the original tested for the key 'is admin' (with a space)
    # but then read data['is_admin'], so the admin flag could never be
    # updated (and a literal 'is admin' key raised KeyError).
    if 'is_admin' in data:
        user.is_admin = data['is_admin']
    if 'password' in data:
        user.password = get_hash(data['password'])
    db.session.commit()
    return jsonify({'Success': 'User data has been updated'}, return_user(user))
# delete / close user data by id (Admin)
@app.route('/users/<id>/', methods=['DELETE'])
def delete_user(id):
    """Delete a user by id (admin only, HTTP Basic auth)."""
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    user = Userz.query.filter_by(user_id=id).first_or_404()
    db.session.delete(user)
    db.session.commit()
    return {'success': 'User data deleted successfully'}
#################################################################################################
################################## ENDPOINT BRANCH ##############################################
#################################################################################################
# list all branches (Admin)
@app.route('/branch/', methods = ["GET"])
def get_branches():
    """List every branch (admin only, HTTP Basic auth)."""
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    # NOTE: 201 preserved from the original even though this is a read.
    return jsonify([return_branch(branches) for branches in Branch.query.all()]), 201
# search branch by id (Admin)
@app.route('/branch/<id>/', methods = ["GET"])
def get_branch(id):
    """Fetch a single branch by number (admin only, HTTP Basic auth)."""
    login = authz()
    # Guard-clause rewrite of the original if/else pyramid.
    if check_user(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'You do not have the authority to do this operation'
        }), 400
    branch = get_branchData(id)
    return return_branch(branch), 201
# create new branch (Admin)
@app.route('/branch/', methods=['POST'])
def create_branch():
    """Create a branch from a JSON payload. Admin credentials required.

    Payload must contain branch_name (>= 4 chars), branch_number (>= 4 chars)
    and branch_address (>= 6 chars); each failure is reported individually.
    """
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    data = request.get_json()
    # Required-field checks, reported one field at a time (same order as before).
    for required in ('branch_name', 'branch_address', 'branch_number'):
        if required not in data:
            return jsonify({
                'error': 'Bad Request',
                'message': required + ' is not given'
            }), 400
    # Minimum-length validation.
    if len(data['branch_name']) < 4:
        return jsonify({
            'error': 'Bad Request',
            'message': 'branch_name must contain a minimum of 4 letters'
        }), 400
    if len(data['branch_number']) < 4:
        return jsonify({
            'error': 'Bad Request',
            'message': 'branch_number must contain a minimum of 4 letters'
        }), 400
    if len(data['branch_address']) < 6:
        return jsonify({
            'error': 'Bad Request',
            'message': 'branch_address must contain a minimum of 6 letters'
        }), 400
    new_branch = Branch(
        branch_name=data['branch_name'],
        branch_number=data['branch_number'],
        branch_address=data['branch_address']
    )
    db.session.add(new_branch)
    db.session.commit()
    return return_branch(new_branch), 201
# update / edit branch by id (Admin)
@app.route('/branch/<id>/', methods=['PUT'])
def update_branch(id):
    """Partially update a branch's name and/or address. Admin-only."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    payload = request.get_json()
    target = get_branchData(id)
    # Only fields present in the payload are touched.
    if 'branch_name' in payload:
        target.branch_name = payload['branch_name']
    if 'branch_address' in payload:
        target.branch_address = payload['branch_address']
    db.session.commit()
    return jsonify({'Success': 'Branch data has been updated'}, return_branch(target)), 201
# delete / close branch by id (Admin)
@app.route('/branch/<id>/', methods=['DELETE'])
def delete_branch(id):
    """Delete a branch by id (404 if unknown). Admin-only."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    doomed = Branch.query.filter_by(branch_id=id).first_or_404()
    db.session.delete(doomed)
    db.session.commit()
    return {'success': 'Branch data deleted successfully'}
#################################################################################################
################################## ENDPOINT ACCOUNT #############################################
#################################################################################################
# list all accounts (Admin)
@app.route('/account/', methods = ["GET"])
def get_accounts():
    """Return every account record as JSON. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    return jsonify([return_account(one_account) for one_account in Account.query.all()])
# seach account by id (Admin)
@app.route('/account/<id>/', methods = ["GET"])
def get_account(id):
    """Return a single account looked up by its id. Admin-only."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    return return_account(get_accountData(id))
# create account (Admin)
@app.route('/account/', methods=['POST'])
def create_account():
    """Open a new bank account (admin-only).

    Expects a JSON body with deposit, date, account_number — plus
    account_type, "user id" and branch_id, which are read but never
    presence-checked below (a missing one raises KeyError -> HTTP 500).
    The opening deposit is also recorded as a "save" Transaction.
    """
    login = authz()
    if check_user(login):  # unknown username
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    else:
        if get_password(login):  # password mismatch
            return jsonify({
                'error' : 'Bad Request',
                'message' : 'Wrong Password'
            }), 400
        else:
            if not get_is_admin(login):  # authenticated but not an admin
                return jsonify({
                    'error' : 'Bad Request',
                    'message' : 'You do not have the authority to do this operation'
                }), 400
            else:
                data = request.get_json()
                # Presence checks for three of the six fields used below.
                if not 'deposit' in data:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'deposit is not given'
                    }), 400
                if not 'date' in data:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'date is not given'
                    }), 400
                if not 'account_number' in data:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'account_number is not given'
                    }), 400
                # Business rule: minimum opening deposit is 150000.
                if data["deposit"] < 150000:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'First deposit must larger than or equal to 150000'
                    }), 400
                else: # >= 150000
                    a = Account(
                        account_number = data['account_number'],
                        account_type = data['account_type'],
                        account_balance = data['deposit'],
                        last_transaction = data['date'],
                        # NOTE(review): key "user id" (with a space) is
                        # inconsistent with the underscored keys used
                        # everywhere else — confirm the client contract.
                        user_id = data["user id"],
                        branch_id = data["branch_id"]
                    )
                    # when opening a new account, this will be counted as deposit in Transaction table
                    t = Transaction(
                        transaction_type = "save",
                        transaction_date = data['date'],
                        transaction_ammount = data['deposit'],
                        # Transaction.account_id stores the account *number*.
                        account_id = data['account_number'],
                        branch_id = data['branch_id']
                    )
                    db.session.add(a)
                    db.session.add(t)
                    db.session.commit()
                    return return_account(a), 201
# update / edit account by id (Admin)
@app.route('/account/<id>/', methods=['PUT'])
def update_account(id):
    """Partially update an account's owner, type and/or balance. Admin-only.

    Only the fields present in the JSON payload are changed.
    """
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    data = request.get_json()
    account = get_accountData(id)
    if 'owner_id' in data:
        account.owner_id = data['owner_id']
    if 'account_type' in data:
        account.account_type = data['account_type']
    if 'account_balance' in data:
        # BUG FIX: previously assigned to a local `account_account_balance`,
        # so the new balance never reached the database row.
        account.account_balance = data['account_balance']
    db.session.commit()
    return jsonify({'Success': 'Account data has been updated'}, return_account(account))
# delete account (Admin)
@app.route('/account/<id>/', methods=['DELETE'])
def delete_account(id):
    """Close (delete) an account by id (404 if unknown). Admin-only."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    closing = Account.query.filter_by(account_id=id).first_or_404()
    db.session.delete(closing)
    db.session.commit()
    return {
        'success': 'This bank account has been closed'
    }
#################################################################################################
################################## ENDPOINT TRANSACTION #########################################
#################################################################################################
# List all transactions (Admin)
@app.route('/transaction/', methods = ["GET"])
def get_transactions():
    """Return every transaction record as JSON. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    return jsonify([return_transaction(one_trans) for one_trans in Transaction.query.all()])
# Search Transaction by id (Admin)
@app.route('/transaction/<id>/', methods = ["GET"])
def get_transaction(id):
    """Return one transaction plus the owning account holder's name. Admin-only.

    404s when the transaction id is unknown.
    """
    login = authz()
    if check_user(login):  # unknown username
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    else:
        if get_password(login):  # password mismatch
            return jsonify({
                'error' : 'Bad Request',
                'message' : 'Wrong Password'
            }), 400
        else:
            if not get_is_admin(login):  # authenticated but not an admin
                return jsonify({
                    'error' : 'Bad Request',
                    'message' : 'You do not have the authority to do this operation'
                }), 400
            else:
                trans = Transaction.query.filter_by(transaction_id=id).first_or_404()
                # Transaction.account_id holds the account *number* — look the
                # account up by that.
                acc = Account.query.filter_by(account_number=trans.account_id).first()
                # NOTE(review): acc can be None if the account was deleted,
                # which would raise AttributeError below; acc.x is presumably
                # the relationship to the owning Userz row — confirm the model.
                return jsonify([ {'Transaction ID': trans.transaction_id, "Transaction Date": trans.transaction_date, "Transaction Type": trans.transaction_type,
                "Related account":trans.account_id, "Full Name": acc.x.full_name} ])
# save / deposit (Admin)
@app.route('/transaction/deposit/', methods = ["POST"])
def save_money():
    """Record a deposit ("save") transaction and credit the account. Admin-only.

    NOTE(review): if transaction_type is present but not "save" the function
    falls through and returns None, which Flask reports as a server error.
    """
    login = authz()
    if check_user(login):  # unknown username
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    else:
        if get_password(login):  # password mismatch
            return jsonify({
                'error' : 'Bad Request',
                'message' : 'Wrong Password'
            }), 400
        else:
            if not get_is_admin(login):  # authenticated but not an admin
                return jsonify({
                    'error' : 'Bad Request',
                    'message' : 'You do not have the authority to do this operation'
                }), 400
            else:
                data=request.get_json()
                if not 'transaction_type' in data:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'transaction_type is not given'
                    }), 400
                else:
                    if data['transaction_type'] == "save":
                        # Presence checks for the remaining required fields.
                        if not 'transaction_ammount' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_ammount is not given'
                            }), 400
                        if not 'transaction_date' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_date is not given'
                            }), 400
                        if not 'account_id' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'account_id is not given'
                            }), 400
                        if not 'branch_id' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'branch_id is not given'
                            }), 400
                        t = Transaction(
                            transaction_type = data['transaction_type'],
                            transaction_date = data['transaction_date'],
                            transaction_ammount = data['transaction_ammount'],
                            account_id = data['account_id'],
                            branch_id = data['branch_id']
                        )
                        db.session.add(t)
                        # Credit the target account and stamp the deposit date.
                        acc = Account.query.filter_by(account_number=data['account_id']).first()
                        temp = acc.account_balance
                        temp2 = temp + data['transaction_ammount']
                        acc.account_balance = temp2
                        acc.last_transaction = data['transaction_date']
                        db.session.commit()
                        # NOTE(review): these two Query objects are never
                        # executed or used — candidates for removal.
                        branch = Branch.query.filter_by(branch_number=data['branch_id'])
                        user = Userz.query.filter_by(user_id=acc.user_id)
                        return jsonify([
                            {
                                "1. Transaction summary": {
                                    "Transaction ID": t.transaction_id,
                                    "Transaction Date": t.transaction_date,
                                    "Transaction Type": t.transaction_type,
                                },
                                "2. Account information": {
                                    "Account Number": t.account_id,
                                    "Ammount Deposited": data['transaction_ammount'],
                                    # NOTE(review): t.z is presumably the
                                    # relationship to the Account row — confirm
                                    # the backref names z / x on the models.
                                    "Account Balance": t.z.account_balance,
                                    "Full Name": acc.x.full_name
                                }
                            }
                        ]), 201
# Transfer (User)
@app.route('/transaction/transfer/', methods = ["POST"])
def transfer_money():
    """Transfer money between two accounts (user endpoint, admins rejected).

    Debits the sender, credits the receiver, and records one Transaction row.
    Refuses the transfer when the sender lacks funds or would drop below the
    150000 minimum balance. NOTE(review): if transaction_type is present but
    not "transfer" the function falls through and returns None.
    """
    login = authz()
    if check_user(login):  # unknown username
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    else:
        if get_password(login):  # password mismatch
            return jsonify({
                'error' : 'Bad Request',
                'message' : 'Wrong Password'
            }), 400
        else:
            if get_is_admin(login):  # admins may not use user endpoints
                return jsonify({
                    'error' : 'Bad Request',
                    'message' : "Admins cannot execute user's endpoints"
                }), 400
            else:
                data=request.get_json()
                if not 'transaction_type' in data:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'transaction_type is not given'
                    }), 400
                else:
                    if data['transaction_type'] == "transfer":
                        # Presence checks for the remaining required fields.
                        if not 'transaction_ammount' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_ammount is not given'
                            }), 400
                        if not 'transaction_date' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_date is not given'
                            }), 400
                        if not 'target_account' in data: # target
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'target_account is not given'
                            }), 400
                        if not 'target_branch' in data: #target
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'target_branch is not given'
                            }), 400
                        # NOTE(review): the next two error messages say
                        # "transaction_target..." but the fields checked are
                        # transaction_sender / transaction_sender_branch.
                        if not 'transaction_sender' in data: #sender
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_target is not given'
                            }), 400
                        if not 'transaction_sender_branch' in data: #sender
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_target_branch is not given'
                            }), 400
                        # Funds checks on the sender's account.
                        acc_test = Account.query.filter_by(account_number=data['transaction_sender']).first()
                        if acc_test.account_balance < data['transaction_ammount']: # insufficent balance
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'Insufficent account balance to do this operation'
                            }), 400
                        else:
                            tempx = acc_test.account_balance
                            tempx1 = tempx - data['transaction_ammount']
                            if tempx1 < 150000: # balance will be less than minimum account ballance required
                                return jsonify({
                                    'error' : 'Bad Request',
                                    'message' : 'Your balance will be less than the minimum account balance required if you do this operation'
                                }), 400
                            else:
                                # Record the transfer; account_id holds the
                                # receiver, transaction_sender the sender.
                                t = Transaction(
                                    transaction_type = data['transaction_type'], #transfer
                                    transaction_date = data['transaction_date'], #date
                                    transaction_ammount = data['transaction_ammount'],
                                    account_id = data['target_account'],
                                    branch_id = data['target_branch'],
                                    transaction_sender = data['transaction_sender'],
                                    transaction_sender_branch = data['transaction_sender_branch']
                                )
                                db.session.add(t)
                                # update balance sender
                                acc = Account.query.filter_by(account_number=data['transaction_sender']).first()
                                temp2 = acc.account_balance - data['transaction_ammount']
                                acc.account_balance = temp2
                                acc.last_transaction = data['transaction_date']
                                # update balance receiver
                                acc1 = Account.query.filter_by(account_number=data['target_account']).first()
                                temp2 = acc1.account_balance + data['transaction_ammount']
                                acc1.account_balance = temp2
                                # commit
                                db.session.commit()
                                # NOTE(review): the four Query objects below are
                                # never executed or used, and this branch lookup
                                # filters branch_number by the *sender account*
                                # value rather than transaction_sender_branch.
                                branch = Branch.query.filter_by(branch_number=data['transaction_sender'])
                                user = Userz.query.filter_by(user_id=acc.user_id)
                                # data receiver
                                branch1 = Branch.query.filter_by(branch_number=data['target_branch'])
                                user1 = Userz.query.filter_by(user_id=acc1.user_id)
                                return jsonify([
                                    {
                                        "1. Transaction summary": {
                                            "Transaction ID": t.transaction_id,
                                            "Transaction Date": t.transaction_date,
                                            "Transaction Type": t.transaction_type,
                                        },
                                        "2. Sender information": {
                                            "Account Number": t.transaction_sender,
                                            "Ammount Transferred": data['transaction_ammount'],
                                            # NOTE(review): acc.x presumably the
                                            # Userz relationship — confirm.
                                            "Full Name": acc.x.full_name
                                        },
                                        "3. Receiver information": {
                                            "Account Number": t.account_id,
                                            "Full Name": acc1.x.full_name
                                        }
                                    }
                                ]), 201
# Withdraw (User)
@app.route('/transaction/withdraw/', methods = ["POST"])
def withdraw_money():
    """Record a withdrawal and debit the account (user endpoint, admins rejected).

    NOTE(review): unlike transfer_money this performs no insufficient-funds or
    minimum-balance check, so the balance can go below 150000 or negative.
    If transaction_type is present but not "withdraw" the function falls
    through and returns None.
    """
    login = authz()
    if check_user(login):  # unknown username
        return jsonify({
            'error' : 'Bad Request',
            'message' : 'Username is not registered'
        }), 400
    else:
        if get_password(login):  # password mismatch
            return jsonify({
                'error' : 'Bad Request',
                'message' : 'Wrong Password'
            }), 400
        else:
            if get_is_admin(login):  # admins may not use user endpoints
                return jsonify({
                    'error' : 'Bad Request',
                    'message' : "Admins cannot execute user's endpoints"
                }), 400
            else:
                data=request.get_json()
                if not 'transaction_type' in data:
                    return jsonify({
                        'error' : 'Bad Request',
                        'message' : 'transaction_type is not given'
                    }), 400
                else:
                    if data['transaction_type'] == "withdraw":
                        # Presence checks for the remaining required fields.
                        if not 'transaction_ammount' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_ammount is not given'
                            }), 400
                        if not 'transaction_date' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'transaction_date is not given'
                            }), 400
                        if not 'account_id' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'account_id is not given'
                            }), 400
                        if not 'branch_id' in data:
                            return jsonify({
                                'error' : 'Bad Request',
                                'message' : 'branch_id is not given'
                            }), 400
                        t = Transaction(
                            transaction_type = data['transaction_type'],
                            transaction_date = data['transaction_date'],
                            transaction_ammount = data['transaction_ammount'],
                            account_id = data['account_id'],
                            branch_id = data['branch_id']
                        )
                        db.session.add(t)
                        # Debit the account and stamp the withdrawal date.
                        acc = Account.query.filter_by(account_number=data['account_id']).first()
                        temp = acc.account_balance
                        temp2 = temp - data['transaction_ammount']
                        acc.account_balance = temp2
                        acc.last_transaction = data['transaction_date']
                        db.session.commit()
                        # NOTE(review): these two Query objects are never
                        # executed or used — candidates for removal.
                        branch = Branch.query.filter_by(branch_number=data['branch_id'])
                        user = Userz.query.filter_by(user_id=acc.user_id)
                        return jsonify([
                            {
                                "1. Transaction summary": {
                                    "Transaction ID": t.transaction_id,
                                    "Transaction Date": t.transaction_date,
                                    "Transaction Type": t.transaction_type,
                                },
                                "2. Account information": {
                                    "Account Number": t.account_id,
                                    "Ammount Debitted": data['transaction_ammount'],
                                    # NOTE(review): t.z / acc.x presumably the
                                    # Account / Userz relationships — confirm.
                                    "Account Balance": t.z.account_balance,
                                    "Full Name": acc.x.full_name
                                }
                            }
                        ]), 201
# See Transaction History by id (User)
@app.route('/transaction/history/', methods = ["GET"])
def get_history():
    """Return the logged-in user's own transaction history (admins rejected)."""
    login = authz()
    # Authentication guards (user endpoint: admins are turned away).
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': "Admins cannot execute user's endpoints"
        }), 400
    # A user can only view his own data: username -> user -> account -> history.
    owner = Userz.query.filter_by(user_name=login[0]).first()
    own_account = Account.query.filter_by(user_id=owner.user_id).first()
    history = Transaction.query.filter_by(account_id=own_account.account_number).all()
    return jsonify([
        {
            "Transaction ID": entry.transaction_id,
            "Transaction Type": entry.transaction_type,
            "Transaction Date": entry.transaction_date,
            "Transaction Ammount": entry.transaction_ammount,
            "Transaction Description": entry.transaction_description,
            "Transaction Sender": entry.transaction_sender,
            "Transaction Receiver": entry.account_id
        } for entry in history
    ])
# See Account info / see remaining balance by id (User)
@app.route('/account/user/', methods = ["GET"])
def get_user_account():
    """Return the logged-in user's own account record (admins rejected)."""
    login = authz()
    # Authentication guards (user endpoint: admins are turned away).
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': "Admins cannot execute user's endpoints"
        }), 400
    # A user can only view his own data.
    owner = Userz.query.filter_by(user_name=login[0]).first()
    own_account = Account.query.filter_by(user_id=owner.user_id).first()
    return return_account(own_account)
# transfer dan withdraw jangan sampai kurang dari min. balance (150k)
# admin gabisa menjalankan fitur" user (transfer, dll)
#################################################################################################
################################## ENDPOINT REPORTING ###########################################
#################################################################################################
# filter by period (start date / end date) : number of accounts, number of users, total debit, credit, balance
@app.route('/transaction/period/', methods = ["GET"])
def get_by_period():
    """List every transaction dated within [start_period, end_period]. Admin-only."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    data = request.get_json()
    if "start_period" not in data:
        return jsonify({
            'error': 'Bad Request',
            'message': "start_period is not given"
        }), 400
    if "end_period" not in data:
        return jsonify({
            'error': 'Bad Request',
            'message': "end_period is not given"
        }), 400
    # Inclusive date-range filter over the transaction log.
    in_range = Transaction.query.filter(
        (Transaction.transaction_date).between(data['start_period'], data['end_period'])
    ).all()
    return jsonify([
        {
            "Transaction ID": entry.transaction_id,
            "Transaction Type": entry.transaction_type,
            "Transaction Date": entry.transaction_date,
            "Transaction ammount": entry.transaction_ammount,
            "Transaction sender": entry.transaction_sender,
            "Transaction receiver": entry.account_id
        } for entry in in_range
    ])
# account number
@app.route('/account/number/', methods = ["GET"])
def get_account_number():
    """Report how many accounts exist. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    return jsonify({
        'The number of registered accounts are': Account.query.count()
    }), 201
# total debit (withdraw) (admin)
@app.route('/transaction/debit/', methods = ["GET"])
def get_total_debit():
    """Sum all withdrawal amounts via raw SQL. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    totals = []  # renamed from `all`, which shadowed the builtin
    with engine.connect() as connection:
        result = connection.execute(
            text("SELECT sum(transaction_ammount) FROM Transaction where transaction_type = 'withdraw' ")
        )
        for row in result:
            totals.append({
                'The total debit is': row[0]
            })
    return jsonify(totals)
# total credit (deposit) (Admin)
@app.route('/transaction/credit/', methods = ["GET"])
def get_total_transfer():
    """Sum all deposit ("save") amounts via raw SQL. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    totals = []  # renamed from `all`, which shadowed the builtin
    with engine.connect() as connection:
        result = connection.execute(
            text("SELECT sum(transaction_ammount) FROM Transaction where transaction_type = 'save' ")
        )
        for row in result:
            totals.append({
                'The total credit is': row[0]
            })
    return jsonify(totals)
# total balance (user balance) (Admin)
@app.route('/account/balance/', methods = ["GET"])
def get_total_balance():
    """Sum every account's balance via raw SQL. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    totals = []  # renamed from `all`, which shadowed the builtin
    with engine.connect() as connection:
        result = connection.execute(text("SELECT sum(account_balance) FROM Account"))
        for row in result:
            totals.append({
                'The total balance is': row[0]
            })
    return jsonify(totals)
# user number
@app.route('/user/number/', methods = ["GET"])
def get_user_number():
    """Report how many users exist. Admin credentials required."""
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    return jsonify({
        'The number of registered users are': Userz.query.count()
    }), 201
def get_dormant_period(a, b): # data["today's_date"], acc.last_transaction
    """Return the elapsed time between *b* and *a* as Year(s)/Month(s)/Day(s).

    :param a: later date as a "YYYY-MM-DD" string (today's date)
    :param b: earlier date-like object exposing .year/.month/.day
    :return: dict with keys "Year(s)", "Month(s)", "Day(s)"

    Uses borrow-based subtraction and treats every month as 30 days, matching
    the original implementation. The original function also contained ~30
    lines of unreachable duplicate logic after its first return, plus unused
    locals (data1/data2); both removed here without changing behavior.
    """
    parts = a.split("-")
    y1, m1, d1 = int(parts[0]), int(parts[1]), int(parts[2])
    y2, m2, d2 = b.year, b.month, b.day
    if m1 < m2:
        # Borrow a year so the month difference stays non-negative.
        y1 -= 1
        m1 += 12  # 12 months in a year
    if d1 < d2:
        # Borrow a month so the day difference stays non-negative.
        m1 -= 1
        d1 += 30  # imagine all months have 30 days
    return {
        "Year(s)": y1 - y2,
        "Month(s)": m1 - m2,
        "Day(s)": d1 - d2
    }
# list account that ever went dormant (has no transaction for 3 months straight). show account information and dormant period
@app.route('/account/dormant/', methods = ["GET"])
def get_dormant():
    """List accounts with no transaction in the last 3 months. Admin-only.

    Fixes three bugs in the months-1-to-3 path of the original:
    `m == 1` mapped to month 1 instead of 10 (3 months before January is
    October), the query used `filter_by(last_transaction < date2)` which is a
    runtime NameError, and `.all` was referenced without being called.
    """
    login = authz()
    # Authentication / authorization guards.
    if check_user(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Username is not registered'
        }), 400
    if get_password(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'Wrong Password'
        }), 400
    if not get_is_admin(login):
        return jsonify({
            'error': 'Bad Request',
            'message': 'You do not have the authority to do this operation'
        }), 400
    data = request.get_json()
    if not "today's_date" in data:
        return jsonify({
            'error': 'Bad Request',
            'message': "today's_date is not given"
        }), 400
    x = data["today's_date"].split("-")
    y, m, d = int(x[0]), int(x[1]), int(x[2])
    # Step back three months to get the dormancy cutoff date.
    if m > 3:
        m -= 3
    else:
        # Jan/Feb/Mar wrap into the previous year: 1->10, 2->11, 3->12.
        y -= 1
        m += 9
    cutoff = str(y) + "-" + str(m) + "-" + str(d)
    # NOTE(review): this compares dates as strings; it is only reliable if
    # last_transaction is stored zero-padded in the same YYYY-MM-DD shape.
    dormant_accounts = Account.query.filter(Account.last_transaction < cutoff).all()
    return jsonify([
        {
            "Account Number": acc.account_number,
            "Account Type": acc.account_type,
            "Account Balance": acc.account_balance,
            "Last Transaction": acc.last_transaction,
            "Owner Name": acc.x.full_name,
            "Dormant Period": get_dormant_period(data["today's_date"], acc.last_transaction)
        } for acc in dormant_accounts
    ])
|
from typing import List, Union, NamedTuple
from .dictionary import ZMachineDictionary
from .data_structures import ZWord, ZByte
class WordAndOffset(NamedTuple):
    """A word read from the text buffer together with its buffer offsets."""
    word: str  # the word text; may be '' when nothing was found at the offset
    offset: int  # buffer offset where this word starts
    next_offset: int  # buffer offset at which to continue scanning
def _next_word(memory: memoryview, word_separators: List[int], offset: int) -> Union[None, WordAndOffset]:
    """ Read the next word from the text buffer and return it along with the offset to the next word.

    :param memory: buffer to scan
    :param word_separators: byte values that terminate words and count as one-character words themselves
    :param offset: position in the buffer to start scanning at
    :return: Word and offset to next word, or None if there is no next word. If the word is an empty string, then no
             word was found at the given offset so continue iterating
    """
    first = memory[offset]
    if first in word_separators:
        # A separator is returned as a one-character word of its own.
        return WordAndOffset(chr(first), offset, offset + 1)
    if first == 0:
        # Zero byte terminates the buffer: no more words.
        return None
    chars = []
    pos = offset
    while True:
        byte = memory[pos]
        if byte in word_separators or byte == 0:
            # The separator will itself be the next word, so keep its offset.
            return WordAndOffset(''.join(chars), offset, pos)
        if byte == ord(' '):
            # Spaces are not words; resume scanning just past them.
            return WordAndOffset(''.join(chars), offset, pos + 1)
        chars.append(chr(byte))
        pos += 1
def tokenize(memory: memoryview, dictionary: ZMachineDictionary, text_buffer: int, parse_buffer: int):
    """ Tokenize the string in the text buffer into the token buffer

    Each parsed token occupies a 4-byte record in the parse buffer:
    2 bytes dictionary address, 1 byte letter count, 1 byte offset of the
    word within the text buffer.

    :param memory: Memory to read words and write tokens to
    :param dictionary: Dictionary to read words and word-separators from
    :param text_buffer: Offset of the text-buffer
    :param parse_buffer: Offset of the parse-buffer
    """
    # The max number of words is stored in the first byte of the the parse-buffer
    max_tokens = memory[parse_buffer]
    token_i = 0
    # Text starts one byte past text_buffer (byte 0 holds buffer metadata).
    word_offset = _next_word(memory, dictionary.word_separators, text_buffer + 1)
    while (word_offset is not None) and (token_i < max_tokens):
        # Empty-string words mean "nothing at this offset" and are skipped.
        if len(word_offset.word) > 0:
            dict_word_addr = dictionary.lookup_word(word_offset.word)
            num_letters = len(word_offset.word)
            # Token records start 2 bytes into the parse buffer (after the
            # max-count and parsed-count bytes), 4 bytes per token.
            block_offset = parse_buffer + 2 + (token_i * 4)
            ZWord.from_unsigned_int(dict_word_addr).write(memory, block_offset)
            ZByte.from_unsigned_int(num_letters).write(memory, block_offset+2)
            ZByte.from_unsigned_int(word_offset.offset - text_buffer).write(memory, block_offset+3)
            token_i += 1
        word_offset = _next_word(memory, dictionary.word_separators, word_offset.next_offset)
    # Write the number of tokens parsed into byte 1 of the parse buffer
    ZByte.from_unsigned_int(token_i).write(memory, parse_buffer + 1)
|
import json
from .recipe_class import Recipe
# Receives request from frontend, returns list of ingredients
def process_incoming_data(request):
    """Extract and return the ingredient list from a frontend request body."""
    # The request body is JSON shaped like {"ingredients": [...strings...]}.
    payload = json.loads(request.body)
    ingredient_list = payload['ingredients']
    # Echo what was received so the server console shows each query.
    print("POST INFO: Ingredients = " + str(ingredient_list))
    print("(Dish type blank for now...)")
    return ingredient_list
# determines # of additional ingredients in recipe
def num_of_additional_ingredients(recipe, givenIngredients):
    """Count how many of the recipe's ingredients are not in givenIngredients."""
    print(recipe.title)
    print(recipe.ingred_list)
    print(givenIngredients)
    # An ingredient is "additional" when no given ingredient equals it.
    extra = sum(1 for ingredient in recipe.ingred_list if ingredient not in givenIngredients)
    print(extra)
    return extra
# Parse RecipePuppy result. Returns a list of Recipe objects.
def process_query_result(query_result, given_ingredients):
    """Turn raw RecipePuppy dicts into cleaned, ranked, JSON-ready dicts."""
    recipes = [
        Recipe(raw["href"], raw["ingredients"], raw["thumbnail"], raw["title"])
        for raw in query_result
    ]
    # Normalize each recipe before ranking.
    for recipe in recipes:
        recipe.replace_thumbnail()
        recipe.clean_title()
    # Recipes needing the fewest extra ingredients come first.
    recipes.sort(key=lambda rec: num_of_additional_ingredients(rec, given_ingredients))
    # "Dictify" each recipe for sending to the frontend as JSON.
    return [recipe.dictify() for recipe in recipes]
|
import urllib.request

# Download a raw word2vec fixes file and save it under initial_data.
url = 'http://cirtec.ranepa.ru/Word2Vec/fixes.raw.txt'
# Use a context manager so the HTTP response is always closed
# (the original leaked the open connection).
with urllib.request.urlopen(url) as response:
    data = response.read()  # a `bytes` object
with open('../initial_data/Word2Vec__fixes.raw.txt', 'wb') as f:
    f.write(data)
|
#!/usr/bin/env python2
if __name__ == '__main__':
    import ctypes
    import sys
    # On Linux, Qt + GNU Radio render from multiple threads; XInitThreads()
    # must be called before any other Xlib call or the GUI can crash.
    if sys.platform.startswith('linux'):
        try:
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            # Best effort: missing libX11 (e.g. headless box) is non-fatal.
            print "Warning: failed to XInitThreads()"
import os
import sys
sys.path.append(os.environ.get('GRC_HIER_PATH', os.path.expanduser('~/.grc_gnuradio')))
from PyQt4 import Qt
from concurrent_transmission import concurrent_transmission # grc-generated hier_block
from gnuradio import eng_notation
from gnuradio import fosphor
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.fft import window
from gnuradio.filter import firdes
from gnuradio.qtgui import Range, RangeWidget
from optparse import OptionParser
import correctiq
import osmosdr
import sip
import time
from gnuradio import qtgui
import hackrf_software_channel
def argument_parser():
    """Build the OptionParser used for this flowgraph's command line."""
    return OptionParser(
        usage="%prog: [options]",
        option_class=eng_option,
        description='HackRF Software Channel',
    )
def main(options=None):
    """Start the flowgraph GUI and sweep the sample rate forever.

    :param options: pre-parsed optparse options; parsed from argv when None
    """
    if options is None:
        options, _ = argument_parser().parse_args()
    from distutils.version import StrictVersion
    # Qt >= 4.5 supports selecting the graphics system (raster is fastest).
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)
    tb = hackrf_software_channel.hackrf_software_channel()
    tb.start()
    tb.show()
    # Sweep the sample rate downward from 10 MS/s to 5 MS/s in 50 kS/s steps,
    # one step per second, restarting at the top forever.
    while True:
        for rate in range(10000000,5000000,-50000):
            time.sleep(1)
            tb.set_samp_rate(rate)
            print(rate)
    # NOTE(review): everything below is unreachable because of the infinite
    # loop above — the aboutToQuit hookup and qapp.exec_() never run, so the
    # Qt event loop is never entered. Looks unintentional; confirm.
    def quitting():
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # FIX: `signal` was never imported at module level, so this handler
        # raised NameError instead of terminating the process.
        import signal
        print("My pid in case you need more violent termination: %d"%(os.getpid(),))
        os.kill(os.getpid(), signal.SIGTERM)
|
from __future__ import with_statement
from pyaspell import Aspell
from util.net import isurl
from common.spelling.dicts import MakeDigsbyDict
from common.filetransfer import ManualFileTransfer
from common import profile, pref
from common.notifications import fire
objget = object.__getattribute__
import wx, os
import syck
import tarfile
import subprocess, _subprocess
from path import path
from util import callsback, threaded, autoassign, program_dir
import stdpaths
from common import setpref
from traceback import print_exc
import logging
log = logging.getLogger('spellchecker')
#L_O_G = log
# Directory containing the bundled aspell binaries, relative to the program dir.
ASPELLBINDIR = (program_dir() /'lib' / Aspell.LIBNAME).parent
from subprocess import Popen, PIPE, CalledProcessError
# Command template for launching aspell in pipe mode (-a); %s is filled with
# per-engine option strings built by FormAspellCommand.
ASPELL_CMD = './lib/aspell/bin/aspell %s -a --ignore-case'
# Template for a single long option: --key="value"
ASPELL_OPT = '--%s="%s"'
# Default English command used when no explicit parameters are given.
ASPELL_DFT = ASPELL_CMD % '--lang=en --encoding=utf-8 --keyboard=standard --sugMode=normal'
def FormAspellCommand(parameters):
    """Render an aspell pipe-mode command line from a dict of option name -> value.

    Backslashes in values are normalized to forward slashes so Windows paths
    survive aspell's option parsing.
    """
    option_strings = []
    for key in parameters:
        option_strings.append(ASPELL_OPT % (key, parameters[key].replace('\\', '/')))
    return ASPELL_CMD % ' '.join(option_strings)
class NullSpellEngine(object):
    """
    Fake SpellEngine for when there is none.

    Mirrors the SpellEngine interface: every word is reported as correctly
    spelled and no suggestions are offered.
    """
    lang = None
    def check(self, word):
        """Treat every word as correctly spelled."""
        return True
    def suggest(self, word):
        """Never offer suggestions."""
        return []
    def kill(self):
        """Nothing to shut down."""
        pass
    def add(self, word=None):
        """Accept (and discard) a word.

        FIX: the original signature was ``add(self)``, but SpellChecker.Add
        calls ``spellengine.add(word)`` — with the null engine active that
        raised TypeError. The word parameter defaults to None so any old
        zero-argument callers keep working.
        """
        pass
class SpellEngine(object):
    """
    Wraps an aspell child process (pipe mode, ``-a``) so the SpellChecker
    class can treat it as an object with check/suggest/add/kill.
    """
    def __init__(self, parameters = None):
        """
        Build the aspell command line from *parameters* (a dict of aspell
        option name -> value; must include 'lang') and start the process.
        Falls back to the English default command when no parameters given.
        """
        self.vLog = pref('messaging.spellcheck.verbose_log', default=False)
        if parameters:
            self.lang = parameters['lang']
            self.cmd = FormAspellCommand(parameters)
        else:
            self.lang = 'en'
            self.cmd = ASPELL_DFT
        self.start()
    def __nonzero__(self):
        """
        True while the aspell process is still running, False otherwise.
        """
        # poll() returns None while the child is alive.
        return self.aspell.poll() is None
    def start(self):
        """
        Start the aspell process with a hidden console window (Windows).

        Raises CalledProcessError if aspell exits or produces no banner line.
        """
        log.info('Starting aspell process with %s', self.cmd)
        # STARTUPINFO flags keep the child console window from flashing up.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= _subprocess.STARTF_USESHOWWINDOW
        startupinfo.wShowWindow = _subprocess.SW_HIDE
        self.aspell = Popen(self.cmd.encode('filesys'), stdin=PIPE, stdout=PIPE, stderr=PIPE, startupinfo=startupinfo)
        exitCode = self.aspell.poll()
        if exitCode != None:
            log.error('Aspell failed to start with exit code %s with comand: \n%s', exitCode, self.cmd)
        # aspell prints a one-line identification banner on successful start.
        startupstring = self.aspell.stdout.readline()
        if startupstring == '':
            log.error('Aspell failed to start and is exiting')
            raise CalledProcessError(0,'ZOMG')
    def kill(self):
        """
        Kill the aspell process (closing stdin makes pipe-mode aspell exit).
        """
        log.info('Killing Aspell')
        self.aspell.stdin.close()
    def add(self, word):
        """
        Add a word to the aspell personal dictionary and save it to disk.
        """
        self.resuscitate()
        #L_O_G.info('Adding "%s" to dictionary', word)
        # in aspell *word saves word in the personal dictionary, and sending # saves the changes to disk
        try:
            self.aspell.stdin.write('*%s\n#\n' % word)
        except:
            log.error('Failed communicating to aspell process, poll result: %s;', self.aspell.poll())
    def resuscitate(self):
        """
        Starts a new aspell process if the current one has died.
        """
        aspellIsDead = self.aspell.poll() is not None
        if aspellIsDead:
            log.info('Resuscitating aspell')
            self.start()
    def suggest(self, word):
        """
        Return a list of possible corrections for *word*; empty if correct.

        Temporarily switches aspell to 'normal' suggestion mode for better
        suggestions, then back to 'ultra' so check() stays fast.
        """
        self.resuscitate()
        #L_O_G.info('Looking up suggestions for "%s"', word)
        try:
            self.aspell.stdin.write(('$$cs sug-mode,normal\n')) #switch mode to normal
            self.aspell.stdin.write(('^%s\n' % word)) #prepend ^ to the word to make sure it's checked and not treated as a command
            self.aspell.stdin.write(('$$cs sug-mode,ultra\n')) #switch mode back to ultra
        except IOError:
            log.error('Failed communicating to aspell process, poll result: %s;', self.aspell.poll())
            return []
        output = self.aspell.stdout.readline()
        #return an empty list if the word is correct(*) or no suggestions(#)
        if not output or output[0] in '*#':
            # drain the rest of aspell's response before returning
            while output != '\r\n' and output != '':
                output = self.aspell.stdout.readline()
            return []
        # expected format of aspell response:
        # & [original] [count] [offset]: [miss], [miss], ...
        cutindex = output.find(':') + 2
        output = output[cutindex:].strip()
        suggestions = output.split(', ')
        #L_O_G.info('Suggested %s', suggestions)
        #flush stdout
        while output != '\r\n' and output != '':
            #L_O_G.info('Output %s', output)
            output = self.aspell.stdout.readline()
        return suggestions
    def check(self, word):
        """
        Return True if *word* is correctly spelled, False otherwise.

        On any communication failure the word is treated as correct so the
        UI never red-underlines everything because aspell died.
        """
        self.resuscitate()
        #L_O_G.info('Checking %s...;', word)
        try:
            self.aspell.stdin.write(('^%s\n' % word))
        except IOError:
            log.error('Failed communicating to aspell process, poll result: %s;', self.aspell.poll())
            return True
        output = self.aspell.stdout.readline()
        if not output:
            log.error('Aspell is likely dead, empty string read from stdout, poll result: %s;', self.aspell.poll())
            return True
        #Correct spelling is signified by a '*'
        correct = output[0] == '*'
        #L_O_G.info('Checked. %s is %s;', word,'OK' if correct else 'INCORRECT')
        #flush stdout
        while output != '\r\n':
            output = self.aspell.stdout.readline()
        return correct
def LocalAspellDataDir(more = ''):
    """Return the per-user aspell data directory, optionally joined with *more*."""
    base = stdpaths.userlocaldata / ('aspell%s' % Aspell.VERSION) #@UndefinedVariable
    return base / 'dict' / more
class SpellChecker(object):
    """
    Owns a SpellEngine (or NullSpellEngine) and exposes Check/Suggest/Add.

    Watches the spellcheck prefs and swaps engines when they change;
    downloads and installs missing dictionaries on demand.
    """
    def __init__(self, lang_override = None):
        """
        Load dictionary metadata from the bundled YAML file and start an engine.

        :param lang_override: optional language code used instead of the
                              'messaging.spellcheck.engineoptions.lang' pref
        """
        #L_O_G.info('init spellchecker')
        self.spellengine = None
        self.lang = None
        self._need_to_download = None   # language id queued for download, if any
        self.currentDownloads = set()   # language ids currently downloading
        self.expectedNext = None        # language the user most recently asked for
        # load YAML file describing the dictionaries
        filename = program_dir() / 'res' / ('dictionaries-%s.yaml' % Aspell.VERSION)
        try:
            with open(filename) as f:
                self.dict_info = syck.load(f)
            if not isinstance(self.dict_info, dict):
                raise ValueError('invalid YAML in %s' % filename)
        except Exception:
            # best effort: missing/corrupt metadata leaves us with no known
            # dictionaries rather than crashing at startup
            print_exc()
            self.dict_info = {}
        # load an engine using swap engine, if no engine is loaded use the NullSpellEngine
        if not self.SwapEngine(lang_override):
            self.spellengine = NullSpellEngine()
        # recreate the engine whenever any relevant pref changes
        profile.prefs.add_observer(self.on_prefs_change, #@UndefinedVariable
            'messaging.spellcheck.enabled',
            'messaging.spellcheck.engineoptions.lang',
            'messaging.spellcheck.engineoptions.encoding',
            'messaging.spellcheck.engineoptions.keyboard',
            'messaging.spellcheck.engineoptions.sug-mode') #@UndefinedVariable
    def CreateEngine(self, lang_override=None):
        '''
        Create an Aspell engine from the values in prefs. Optional lang_override allows for creating an engine in a different
        language.
        http://aspell.net/man-html/The-Options.html
        TODO: take lots of kwargs and use them to override the options going into the Aspell engine
        Returns the new Aspell object if it was created.
        Returns None if the requested language was not found.
        Raises all unknown errors.
        '''
        # spellcheck disabled or unknown language -> inert engine
        if (not self._pref('enabled')) or \
            pref('messaging.spellcheck.engineoptions.lang') not in self.dict_info:
            return NullSpellEngine()
        #Time to build the args
        #first set of args comes from the prefs
        spellprefs = 'lang encoding keyboard'.split()
        parameters = dict((str(key), str(pref('messaging.spellcheck.engineoptions.' + key))) for key in spellprefs)
        #so check is fast
        parameters['sug-mode'] = 'ultra'
        if lang_override is not None:
            parameters['lang'] = lang_override
        lang = parameters['lang']
        #set the directories
        local_dir = LocalAspellDataDir()
        parameters['local-data-dir'] = local_dir.encode('filesys')
        parameters['add-word-list-path'] = local_dir.encode('filesys')
        # per-user home dir holds the personal (added-words) dictionary
        home_dir = local_dir / profile.username
        if not home_dir.isdir():
            home_dir.makedirs()
        parameters['home-dir'] = home_dir.encode('filesys')
        # English ships with the app; other languages live in the local dir
        if not lang.startswith('en'):
            parameters['dict-dir'] = local_dir.encode('filesys')
        #If the digsby dict for this language doesn't exist, make it, mostly just for english the first time you run it
        #other languages should lready have it at this point
        digsby_dict_location = local_dir / ('digsby-%s.rws' % lang)
        if not digsby_dict_location.isfile():
            try:
                MakeDigsbyDict(lang, local_dir)
            except CalledProcessError, e:
                log.error("failed to create Digsby Dictionary in '%s' at '%s', probable cause: dict not yet downloaded, exception was '%s'", lang, local_dir, e)
                return None
        parameters['add-extra-dicts'] = digsby_dict_location.encode('filesys')
        #encode for filesystem
        for k,v in parameters.items():
            if isinstance(v, unicode):
                parameters[k] = v.encode('filesys')
        try:
            speller = SpellEngine(parameters)
        except CalledProcessError:
            log.error('SpellEngine failed to load, returning None')
            speller = None
        return speller
    def __nonzero__(self):
        """
        True if aspell is running, false otherwise
        """
        # NOTE(review): SpellChecker never sets self.aspell anywhere — this
        # raises AttributeError if ever evaluated. Probably meant to delegate
        # to bool(self.spellengine); confirm before relying on truthiness.
        return self.aspell.poll() == None
    def on_prefs_change(self, *a, **k):
        '''
        This is the function that watches the related prefs. currently we just create a new engine and toss the old one.
        '''
        log.info('Spelling prefs changed, switching engines')
        self.SwapEngine()
    def SwapEngine(self, lang_override=None, shouldDownloadOnFail = True):
        '''
        Toss the old spellengine and create a new one using CreateEngine().
        If creation fails, the last language used is substituted and the '_need_to_download' attribute
        is set to the requested language.
        Takes an optional lang_override to create another spell checker. This is passed directly to CreateEngine
        Returns True if a new engine was created and False if the old one was retained.
        '''
        #L_O_G.info('SwapEngine')
        try:
            newengine = self.CreateEngine(lang_override)
        except Exception:
            log.error('Something just went horribly wrong in CreateEngine...')
            print_exc()
            return False
        if not newengine and shouldDownloadOnFail: # fail, but download
            # queue the wanted language and prompt for download on the GUI thread
            self._need_to_download = lang_override or self._pref('engineoptions.lang')
            wx.CallAfter(self.DownloadDict)
            return False
        elif newengine: #success
            if self.spellengine is not None:
                self.spellengine.kill()
            self.spellengine = newengine
            self.lang = self.spellengine.lang
            log.info('Speller switched to %r', self.lang)
            return True
        else: #Epic Fail
            log.error("Language not loaded but already attempted retrieving it")
            return False
    def _pref(self, name, default=sentinel, type=sentinel):
        '''
        Convenience method to get a pref prefixed with 'messaging.spellcheck'
        '''
        return pref('messaging.spellcheck.'+name, default=default, type=type)
    def _get_encoding(self):
        # Encoding used when talking to the aspell process; defaults to utf-8.
        return self._pref('engineoptions.encoding', type=str, default='utf-8')
    def _encode(self, s):
        '''
        Encode a string using the encoding determined by the user's spellcheck prefs
        '''
        if isinstance(s, unicode):
            return s.encode(self._get_encoding())
        else:
            return s
    def _decode(self, s):
        '''
        Decode a string using the encoding determined by the user's spellcheck prefs
        '''
        if isinstance(s, str):
            return s.decode(self._get_encoding())
        else:
            return s
    def Check(self,text):
        """
        Returns True if the word is correctly spelled, false otherwise.

        Digits and URLs are always considered correct.
        """
        if self.spellengine is None or not text:
            return True
        puretext = self._encode(text)
        return puretext.isdigit() or isurl(puretext) or self.spellengine is None or self.spellengine.check(puretext)
    def Suggest(self,word,count=None):
        """
        Return a list of suggested replacement words if the word is spelled incorrectly
        Returns an empty list if the word is correctly spelled

        :param count: max number of suggestions; defaults to the
                      'messaging.spellcheck.max_suggestions' pref
        """
        if self.spellengine is None:
            return []
        if not word:
            return []
        if not count:
            count = self._pref("max_suggestions")
        suggestions = self.spellengine.suggest(self._encode(word))
        if len(suggestions) > count:
            suggestions = suggestions[:count]
        return [self._decode(s) for s in suggestions]
    def Add(self, word):
        """
        Add a word to the personal dictionary
        """
        if self.spellengine is None:
            return
        self.spellengine.add(self._encode(word))
    def DownloadDict(self):
        """
        Get everything set for, then call, DownloadAndInstall.

        Prompts the user, guards against duplicate downloads, and wires the
        success/error/cancel callbacks. Must run on the GUI thread (wx).
        """
        # decide if we actualy need to get the language
        self._need_to_download, need = None, self._need_to_download
        if not need or need == self.lang:
            log.error('not downloading dictionary')
            return
        #set what langugae is expected next
        self.expectedNext = need
        if need in self.currentDownloads:
            log.info('Already downloading dictionary, returning')
            return
        #Get the full name of the language
        langInfo = self.dict_info[need]
        langName = langInfo['name_english']#'name_native' if 'name_native' in langInfo else
        #ask the user about downloading
        log.info('Download %s?', need)
        userResponse = wx.MessageBox(_('You need to download the {langname} dictionary to use it. Would you like to download this dictionary now?').format(langname=langName),
                                     _('Download Dictionary?'),
                                     wx.YES_NO)
        #if the user answered no, inform them of how to download and return
        if userResponse == wx.NO:
            # revert the lang pref to the engine that is actually loaded
            lastlang = self.spellengine.lang
            if lastlang:
                setpref('messaging.spellcheck.engineoptions.lang', lastlang)
            dictcancel_hdr = _('Dictionary not downloaded.')
            dictcancel_msg = _('To download it later, select it in the Conversation Preferences.')
            wx.MessageBox(u'%s\n\n%s' % (dictcancel_hdr, dictcancel_msg),
                          _('Download Dictionary Canceled'),
                          wx.OK)
            return
        #build URL
        remote_repo = pref('messaging.spellcheck.aspell_mirror', type=str, default='http://dict.digsby.com/')
        remote_path = remote_repo + langInfo['location']
        def on_install_success():
            log.info('%r has been installed.', need)
            #Swap out the language if the new language is still selected
            if self.expectedNext == need:
                #Attempt the swap and fire a notification on success
                if self.SwapEngine(shouldDownloadOnFail=False):
                    fire('dictionary.install',
                         title=_('Dictionary Set'),
                         msg=_('Spellcheck language has been set to {langname}.').format(langname=langName),
                         popupid='dict_install_%s' % self.lang)
                #If successfull download and install, but fails to load, fire a error notification
                else:
                    fire('dictionary.install',
                         title=_('Spellcheck error'),
                         msg=_('Failed setting up dictionary. Try reselecting desired language in the preferences.'),
                         popupid='dict_install_%s' % self.lang)
            #if no longer the set language announce the install was complete
            else:
                fire('dictionary.install',
                     title=_('Dictionary Installed'),
                     msg=_('You can set this language in the conversation preferences.'),
                     popupid='dict_install_%s' % self.lang)
            #Remove the language from current downloads
            self.currentDownloads.discard(need)
        #if there's an error, log it
        def on_install_error():
            log.error('There was an error installing %s', need)
            self.currentDownloads.discard(need)
        def on_install_cancel():
            log.info('Dictionary download cancelled by user.')
            self.currentDownloads.discard(need)
            # revert the lang pref to the engine that is actually loaded
            lastlang = self.spellengine.lang
            if lastlang:
                setpref('messaging.spellcheck.engineoptions.lang', lastlang)
        #add to the current downloads set to pervent duplicate downloads
        self.currentDownloads.add(need)
        #Start download
        log.info('Downloading %r from %r', need, remote_path)
        DownloadAndInstall(need, langName, remote_path,
                           cancel = on_install_cancel,
                           success = on_install_success,
                           error = on_install_error)
class DictionaryInstaller(object):
    """
    Installs a downloaded .tar.bz2 dictionary archive: extracts the aspell
    data files into the local dictionary directory, then compiles the word
    lists (.cwl) into ready-to-use .rws dictionaries.
    """
    # archive members worth extracting
    SUFFIXES = '.alias .multi .cwl .rws .dat'.split()
    def __init__(self, id, bz2path):
        """
        :param id: language id (e.g. 'es') the archive contains
        :param bz2path: path to the downloaded .tar.bz2 archive
        """
        # autoassign stores `id` and `bz2path` on self
        autoassign(self, locals())
        self.cwl_files = []
    @callsback
    def Install(self, callback=None):
        """
        Extract the archive on a background thread, then Finalize on success.
        Completion/failure is reported through *callback*.
        """
        log.info('Installing Dictionary...')
        #fire a notification
        fire('dictionary.install', title=_('Installing Dictionary'), msg=_('Dictionary will be activated after install completes.'),
             popupid='dict_install_%s' % self.id)
        #go Extract, then Finalize on success
        self.Extract(error = callback.error,
                     success = lambda:self.Finalize(callback=callback))
        log.info('Finished Installing Dictionary')
    @threaded
    def Extract(self):
        """
        Extract the useful files from the tar.bz2 to the local dict directory.
        Runs on a background thread (@threaded).
        """
        log.info('Extracting Dictionary...')
        log.info('Opening tar %s', self.bz2path)
        # FIX: keep our own handle on the archive file object — tarfile does
        # not close a fileobj it was handed, so the original leaked it.
        archive = open(self.bz2path, 'rb')
        tar = tarfile.open(fileobj=archive, mode='r:bz2')
        log.info('Tar opened')
        fobj = None
        outfile = None
        try:
            #Extract any .alias, .multi, .cwl, .rws, and .dat files from the temp file
            log.info('Retrieving file information from tar')
            for fileinfo in tar:
                if not fileinfo.isfile():
                    continue
                fname = path(fileinfo.name.decode('filesys'))
                if fname.ext and fname.ext in self.SUFFIXES:
                    log.info('Extracting %s', fname)
                    ex_path = path(LocalAspellDataDir()) / fname.name
                    # remember .cwl files: Finalize compiles them into .rws
                    if fname.ext == '.cwl':
                        self.cwl_files.append(ex_path)
                    if not ex_path.parent.isdir():
                        ex_path.parent.makedirs()
                    fobj = tar.extractfile(fileinfo)
                    # copy in 16 KiB chunks until the member is fully written
                    with open(ex_path, 'wb') as outfile:
                        while outfile.tell() < fileinfo.size:
                            outfile.write(fobj.read(16*1024))
                    log.info('Extracted %s', fname)
                else:
                    log.info('Ignoring %s', fname)
        except Exception:
            # best effort: log and fall through so files get closed
            log.error('Failed extracting files')
        finally:
            #close all files
            for f in (tar, fobj, outfile, archive):
                if f is not None:
                    f.close()
        log.info('Finished Extracting Dictionary...')
        return True
    @threaded
    def Finalize(self):
        """
        Decompress the CWLs to make RWSs, then build the Digsby dictionary.
        Runs on a background thread (@threaded).
        """
        def quote_encode(s):
            return '"%s"' % s.encode('filesys')
        aspell_opts = ["--lang=%s" % self.id,
                       "--local-data-dir=%s" % quote_encode(LocalAspellDataDir().strip('\\')),
                       "create", "master"]
        decomp_opts = ['d']
        decomp_exe = ASPELLBINDIR/'word-list-compress.exe'
        aspell_exe = ASPELLBINDIR/'aspell.exe'
        startupinfo = subprocess.STARTUPINFO() #@UndefinedVariable
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW #@UndefinedVariable
        startupinfo.wShowWindow = subprocess.SW_HIDE #@UndefinedVariable
        log.info('Decompressing wordlists')
        for cwl in self.cwl_files:
            rws = path(LocalAspellDataDir()) / cwl.namebase + '.rws'
            # pipeline: word-list-compress d < x.cwl | aspell create master x.rws
            command = ['cmd', '/c', quote_encode(decomp_exe)] + decomp_opts + \
                      ['<', quote_encode(cwl), '|', quote_encode(aspell_exe)] + aspell_opts + [quote_encode(rws)]
            command = ' '.join(command)
            # this will raise an exception if the command fails, and callsback will call our error callback
            log.info('Decompressing %s', cwl)
            log.info("Executing: %r", command)
            subprocess.check_call(command, shell=True, startupinfo=startupinfo)
            os.remove(cwl)
        os.remove(self.bz2path)
        #Make the digsby dict
        local_dir = LocalAspellDataDir()
        # FIX: the original used the bare builtin `id` here and in the log
        # call below, yielding 'digsby-<built-in function id>.rws' instead of
        # the installed language's id.
        digsby_dict_location = local_dir / ('digsby-%s.rws' % self.id)
        if not digsby_dict_location.isfile():
            try:
                MakeDigsbyDict(self.id, local_dir)
            except Exception:
                log.error("failed to create Digsby Dictionary in '%s', probable cause: dict not yet downloaded", self.id)
                return None
        #dictionary installed notification
        fire('dictionary.install', title=_('Dictionary Installed'), msg=_('Setting spellcheck language...'),
             popupid='dict_install_%s' % self.id)
        return True
@callsback
def DownloadAndInstall(langID, langName, remotePath, cancel, callback=None):
    """
    Download a dictionary archive and install it, via a ManualFileTransfer
    and a DictionaryInstaller, chaining completion through *callback*.

    :param langID: language id, e.g. 'es'
    :param langName: human-readable language name, used in the transfer title
    :param remotePath: URL of the .tar.bz2 dictionary archive
    :param cancel: callable invoked if the user cancels the download
    """
    log.info('Downloading dictionary...')
    # on successful download, hand the file straight to the installer
    ManualFileTransfer( _('{langname} Dictionary').format(langname=langName), remotePath,
                        lambda downloaded_path: DictionaryInstaller(langID, downloaded_path).Install(callback=callback),
                        cancel,
                        callback.error).manual_download()
'''
cd aspell
bin\word-list-compress.exe d < dict\es.cwl | bin\aspell.exe --lang=es create master es.rws
'''
class SpellCheckerMock(object):
    """
    Lazy singleton proxy: builds the real SpellChecker on first attribute
    access and forwards everything to it afterwards.
    """
    def __get_attr__doc_note(self): pass  # (no-op removed)
    def __getattr__(self, key, default = sentinel):
        # NOTE(review): `sentinel` is not visibly imported in this file —
        # presumably provided by one of the star-ish util imports; confirm.
        try:
            spellchecker = object.__getattribute__(self, '_spellchecker')
        except AttributeError:
            #L_O_G.info('No old spellchecker found... Creating')
            # first access: build the real checker, falling back to English
            # if construction with the preferred language fails
            try:
                spellchecker = SpellChecker()
            except:
                spellchecker = SpellChecker(lang_override='en')
            #L_O_G.info('Setting _spellchecker')
            object.__setattr__(self, '_spellchecker', spellchecker)
        if default is sentinel:
            return getattr(spellchecker, key)
        else:
            return getattr(spellchecker, key, default)
# module-level singleton used by the rest of the app
spellchecker = SpellCheckerMock()
|
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base
from kvv_processor.model.weather import Weather
# Declarative base shared by the ORM models in this module.
Base = declarative_base()
class DatabaseWeather(Base):
    """SQLAlchemy ORM mapping for the 'Weather' table.

    Mirrors the plain ``Weather`` domain object; use :meth:`to_weather_object`
    to convert a row into one.
    """
    __tablename__ = 'Weather'
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    temp = sqlalchemy.Column(sqlalchemy.DECIMAL)      # temperature
    humidity = sqlalchemy.Column(sqlalchemy.DECIMAL)
    pressure = sqlalchemy.Column(sqlalchemy.DECIMAL)
    wind = sqlalchemy.Column(sqlalchemy.DECIMAL)      # wind speed; units not specified here
    clouds = sqlalchemy.Column(sqlalchemy.DECIMAL)    # cloud cover
    def to_weather_object(self):
        """Convert this database row into a plain Weather domain object."""
        return Weather(self.id, self.temp, self.humidity, self.pressure, self.wind, self.clouds)
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 13 13:40:52 2019
@author: domin
"""
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import axes3d
#used to plot the Bloch sphere
phi = np.linspace(0, np.pi, 20)
theta = np.linspace(0, 2 * np.pi, 30)
# outer products give the wireframe grid of the unit sphere
blochX = np.outer(np.sin(theta), np.cos(phi))
blochY = np.outer(np.sin(theta), np.sin(phi))
blochZ = np.outer(np.cos(theta), np.ones_like(phi))
#point on the Bloch sphere (polar angle pi/4, azimuth 0)
theta1=np.pi/4
phi1=0
x=np.sin(theta1)*np.cos(phi1)
y=np.sin(theta1)*np.sin(phi1)
z=np.cos(theta1)
print(x,y,z)
# Pauli-X gate applied to |psi> = cos(theta/2)|0> + sin(theta/2)|1>
X=np.array([[0,1],[1,0]])
psi=np.array([[np.cos(theta1/2)],[np.sin(theta1/2)]])
psi2=np.matmul(X,psi)
# recover the polar angle of the rotated state from its |0> amplitude
theta2=np.arccos(psi2[0,0])*2
x2=np.sin(theta2)*np.cos(0)
y2=np.sin(theta2)*np.sin(0)
z2=np.cos(theta2)
fig=plt.figure(figsize=(15,15))
# NOTE(review): aspect='equal' on a 3d axes raises NotImplementedError on
# some matplotlib versions — confirm against the pinned version.
ax = fig.add_subplot(111, projection='3d', aspect='equal')
#connect a vector to the point on the sphere
origin = [[0],[0],[0]]
ax.quiver(*origin, [x],[y],[z],arrow_length_ratio=0.1)
ax.quiver(*origin, [x2],[y2],[z2],color='r',arrow_length_ratio=0.1)
#generates the X,Y,Z axis on the Bloch sphere
ax.quiver([-1],[0],[0], [2],[0],[0], color='k', lw=1.5,arrow_length_ratio=0)
ax.quiver([0],[-1],[0], [0],[2],[0], color='k', lw=1.5,arrow_length_ratio=0)
ax.quiver([0],[0],[-1], [0],[0],[2], color='k', lw=1.5,arrow_length_ratio=0)
#plots the sphere
ax.plot_wireframe(blochX,blochY,blochZ, color='y', rstride=1, cstride=1)
#plots the point before and after the X gate
ax.scatter(x, y, z, s=100, c='r', zorder=10)
ax.scatter(x2, y2, z2, s=100, c='r', zorder=10)
#changes the values below to angle of orientation
ax.view_init(azim=20, elev=10)
import torch
import torch.nn as nn
from transformers import BertModel
class TextClassificationModel(nn.Module):
    """BiLSTM encoder over BERT embeddings with a single attention read-out.

    A query vector (projected from ``sample_m``) attends over per-sentence
    keys (projected from ``sentence_ms``); the attention weights pool the
    BiLSTM outputs, which are then classified.
    """
    def __init__(self, rnn_dim, rnn_num_layer, im_dim, que_dim, key_dim, deep, num_class):
        """
        :param rnn_dim: LSTM hidden size (per direction)
        :param rnn_num_layer: number of stacked LSTM layers
        :param im_dim: intermediate linear layer size
        :param que_dim: query feature size (sample_m)
        :param key_dim: key feature size (sentence_ms)
        :param deep: shared projection size for query/key dot product
        :param num_class: number of output classes
        """
        super(TextClassificationModel, self).__init__()
        self.dropout = nn.Dropout(0.2)
        # input_size=768 matches BERT-base hidden size
        self.rnn = nn.LSTM(input_size=768, hidden_size=rnn_dim, num_layers=rnn_num_layer, batch_first=True,
                           dropout=0.2, bidirectional=True)
        self.rnn_im = nn.Linear(rnn_dim*2, im_dim)  # *2 for bidirectional
        self.im_out = nn.Linear(im_dim, num_class)
        # Att part
        self.pro_query = nn.Linear(que_dim, deep)
        self.pro_key = nn.Linear(key_dim, deep)
        # NOTE(review): pro_value is defined but never used in forward()
        self.pro_value = nn.Linear(rnn_dim * 2, rnn_dim * 2)
    def forward(self, inputs, sample_m, sentence_ms, sen_mask, using_GPU=False):
        """Run attention-pooled classification.

        Assumed shapes (inferred from the ops — TODO confirm at call site):
        inputs (batch, seq, 768); sample_m (batch, que_dim);
        sentence_ms (batch, seq, key_dim); sen_mask (batch, seq) additive
        mask (e.g. large negative at padding). Returns log-probabilities
        of shape (batch, num_class).
        """
        rnn_re, _ = self.rnn(inputs)
        rnn_re = self.dropout(rnn_re)
        # (batch, 1, deep) query against (batch, deep, seq) keys
        batch_query = self.pro_query(sample_m).unsqueeze(1)
        batch_keys = self.pro_key(sentence_ms).permute(0, 2, 1)
        batch_values = rnn_re
        batch_logits = torch.matmul(batch_query, batch_keys)
        # additive mask before softmax hides padded positions
        batch_logits = batch_logits + sen_mask.unsqueeze(1)
        batch_weights = nn.functional.softmax(batch_logits, dim=-1)
        # weighted sum over the sequence -> (batch, rnn_dim*2)
        result = torch.matmul(batch_weights, batch_values).squeeze(1)
        im = self.rnn_im(result)
        im = self.dropout(im)
        out = self.im_out(im)
        output = nn.functional.log_softmax(out, dim=-1)
        return output
|
import requests
import logging
# NOTE(review): setLevel alone has no visible effect unless a handler is
# configured (e.g. logging.basicConfig) — presumably done by the test runner.
logging.getLogger("requests").setLevel(logging.DEBUG)
# Smoke test against a locally running server: each route must serve its own
# content and not the other route's.
rq = requests.Session()
resp = rq.get('http://127.0.0.1:8085/foo')
assert('/foo' in resp.text)
assert('/bar' not in resp.text)
print(resp)
resp = rq.get('http://127.0.0.1:8085/bar', timeout=2)
assert('/bar' in resp.text)
assert('/foo' not in resp.text)
print(resp)
|
import calendar
def main():
date = input("Enter a date formatted as dd/mm/rrrr: ")
date_list = date.split('/')
day = date_list[0]
month = int(date_list[1])
month_full_name = calendar.month_name[month]
year = date_list[2]
print(f'{day} {month_full_name} {year}')
main()
|
# Generated by Django 3.1.10 on 2021-07-30 09:15
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the EditorialPageSubjectPlacement model
    (and its table) from the editorial app."""

    dependencies = [
        ("editorial", "0018_add_help_text"),
    ]

    operations = [
        migrations.DeleteModel(
            name="EditorialPageSubjectPlacement",
        ),
    ]
|
#!bin/python3
# "Insertion Sort - Part 1": the prefix Arr[0..size-2] is already sorted;
# insert the last element into it, printing the array after every shift and
# once more after the final placement.
size = int(input().strip())
Arr = [int(arrItem) for arrItem in input().strip().split(' ')]

e = Arr[size - 1]  # the value to insert
i = size - 2
# Walk left from the end, shifting every element greater than e one slot
# right. (The original scanned left-to-right with a tempIndex that defaulted
# to 0, which mis-shifted the whole array whenever e was the largest element,
# and its strict double comparison also broke on duplicates.)
while i >= 0 and Arr[i] > e:
    Arr[i + 1] = Arr[i]
    print(*Arr)
    i -= 1
# Drop e into the hole that opened up and print the final array.
Arr[i + 1] = e
print(*Arr)
|
__all__ = ()
from re import compile as re_compile, escape as re_escape, I as re_ignore_case, U as re_unicode
from scarletio import LOOP_TIME, Task
from hata import KOKORO, DiscordException, ERROR_CODES, InviteTargetType, Embed, ICON_TYPE_NONE, elapsed_time, \
Permission, Emoji
from hata.ext.slash import abort
from ..bot_utils.constants import GUILD__SUPPORT
from ..bots import SLASH_CLIENT
# How long cached guild pages/embeds stay valid before a rebuild.
CACHING_INTERVAL = 8 * 60.0 * 60.0 # 8 hour
# Guilds shown per embed page.
GUILD_PER_PAGE = 5
# Case-insensitive match for Koishi-related guild names.
PATTERN = re_compile(
    '|'.join(
        re_escape(value) for value in (
            'komeiji', 'koishi', '古明地', 'こいし', 'kkhta'
        )
    ),
    re_ignore_case | re_unicode,
)
# page index (0-based) -> list of guilds
GUILD_CACHE = []
# page number (1-based) -> built embeds
EMBED_CACHE = {}
# page number -> in-flight embed builder Task (shared between callers)
EMBED_BUILDER_TASKS = {}
def clear_cache():
    """Drop every cached guild page, built embed and pending builder task."""
    for cache in (GUILD_CACHE, EMBED_CACHE, EMBED_BUILDER_TASKS):
        cache.clear()
def cache_guilds():
    """Collect Koishi-themed guilds owned by support members and page them.

    Matching guilds are sorted, then stored into GUILD_CACHE in chunks of
    GUILD_PER_PAGE (the final chunk may be shorter).
    """
    matched = [
        guild
        for guild in SLASH_CLIENT.guilds
        if (guild.owner_id in GUILD__SUPPORT.users) and (PATTERN.search(guild.name) is not None)
    ]
    matched.sort()

    for start in range(0, len(matched), GUILD_PER_PAGE):
        GUILD_CACHE.append(matched[start : start + GUILD_PER_PAGE])
def get_embed(page):
    """Return the cached embeds for *page*, building them on a cache miss.

    Aborts the interaction when the page is outside [1, page count].
    """
    cached = EMBED_CACHE.get(page)
    if cached is not None:
        return cached

    if not (1 <= page <= len(GUILD_CACHE)):
        abort(f'Page index out of expected range: [1, {len(GUILD_CACHE)}]')

    return build_embed(page)
async def build_embed(page):
    """Yield the embeds for *page*, sharing one builder task between callers.

    This is an async generator used as a deferred interaction response: the
    first bare ``yield`` acknowledges the interaction, the second yields the
    built embeds. Concurrent calls for the same page await the same Task;
    only the call that created the Task caches the result.
    """
    yield
    task = EMBED_BUILDER_TASKS.get(page, None)
    if task is None:
        # we own the build: start it and register it so parallel calls join us
        task = Task(KOKORO, build_embed_task(page))
        EMBED_BUILDER_TASKS[page] = task

        embeds = None
        try:
            embeds = await task
        finally:
            # remove our registration unless someone replaced it meanwhile
            if EMBED_BUILDER_TASKS.get(page, None) is task:
                del EMBED_BUILDER_TASKS[page]

        # only cache a successful build
        if embeds is not None:
            EMBED_CACHE[page] = embeds
    else:
        # another caller is already building this page; wait for its result
        embeds = await task

    yield embeds
async def build_embed_task(page):
    """Build one embed per guild on *page*, footering the last with paging info."""
    embeds = [await build_guild_embed(guild) for guild in GUILD_CACHE[page - 1]]
    embeds[-1].add_footer(f'Page {page} out of {len(GUILD_CACHE)}')
    return embeds
async def build_guild_embed(guild):
    """Build a showcase embed for one guild: invite link, counts, age, boosts.

    Fetches the guild from the API when the approximate user count is not
    yet populated locally.
    """
    invite_url = await try_get_invite_url_of(guild)
    approximate_user_count = guild.approximate_user_count
    if approximate_user_count == 0:
        # counts are lazily populated; a guild_get refreshes them
        await SLASH_CLIENT.guild_get(guild)
        approximate_user_count = guild.approximate_user_count
    if invite_url is None:
        description = None
    else:
        vanity_code = guild.vanity_code
        if vanity_code is None:
            description = f'[Join {guild.name} !]({invite_url})'
        else:
            description = f'[Join discord.gg/{vanity_code} !]({invite_url})'
    embed = Embed(
        guild.name,
        description,
        # derive a stable color from the icon hash, or from the creation
        # timestamp bits of the id when there is no icon
        color = (guild.icon_hash & 0xFFFFFF if (guild.icon_type is ICON_TYPE_NONE) else (guild.id >> 22) & 0xFFFFFF),
    ).add_thumbnail(
        guild.icon_url_as(size=128),
    )
    guild_description = guild.description
    if (guild_description is not None):
        embed.add_field(
            'Description',
            (
                f'```\n'
                f'{guild_description}\n'
                f'```'
            ),
        )
    # stats fields; inline ones render three per row
    embed.add_field(
        'Users',
        (
            f'```\n'
            f'{approximate_user_count}\n'
            f'```'
        ),
        inline = True,
    ).add_field(
        'Online users',
        (
            f'```\n'
            f'{guild.approximate_online_count}\n'
            f'```'
        ),
        inline = True,
    ).add_field(
        'Age',
        (
            f'```\n'
            f'{elapsed_time(guild.created_at)}\n'
            f'```'
        ),
    ).add_field(
        'Boost level',
        (
            f'```\n'
            f'{guild.premium_tier}\n'
            f'```'
        ),
        inline = True,
    ).add_field(
        'Emojis',
        (
            f'```\n'
            f'{len(guild.emojis)}\n'
            f'```'
        ),
        inline = True,
    ).add_field(
        'Stickers',
        (
            f'```\n'
            f'{len(guild.stickers)}\n'
            f'```'
        ),
        inline = True,
    )
    return embed
async def try_get_invite_url_of(guild):
    """Return a permanent invite URL for *guild*, or None if unavailable.

    Preference order: the guild's vanity URL, then an existing unlimited
    (no expiry, no use cap) invite created by this client. Returns None when
    the client lacks create-invite permission or the guild is inaccessible.
    """
    invite_url = guild.vanity_url
    if (invite_url is not None):
        return invite_url
    if not guild.cached_permissions_for(SLASH_CLIENT).can_create_instant_invite:
        return None
    try:
        invites = await SLASH_CLIENT.invite_get_all_guild(guild)
    except ConnectionError:
        # transport failure: let the caller's error handling deal with it
        raise
    except DiscordException as err:
        if err.code in (
            ERROR_CODES.missing_access, # client removed
            ERROR_CODES.missing_permissions, # permissions changed meanwhile
            ERROR_CODES.unknown_guild, # guild deleted
        ):
            return None
        raise
    for invite in invites:
        # only reuse invites this client itself created
        if invite.inviter is not SLASH_CLIENT:
            continue
        # skip stream/app-targeted invites
        if invite.type is not InviteTargetType.none:
            continue
        # skip expiring invites (max_age > 0 means a time limit)
        max_age = invite.max_age
        if (max_age is not None) and (max_age > 0):
            continue
        # skip use-capped invites; 0 means unlimited uses
        max_uses = invite.max_uses
        if (max_uses is None) or (max_uses > 0):
            continue
        return invite.url
    return None
@SLASH_CLIENT.interactions(is_global = True)
class koi_guilds:
    """Slash command listing Koishi-themed guilds, one page of embeds at a time.

    Implemented as a class with ``__new__`` so the per-class NEXT_CACHE_AT
    timestamp can throttle cache rebuilds to once per CACHING_INTERVAL.
    """
    NEXT_CACHE_AT = 0.0

    async def __new__(cls, page: ('number', 'Page') = 1):
        # rebuild the guild cache at most once per interval, then serve
        # the requested page (get_embed builds embeds lazily)
        if cls.should_recache():
            clear_cache()
            cache_guilds()

        return get_embed(page)

    @classmethod
    def should_recache(cls):
        """Return True (and bump the deadline) when the cache has expired."""
        now = LOOP_TIME()
        if cls.NEXT_CACHE_AT < now:
            cls.NEXT_CACHE_AT = now + CACHING_INTERVAL
            return True

        return False

# NOTE(review): this emoji constant is not referenced in this chunk; it may
# be used elsewhere in the module.
EMOJI_KOISHI_DERP = Emoji.precreate(772498743378575403)
@SLASH_CLIENT.interactions(
    is_global = True,
    required_permissions = Permission().update_by_keys(administrator = True),
)
async def koi_guilds_how_to(client, event):
    """How to become the best koi guild!"""
    # Static help embed explaining the koi-guild listing requirements.
    return Embed(
        'The four steps of becoming a Koishi guild!',
    ).add_field(
        'Step 1 | Koishi',
        'The guild must be named after Koishi or a related topic!',
    ).add_field(
        'Step 2 | Security',
        'To avoid name abuse, each Koishi guild owner is required to join my support server.\n'
    ).add_field(
        'Step 3 | Invite (Optional)',
        (
            # FIX: user-facing typo "guid" -> "guild"
            f'For invite to show up, the guild must have either vanity invite, or an invite must be created with the '
            f'{client.name_at(event.guild_id)}\'s `/invite-create` command.'
        )
    ).add_field(
        'Step 4 | Description (Optional)',
        'After enabling the community feature in the guild, you will be able to set guild description.',
    ).add_footer(
        'If the changes are not showing up instantly, do not worry! The command is updated periodically!'
    )
@SLASH_CLIENT.interactions(
    guild = GUILD__SUPPORT,
    required_permissions = Permission().update_by_keys(administrator = True),
)
async def koi_guilds_recache():
    """Support-guild, admin-only command forcing an immediate guild-cache rebuild."""
    cache_guilds()
    return 'cache updated'
|
from rest_framework import mixins, parsers, permissions, viewsets
from rest_framework.settings import api_settings
from ..models import Boxart, Brand, ModelKit, Scale
from .filters import BrandFilter, ModelKitFilter, ScaleFilter
from .serializers import (
BoxartSerializer,
BrandSerializer,
CreateModelKitSerializer,
ModelKitSerializer,
ScaleSerializer,
)
class ModelKitViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for model kits; creation stamps the submitting user."""

    queryset = ModelKit.objects.select_related("scale", "brand")
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    serializer_class = ModelKitSerializer
    filterset_class = ModelKitFilter

    def get_serializer_class(self):
        """Use the dedicated create serializer for POSTs, the default otherwise."""
        if self.action != "create":
            return super().get_serializer_class()
        return CreateModelKitSerializer

    def perform_create(self, serializer):
        """Record the authenticated requester as the kit's submitter."""
        serializer.save(submitter=self.request.user)
class BrandViewSet(mixins.CreateModelMixin, viewsets.ReadOnlyModelViewSet):
    # List/detail plus create; no update or delete exposed.
    queryset = Brand.objects.all()
    serializer_class = BrandSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filterset_class = BrandFilter
    # Returned unpaginated.  # NOTE(review): assumes the brand table stays small — confirm.
    pagination_class = None
class ScaleViewSet(mixins.CreateModelMixin, viewsets.ReadOnlyModelViewSet):
    # Mirrors BrandViewSet: read-only plus create, unpaginated.
    queryset = Scale.objects.all()
    serializer_class = ScaleSerializer
    permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
    filterset_class = ScaleFilter
    pagination_class = None
class BoxartViewSet(viewsets.ModelViewSet):
    # Empty base queryset: instances are only created through uploads, never listed.
    queryset = Boxart.objects.none()
    serializer_class = BoxartSerializer
    # Accept raw file uploads in addition to the default (JSON/form) parsers.
    parser_classes = api_settings.DEFAULT_PARSER_CLASSES + [parsers.FileUploadParser]

    def create(self, request, *args, **kwargs):
        # Augment the standard create response with a success flag for the client.
        response = super().create(request, *args, **kwargs)
        response.data["success"] = True
        return response
|
# Uses python3
import sys
import math


def lcm(a, b):
    """Return the least common multiple of two positive integers.

    lcm(a, b) = a * b / gcd(a, b); ``math.gcd`` replaces the original
    hand-rolled Euclidean loop.
    """
    return (a * b) // math.gcd(a, b)


if __name__ == "__main__":
    # Read both integers from stdin; renamed to avoid shadowing the
    # ``input`` builtin as the original did.
    data = sys.stdin.read()
    a, b = map(int, data.split())
    print(lcm(a, b))
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 11:31:10 2018
@author: IkerVazquezlopez
"""
import sys
import pickle
import cv2
import gc
def obj_in_list(obj, loaded_list):
    """Return True when *obj*'s id equals the first element of any list entry."""
    target = obj.getID()
    return any(target == entry[0] for entry in loaded_list)
#%% MAIN METHOD
# Rebuild full video frames by pasting each tracked object's per-object video
# frame onto the static background at its recorded bounding-box position.
if len(sys.argv) != 4:
    print(len(sys.argv))
    print("Usage: python module_decoder.py tracker_path background_path obj_video_dir")
    raise Exception("Decoder: main --> Input arguments != 4.")

tracker_path = sys.argv[1]
obj_video_dir = sys.argv[3]

# Load the pickled tracker: a sequence of frames, each carrying its objects.
f = open(tracker_path, 'rb')
tracker = pickle.load(f)
f.close()

# Cache of per-object cv2.VideoCapture handles, keyed by stringified object id.
loaded_videos = {}

background = cv2.imread(sys.argv[2])
fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
#out0 = cv2.VideoWriter("reconstructed0.avi", fourcc, 30.0, (int(background.shape[1]/2),int(background.shape[0]/2)), True)
#out1 = cv2.VideoWriter("reconstructed1.avi", fourcc, 30.0, (int(background.shape[1]/2),int(background.shape[0]/2)), True)
#out2 = cv2.VideoWriter("reconstructed2.avi", fourcc, 30.0, (int(background.shape[1]/2),int(background.shape[0]/2)), True)
#out3 = cv2.VideoWriter("reconstructed3.avi", fourcc, 30.0, (int(background.shape[1]/2),int(background.shape[0]/2)), True)

print(len(tracker.getFrames()))
for frame in tracker.getFrames():
    #background_frame = background.copy()
    # Re-read the background each frame so pastes from earlier frames do not
    # accumulate in the buffer.
    background_frame = cv2.imread(sys.argv[2])
    #print(len(frame.getObjects()))
    for obj in frame.getObjects():
        # Lazily open the per-object video the first time the object appears.
        if not str(obj.getID()) in loaded_videos:
            obj_cap = cv2.VideoCapture(obj_video_dir + str(obj.getID()) + ".avi")
            loaded_videos[str(obj.getID())] = obj_cap
        ret, v_obj_frame = loaded_videos[str(obj.getID())].read()
        #print(obj.getID(), ret)
        #print(v_obj_frame.shape)
        if not ret:
            continue
        x, y, _, _ = obj.getBbox()
        h, w, _ = v_obj_frame.shape
        # Clamp the paste position so the crop stays inside the background.
        if x+w > background_frame.shape[1]:
            x = x-(x+w-background_frame.shape[1])
        if y+h > background_frame.shape[0]:
            y = y-(y+h-background_frame.shape[0])
        background_frame[y:y+h, x:x+w] = v_obj_frame
    print(frame.getID())
    cv2.imwrite("../output/reconstructed_frames/" + str(frame.getID()) + ".png", background_frame)
    #out0.write(background_frame[0:int(background.shape[1]/2), 0:int(background.shape[0]/2)])
    #out1.write(background_frame[0:int(background.shape[1]/2), int(background.shape[0]/2):background.shape[0]])
    #out2.write(background_frame[int(background.shape[1]/2):background.shape[1], 0:int(background.shape[0]/2)])
    #out3.write(background_frame[int(background.shape[1]/2):background.shape[1], int(background.shape[0]/2):background.shape[0]])
    # Drop the frame and force a GC pass to keep memory bounded on long videos.
    background_frame = None
    gc.collect()

#out0.release()
#out1.release()
#out2.release()
#out3.release()
|
from django.urls import path
from case_admin.views import common, case, comment, tag, user, question
app_name = "case_admin"

# Admin routes, grouped per resource: a list view, an optional review view,
# and a detail API endpoint keyed by primary key.
urlpatterns = [
    path("users/", user.view_admin_user, name='users'),
    path("users/review", user.view_admin_user_review, name='users_review'),
    path("users/<int:user_id>", user.api_admin_user, name='api_users'),
    path("cases/", case.view_admin_case, name='cases'),
    path("cases/review", case.view_admin_case_review, name='cases_review'),
    path("cases/<int:case_id>", case.api_admin_case, name='api_cases'),
    path("questions/", question.view_admin_question, name='questions'),
    # NOTE(review): 'tquestion_import' looks like a typo for 'question_import'
    # (compare 'tag_import' below); renaming would break reverse() callers,
    # so it is only flagged here.
    path("questions/import", question.api_admin_question_import, name='tquestion_import'),
    path("questions/<int:question_id>", question.api_admin_question, name='api_questions'),
    path("tags/", tag.view_admin_tag, name='tags'),
    path("tags/import", tag.api_admin_tag_import, name='tag_import'),
    path("tags/<int:tag_id>", tag.api_admin_tag, name='api_tags'),
    path("comments/", comment.view_admin_comment, name='comments'),
    path("comments/review", comment.view_admin_comment_review, name='comments_review'),
    path("comments/<int:comment_id>", comment.api_admin_comment, name='api_comments'),
    path("", common.view_landing, name='default'),
]
|
import math
from Classifiers.custom_layers import FBetaLoss, HyperedgePoolingLayer
from Classifiers.base_model import BaseModel
import numpy as np
import torch
import torch.nn.functional as F
from tqdm import tqdm
# pylint: disable=no-member
class HGNNConvolution(torch.nn.Module):
    """Hypergraph convolution layer: ``x' = G @ (x @ W + b)``."""

    def __init__(self, in_ft, out_ft, bias=True):
        """Creates an instance of hypergraph convolution layer.

        Args:
            in_ft: int. Input features size.
            out_ft: int. Output features size.
            bias: bool. Whether a learnable bias term is added.
        """
        super(HGNNConvolution, self).__init__()
        self.weight = torch.nn.Parameter(torch.Tensor(in_ft, out_ft))
        if not bias:
            self.register_parameter('bias', None)
        else:
            self.bias = torch.nn.Parameter(torch.Tensor(out_ft))
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialises weight (and bias) uniformly in [-1/sqrt(out_ft), 1/sqrt(out_ft)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, x, G):
        """Forward pass: project node features, then propagate through ``G``.

        Args:
            x: torch.Tensor. Features of nodes.
            G: torch.Tensor. Forward propagation adjacency matrix as defined
                for the hypergraph convolution layer.

        Returns:
            torch.Tensor. Feature representations of the next layer.
        """
        projected = x.matmul(self.weight)
        if self.bias is not None:
            projected = projected + self.bias
        return G.matmul(projected)
class HGNNHyperlinkPrediction(BaseModel):
    """HGNNHyperlinkPrediction model adapts HGNN architecture for the purpose
    of link prediction.

    The architecture is similar to HGNN, however, instead of predicting class
    labels, the model predicts whether a particular set of nodes will form
    a hyperedge.
    """

    def __init__(
            self, in_ch, n_hid, aggregate_method='max-pool',
            link_pred_method='hadamard-product', dropout=0.5):
        """Creates an instance of HGNNHyperlinkPrediction class.

        Args:
            in_ch: int. Input channels.
            n_hid: int. No of hidden activation units.
            aggregate_method: str. The method used for aggregating node
                embeddings. It can be either 'max-pool', 'mean-pool',
                'sag-pool', 'min-pairwise' or 'mean-pairwise'.
            link_pred_method: str. The method used for link prediction from
                node embeddings. It can be either 'cosine', 'addition',
                'hadamard-product', 'l1-weighted', 'l2-weighted'. Required only
                if 'mean-pairwise' or 'min-pairwise' is used as
                aggregate method.
            dropout: float. Dropout probability.
        """
        super(HGNNHyperlinkPrediction, self).__init__()
        self.dropout = dropout
        # Two stacked hypergraph convolutions produce the node embeddings.
        self.hgc1 = HGNNConvolution(in_ch, n_hid)
        self.hgc2 = HGNNConvolution(n_hid, n_hid)
        self.hedge_embedder = HyperedgePoolingLayer(
            n_hid, aggregate_method, link_pred_method)
        # Final scorer applied to pooled hyperedge embeddings (non-pairwise path).
        self.fc = torch.nn.Linear(n_hid, 1)
        self.loss = torch.nn.BCELoss()
        # self.loss = FBetaLoss(0.1)
        self.optim = torch.optim.Adam(self.parameters())
        # NOTE(review): attribute name keeps the original 'mathod' spelling;
        # renaming could break external references, so it is left as-is.
        self.aggregation_mathod = aggregate_method
        self.link_pred_method = link_pred_method

    def forward(self, x, hyperedges, G):
        """Forward pass of HGNN based Hyperedge predictor.

        Args:
            x: torch.Tensor. Embeddings of nodes.
            hyperedges: list(torch.Tensor). Hyperedges for which prediction has
                to happen.
            G: torch.Tensor. Adjacency matrix of hypergraph.

        Returns:
            torch.Tensor. Output prediction for hyperedges.
        """
        x = torch.relu(self.hgc1(x, G))
        x = torch.dropout(x, self.dropout, train=self.training)
        x = torch.relu(self.hgc2(x, G))
        x = torch.dropout(x, self.dropout, train=self.training)
        # Pairwise aggregators emit a prediction directly; pooling aggregators
        # emit an embedding that is scored through the fc + sigmoid head.
        if 'pairwise' in self.aggregation_mathod:
            preds = self.hedge_embedder(x, hyperedges)
        else:
            x = self.hedge_embedder(x, hyperedges)
            preds = torch.sigmoid(self.fc(x))
        return preds

    def trainer(self, initial_embeddings, batch_gen, test_gen, G, Gtest, max_epoch=10):
        """Trains the model for given number of epochs.

        Args:
            initial_embeddings: torch.Tensor. The initial node embeddings. Note
                that the size of initial embeddings must match input channels
                as intialized in init function.
            batch_gen: BatchGenerator. Batch generator object that generates
                a batch of hyperedges and corresponding labels for train data.
            test_gen: BatchGenerator. Batch generator object that generates
                a batch of hyperedges and corresponding labels for test data.
            G: torch.Tensor. Adjacency matrix for HGNN convolution as defined
                by HGNN convolution operation.
            Gtest: torch.Tensor. Adjacency matrix for HGNN convolution as defined
                by HGNN convolution operation for test data.
            max_epoch: int. Number of epochs to train for.
        """
        epoch_bar = tqdm(total=max_epoch)
        batch_bar = tqdm(
            total=batch_gen.total_size // batch_gen.batch_size, leave=False,
            desc='Iterator over batches.')
        epoch_count = 0
        # Per-epoch accumulators, reset every time epoch_bool fires.
        epoch_loss = []
        epoch_preds = []
        epoch_labels = []
        while epoch_count < max_epoch:
            # epoch_bool signals that this batch completes the current epoch.
            hyperedges, labels, epoch_bool = batch_gen.next()
            self.optim.zero_grad()
            preds = self.forward(initial_embeddings, hyperedges, G)
            loss = self.loss(preds.squeeze(), labels)
            loss.backward()
            self.optim.step()
            batch_bar.update()
            epoch_loss.append(loss.detach().item())
            epoch_preds.append(preds.detach())
            epoch_labels.append(labels)
            if epoch_bool:
                epoch_count += 1
                # End of epoch: report train metrics, optionally evaluate on
                # the test split, then keep the best model by ROC.
                y_preds = torch.cat(epoch_preds)
                y_true = torch.cat(epoch_labels)
                report = self.get_report(y_true, y_preds)
                self.print_report(report, epoch_count, np.mean(epoch_loss))
                if test_gen:
                    y_preds = self.predict(initial_embeddings, test_gen, Gtest)
                    y_true = test_gen.get_labels()
                    test_report = self.get_report(y_true, y_preds)
                    test_loss = self.loss(y_preds, y_true).detach().item()
                    self.print_report(
                        test_report, epoch_count, test_loss, train=False)
                    self.add_summary(
                        epoch_count, report, np.mean(epoch_loss),
                        test_report, test_loss)
                    self.track_best_model(test_report['ROC'])
                else:
                    self.add_summary(epoch_count, report, np.mean(epoch_loss))
                    self.track_best_model(report['ROC'])
                epoch_bar.update()
                batch_bar.close()
                batch_bar = tqdm(
                    total=batch_gen.total_size // batch_gen.batch_size,
                    leave=False, desc='Iterator over batches.')
                epoch_loss = []
                epoch_preds = []
                epoch_labels = []
        batch_bar.close()
        epoch_bar.close()

    def predict(self, initial_embeddings, test_gen, G):
        """Predict function that predicts output for test hyperedges.

        Args:
            initial_embeddings: torch.Tensor. Initial embeddings of nodes.
            test_gen: BatchGenerator. A batch generator that generates batches
                of test data (only inputs).
            G: torch.Tensor. Adjacency matrix for HGNN convolution as defined
                by HGNN convolution operation.

        Returns:
            torch.Tensor. Output prediction for test hyperedges.
        """
        # Switch to eval mode (disables dropout) for the duration of inference.
        self.eval()
        is_last_batch = False
        test_iterator = tqdm(
            total=test_gen.total_size // test_gen.batch_size, leave=False,
            desc='Iterator test over batches.')
        predictions = []
        while not is_last_batch:
            hyperedges, is_last_batch = test_gen.next()
            preds = self.forward(initial_embeddings, hyperedges, G)
            predictions.append(preds.squeeze().detach())
            test_iterator.update()
        predictions = torch.cat(predictions)
        # Restore training mode before returning to the trainer loop.
        self.train()
        return predictions

    def generate_laplacian_matrix_from_hypermatrix(self, H):
        """Generates adjacency matrix for HGNN convolution as defined
        by HGNN convolution operation.

        Computes G = Dv^-1/2 H W De^-1 H^T Dv^-1/2.

        Args:
            H: np.array. Hypergraph incidence matrix.

        Returns:
            np.array. The generated adjacency matrix.
        """
        H = H.toarray()
        n_edge = H.shape[1]
        # the weight of the hyperedge
        W = np.ones(n_edge)
        # the degree of the node
        DV = np.sum(H * W, axis=1)
        # the degree of the hyperedge
        DE = np.sum(H, axis=0)
        invDE = np.mat(np.diag(np.power(DE, -1.0)))
        DV2 = np.mat(np.diag(np.power(DV, -0.5)))
        # There could be some nodes which are not part of any of the train edges
        # but only appear at test edges.
        DV2[np.isinf(DV2)] = 0
        W = np.mat(np.diag(W))
        H = np.mat(H)
        HT = H.T
        G = DV2 * H * W * invDE * HT * DV2
        return torch.Tensor(G)
|
""" ------------------------------- Check Permutation -------------------------------------
Given two strings, S and T, check if they are permutations of each other. Return true or false.
Permutation means - length of both the strings should same and should contain same set of characters.
Order of characters doesn't matter.
#### Note : Input strings contain only lowercase english alphabets.
#### Input format :
Line 1 : String 1
Line 2 : String 2
#### Output format :
'true' or 'false'
#### Constraints :
0 <= |S| <= 10^7
0 <= |T| <= 10^7
where |S| represents the length of string, S.
#### Sample Input 1 :
abcde
baedc
#### Sample Output 1 :
true
#### Sample Input 2 :
abc
cbd
#### Sample Output 2 :
false
"""
def permutation(s1, s2):
    """Return True when s1 and s2 contain exactly the same multiset of characters."""
    # Tally s1's characters, then consume them with s2; any mismatch in
    # either direction disproves the permutation.
    remaining = {}
    for ch in s1:
        remaining[ch] = remaining.get(ch, 0) + 1
    for ch in s2:
        if ch not in remaining:
            return False
        if remaining[ch] == 1:
            del remaining[ch]
        else:
            remaining[ch] -= 1
    return not remaining
# Main
# Read the two strings from stdin and print the judge's lowercase booleans.
s1=input()
s2=input()
if permutation(s1, s2):
    print('true')
else:
    print('false')
"""s1=input()
s2=input()
if len(s1)!=len(s2):
print("false")
else:
fa=[0]*256
for i in s1:
fa[ord(i)]+=1
for i in s2:
fa[ord(i)]-=1
for i in fa:
if i!=0:
print("false")
break
else:
print("true")
"""
'''
if len(s1)!=len(s2):
print("false")
else:
count=0
for i in s1:
for j in s2:
if i==j:
count+=1
s2=s2.replace(j,".",1)
break
if count==len(s1):
print("true")
else:
print("false")
''' |
import re
import string
def translation(s):
    """Translate the first open reading frame of RNA string *s* into protein.

    Finds the first in-frame ``AUG ... stop`` segment (stop being UGA/UAA/UAG)
    and maps each codon through the standard genetic code.

    Raises:
        AttributeError: when *s* contains no ORF (``re.search`` returns None),
            matching the original behavior.
    """
    k = re.search('(AUG([ACGU]{3,3})+?)(UGA|UAA|UAG)', s)
    codons = re.findall(r'...', k.group(1))
    genetic_code = {'UUU': 'F', 'UUC': 'F', 'UUA': 'L', 'UUG': 'L', 'CUU': 'L', 'CUC': 'L', 'CUA': 'L', 'CUG': 'L',
                    'AUU': 'I', 'AUC': 'I', 'AUA': 'I', 'GUU': 'V', 'GUC': 'V', 'GUA': 'V', 'GUG': 'V', 'UCU': 'S',
                    'UCC': 'S', 'UCA': 'S', 'UCG': 'S', 'CCU': 'P', 'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'ACU': 'T',
                    'ACC': 'T', 'ACA': 'T', 'ACG': 'T', 'GCU': 'A', 'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'UAU': 'Y',
                    'UAC': 'Y', 'CAU': 'H', 'CAC': 'H', 'CAG': 'Q', 'CAA': 'Q', 'AAU': 'N', 'AAC': 'N', 'AAG': 'K',
                    'AAA': 'K', 'GAU': 'D', 'GAC': 'D', 'GAA': 'E', 'GAG': 'E', 'UGU': 'C', 'UGC': 'C', 'UGG': 'W',
                    'CGU': 'R', 'CGA': 'R', 'CGG': 'R', 'CGC': 'R', 'AGU': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
                    'GGU': 'G', 'GGC': 'G', 'GGA': 'G', 'GGG': 'G', 'AUG': 'M'}
    # Direct dict lookup replaces the original per-codon scan over all 61
    # entries; unknown codons are silently skipped, as before.
    return ''.join(genetic_code.get(codon, '') for codon in codons)
if __name__ == "__main__":
    # Read the RNA string (possibly wrapped across lines) from the Rosalind
    # dataset file, join it, and print the protein translation.
    f = open('rosalind_prot.txt').readlines()
    lst = []
    for line in f:
        line = line.replace('\n', '')
        lst.append(line)
    s = ''.join(lst)
    print(translation(s))
import argparse
import pickle
import os
import sys
import random
# Fixed seed so random.shuffle inside load_samples is reproducible across runs.
random.seed(50)
# Cluster paths for the MIMIC related-samples corpus.
# NOTE(review): several functions below hard-code their own copies of these.
src_dir = '/hpf/projects/brudno/marta/mimic_rs_collection/rs_sorted_alpha_cleaned/'
dest_dir = '/hpf/projects/brudno/marta/mimic_rs_collection/cuis_rs_20190315/'
def load_terms(src_file=None):
    """Parse the closest-medical-concepts file into a nested dictionary.

    The file interleaves ':::'-delimited header lines (``abbr:::X,exp:::Y``)
    with tab-separated data rows (empty first field, term, distance).

    Args:
        src_file: optional path override (added, backward-compatible); defaults
            to the cluster corpus file the original hard-coded.

    Returns:
        dict: abbr -> {expansion -> [[related_term, distance], ...]}.
    """
    if src_file is None:
        # src_file = '/hpf/projects/brudno/marta/mimic_rs_collection/closest_umls_terms_in_mimic_20190722_b.txt'
        src_file = '/hpf/projects/brudno/marta/mimic_rs_collection/all_allacronym_expansions/closest_medical_concepts_casi_20200602_new.txt'
    related_terms = {}
    # Parser state; initialized up-front so a file that starts with a data row
    # can no longer raise NameError (the original left these unbound).
    flag = False
    abbr = ''
    exp = ''
    with open(src_file) as f:
        for line in f:
            if ':::' in line:
                header = line[:-1].split(",")
                if len(header) == 2:
                    flag = True
                    abbr = header[0].split(":::")[1]
                    exp = header[1].split(":::")[1]
                    # Drop identity expansions and known-bad entries.
                    if abbr == exp or exp == 'remove' or exp == 'in vitro fertilscreeization':
                        flag = False
                        continue
                    if abbr not in related_terms:
                        related_terms[abbr] = {}
                    related_terms[abbr][exp] = []
            else:
                content = line[:-1].split('\t')
                # Data rows start with an empty first column; skip the header row.
                if len(content) > 2 and content[0] == '' and content[1] != 'closest_med_concept_in_mimic' and flag:
                    related_terms[abbr][exp].append([content[1], float(content[2])])
    return related_terms
def load_samples(exp_, args, root_dir="/hpf/projects/brudno/marta/mimic_rs_collection/rs_sorted_alpha_cleaned"):
    """Load up to ``args.max_samples`` shuffled corpus lines for expansion *exp_*.

    Files are sharded by first character of the expansion; numeric-looking
    expansions (marker ``N``) live under the ``num`` shard.

    Args:
        exp_: str. Expansion term (space-separated).
        args: namespace with a ``max_samples`` int attribute.
        root_dir: optional shard root override (added, backward-compatible).

    Returns:
        list[str]: at most ``args.max_samples`` matching lines, shuffled.
    """
    start_char = exp_[0]
    if start_char == "N":
        start_char = 'num'
    samples = []
    try:
        with open("{}/{}/{}".format(root_dir, start_char, exp_)) as exp_file:
            for line in exp_file:
                content = line.split("|")
                # Keep only rows whose first field matches the expansion exactly.
                if content[0] != exp_:
                    continue
                samples.append(line[:-1])
    except OSError:
        # Narrowed from the original bare ``except:``; a missing shard file is
        # expected and handled best-effort with zero samples.
        print("couldn't load file...............{}".format(exp_))
    random.shuffle(samples)
    num_samples = min(len(samples), args.max_samples)
    sampled_sample = samples[:num_samples]
    return sampled_sample
def generate_data(terms_dict, args):
    """Write one samples file per abbreviation: the expansion's own samples
    (distance 0) followed by samples of each related term with its distance.
    """
    # NOTE(review): intentionally shadows the module-level dest_dir — confirm.
    dest_dir = "/hpf/projects/brudno/marta/mimic_rs_collection/casi_mimic_rs_dataset_20190723"
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    for abbr in terms_dict:
        filename = '{}/{}_rs_close_umls_terms_20190723.txt'.format(dest_dir, abbr)
        with open(filename, 'w') as fhandle:
            for expansion in terms_dict[abbr]:
                # Expansions are stored underscore-joined; corpus uses spaces.
                exp_ = ' '.join(expansion.split("_"))
                terms = load_samples(exp_, args)
                redone_samples = ["{}|{}|{}\n".format(exp_, 0, i) for i in terms]
                for item in redone_samples:
                    fhandle.write(item)
                for relative in terms_dict[abbr][expansion]:
                    rel = ' '.join(relative[0].split("_"))
                    distance = relative[1]
                    terms = load_samples(rel, args)
                    # Rows are labeled with exp_ (not rel) plus the distance.
                    redone_samples = ["{}|{}|{}\n".format(exp_, distance, i) for i in terms]
                    for item in redone_samples:
                        fhandle.write(item)
def main():
    """CLI entry point: parse --max_samples, then build the dataset files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_samples', default=1000, type=int)
    args = parser.parse_args()
    terms_dict = load_terms()
    generate_data(terms_dict, args)
    print("Done extracting rs samples! \U0001F4AA \U00002600")


if __name__ == "__main__":
    main()
|
class Restaurant():
    """A simple restaurant described by its name and cuisine type."""

    def __init__(self, restaurant_name, cuisine_type):
        """Store the restaurant's name and the kind of food it serves."""
        self.restaurant_name = restaurant_name
        self.cuisine_type = cuisine_type

    def describe_restaurant(self):
        """Print a one-line description of the restaurant."""
        message = self.restaurant_name.title() + " is a(n) " + self.cuisine_type + " restaurant."
        print(message)

    def open_restaurant(self):
        """Announce that the restaurant is open."""
        print("The restaurant is opening.")
# Demo: instantiate one restaurant and exercise every attribute and method.
pc = Restaurant("xiaojiangnan", "Chinese food")
print(pc.restaurant_name)
print(pc.cuisine_type)
pc.describe_restaurant()
pc.open_restaurant()
# Assignment-3
'''
author : teja
date : 10/8/2018
module 11
'''
def is_valid_word(word_test, hand_word, word_list):
    """
    Returns True if word is in the wordList and is entirely
    composed of letters in the hand. Otherwise, returns False.

    Args:
        word_test: str. The word to validate.
        hand_word: dict. Letter -> available count in the hand.
        word_list: collection of valid words.
    """
    # Rewritten to match the documented contract: the original accumulated
    # ``count += count`` (always 0) and indexed a list by a character, so it
    # could never validate correctly.
    if word_test not in word_list:
        return False
    needed = {}
    for letter in word_test:
        needed[letter] = needed.get(letter, 0) + 1
    # Every letter must be available in the hand at least as often as used.
    return all(hand_word.get(letter, 0) >= count for letter, count in needed.items())
def main():
    '''
    Read the word, the hand (letter/count pairs), and the word list from
    stdin, then print the validation result.
    '''
    word_test = input()
    n_1 = int(input())
    adict = {}
    for i_1 in range(n_1):
        data = input()
        l_1 = data.split()
        # NOTE(review): ``i_1 += i_1`` has no effect — range() drives the loop.
        i_1 += i_1
        adict[l_1[0]] = int(l_1[1])
    l_2 = input().split()
    print(is_valid_word(word_test, adict, l_2))


if __name__ == "__main__":
    main()
|
import pdb
import time
import pickle
import traceback
def get_num():
    # Count (and print) the number of lines in the training CSV.
    # NOTE: this file is Python 2 (print statements, has_key below).
    index = 0
    with open('d:/bimbo/train.csv', 'r') as file:
        line = file.readline()
        while line:
            index += 1
            line = file.readline()
    print index
def get_data():
    # Print the first 19 lines of the training CSV for a quick inspection.
    with open('d:/bimbo/train.csv', 'r') as file:
        for i in range(19):
            line = file.readline()
            print line
def handle_train_data():
    # Build a 5-level nested lookup from the test CSV (keyed by columns
    # 3, 2, 6, 4, 5 -> row id), then count how many train rows can be matched
    # against it versus how many miss.
    data_dct = {}
    with open('d:/bimbo/test.csv', 'r') as file:
        line = file.readline()
        # pdb.set_trace()
        while line:
            lst = line.strip().split(',')
            # Insert the row at the deepest missing level of the nested dict.
            if not data_dct.has_key(lst[3]):
                data_dct[lst[3]] = {lst[2]:{lst[6]:{lst[4]:{lst[5]:lst[0]}}}}
            else:
                if not data_dct[lst[3]].has_key(lst[2]):
                    data_dct[lst[3]][lst[2]] = {lst[6]:{lst[4]:{lst[5]:lst[0]}}}
                else:
                    if not data_dct[lst[3]][lst[2]].has_key(lst[6]):
                        data_dct[lst[3]][lst[2]][lst[6]] = {lst[4]:{lst[5]:lst[0]}}
                    else:
                        if not data_dct[lst[3]][lst[2]][lst[6]].has_key(lst[4]):
                            data_dct[lst[3]][lst[2]][lst[6]][lst[4]] = {lst[5]:lst[0]}
                        else:
                            data_dct[lst[3]][lst[2]][lst[6]][lst[4]][lst[5]] = lst[0]
            line = file.readline()
    with open('d:/bimbo/train.csv', 'r') as file:
        line = file.readline()
        index = 0
        error = 0
        while line:
            # pdb.set_trace()
            # if index > 190:
            #     break
            lst = line.strip().split(',')
            # A KeyError here means the train row has no test counterpart.
            try:
                id = data_dct[lst[2]][lst[1]][lst[5]][lst[3]][lst[4]]
                # print id
                index += 1
            except:
                # traceback.print_exc()
                error += 1
                pass
            line = file.readline()
        print index
        print error
    # file = open('data_dct', 'wb')
    # pickle.dump(data_dct, file)
if __name__ == '__main__':
    # Time the selected routine; get_data()/get_num() are inspection helpers
    # left disabled.
    st = time.time()
    # get_data()
    # get_num()
    handle_train_data()
    ed = time.time()
    print ed - st
|
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Download/load MNIST into /tmp/data.
mnist = input_data.read_data_sets("/tmp/data/", one_hot = True)
# one_hot means that from the 10 outputs only one will be selected at a time

# Hidden layer widths, class count, and training batch size.
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100

# Input placeholders fed per batch during training/evaluation.
x = tf.placeholder(tf.float32, [None, 784]) # Here None means any size is ok. 784 = 28x28 pixels
y = tf.placeholder(tf.float32, [None, 10]) # each row is a one-hot 10-dimensional vector
def neural_network_model(data):
    """Build a 3-hidden-layer fully connected network over flattened MNIST
    images and return the pre-softmax output logits."""
    # initialize weights and biases (these are needed in case all inputs are zeros)
    # with random numbers
    hidden_1_layer = {'weights':tf.Variable(tf.truncated_normal([784, n_nodes_hl1], stddev=0.1)),
                      'biases':tf.Variable(tf.constant(0.1, shape=[n_nodes_hl1]))}
    hidden_2_layer = {'weights':tf.Variable(tf.truncated_normal([n_nodes_hl1, n_nodes_hl2], stddev=0.1)),
                      'biases':tf.Variable(tf.constant(0.1, shape=[n_nodes_hl2]))}
    hidden_3_layer = {'weights':tf.Variable(tf.truncated_normal([n_nodes_hl2, n_nodes_hl3], stddev=0.1)),
                      'biases':tf.Variable(tf.constant(0.1, shape=[n_nodes_hl3]))}
    output_layer = {'weights':tf.Variable(tf.truncated_normal([n_nodes_hl3, n_classes], stddev=0.1)),
                    'biases':tf.Variable(tf.constant(0.1, shape=[n_classes])),}
    # Each layer computes y = x * Weights + Bias, then applies ReLU.
    l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)
    l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)
    l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)
    # Output layer stays linear: the softmax is applied inside the loss.
    output = tf.matmul(l3,output_layer['weights']) + output_layer['biases']
    return output
def train_neural_network(x):
    """Train the MNIST classifier for ``hm_epochs`` epochs and print test accuracy.

    Args:
        x: tf.placeholder for the flattened image batch (shape [None, 784]).
    """
    prediction = neural_network_model(x)
    # cost or loss or cross_entropy function
    # Fixed: TF >= 1.5 requires the labels/logits keyword arguments here (the
    # old positional call raises an error); semantics are unchanged.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    # Training the model is done by repeatedly running this optimizer operation
    optimizer = tf.train.AdamOptimizer(1e-4).minimize(cost)  # select optimization algorithm
    # Test model accuracy by checking how many predictions matches to their labels.
    # argmax gives the index of highest entry along some axis. Output is list of booleans
    correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    # Cast to floating point numbers and take the mean
    # F.ex. [True, False, True, True] --> [1, 0, 1, 1] --> 0.75
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
    hm_epochs = 10  # Select how many training cycles
    with tf.Session() as sess:  # Open and close session with with syntax
        # Fixed: initialize_all_variables() was removed from TF; use the
        # supported global_variables_initializer() instead.
        sess.run(tf.global_variables_initializer())
        # Training cycles
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):  # train all batches
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                # Fit the model and calculate cost
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c  # calculate cost per epoch
            # Print the cost to see the training is improving
            print('Epoch', epoch, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        # Calculate the accuracy on the held-out test set
        print('Accuracy:', accuracy.eval(feed_dict={x: mnist.test.images, y: mnist.test.labels}))


train_neural_network(x)
|
"""Test the serialize function of Schema."""
import mock
import argo
@mock.patch.object(argo.Attr, 'serialize')
def test_schema_calls_attr(attr_serialize):
    """Test that schema serialize calls attr serialize with the correct value."""
    class S(argo.Schema):
        """Test schema."""

        key = argo.Attr()

    S.serialize({'key': 1})
    # The patched Attr.serialize must have been invoked exactly once, and with
    # the full value dict passed to Schema.serialize.
    attr_serialize.assert_called_once_with({'key': 1})
|
from pandas import read_csv
from pandas import DataFrame
import pandas as pd
import time
# fix random seed for reproducibility
#numpy.random.seed(7)
# NOTE: this file is Python 2 (print statements below).

# Experiment configuration; several knobs (epochs, batch, ...) are unused in
# this counting script and appear to be carried over from the LSTM scripts.
db_type = "lstm5"
update_type = 'a'
month = '09'
day = '01'
date = month+'-'+day
num_epochs=5000
batch=2
num_experiments = 1
error_scores = list()
# Timestamp prefixes marking the boundaries of each announcement window.
announcement_times = ["23:5","-3:5","-7:5","-11:5","-15:5","-19:5"]
announc_index = 0
#------------------------------------------------------------------------------------
# load dataset
#dataset = read_csv('convergence_features-aX.csv', header=0, index_col=0)
#dataset = read_csv('convergence_features-rrc00-wX.csv', header=0, index_col=0)
#dataset = read_csv('convergence_features-rrc01-aX.csv', header=0, index_col=0)
#cols = list(read_csv('convergence_features_lstm5_train-a.csv', nrows =1))
#print(cols)
#dataset = read_csv('convergence_features_'+db_type+'_previous_train-'+update_type+'.csv')
#dataset = read_csv('convergence_features-rrc00-a-previous-t5.csv')
dataset = read_csv('convergence_features-rrc00-a-previous-t5.csv')
#dataset.drop(' num_announc_pref',axis=1,inplace=True)
#dataset.drop(' num_withd_pref',axis=1,inplace=True)
values = dataset.values
# ensure all data is float
#values = values.astype('float32')

# Start the per-event counter CSV with a header row.
database_file = "num_events-"+update_type+"-t5.csv" #where you want the file to be downloaded to
csv = open(database_file, "w")
columnTitleRow = "time, num_events\n"
csv.write(columnTitleRow)
csv.close()

num_updates = 0
print "index_col:"
event_indexes = []
database_file = "num_events-"+update_type+"-t5.csv" #where you want the file to be downloaded to
csv = open(database_file, "a")
row = str(announcement_times[0]) + "," + str(0) + "\n"
csv.write(row)
start_time = time.time()
elapsed_time = 0
print "len(announcement_times): " + str(len(announcement_times))

# Walk the dataset rows counting updates per announcement window; when the
# timestamp leaves the current window, flush the count and advance windows.
for i in range(0,len(dataset.values)):
    time_x = dataset.values[i]
    #print time[0]
    if announc_index + 1 >= len(announcement_times):
        print "Reached announcement_times list length"
        #announc_index = 0
        break
    if str(announcement_times[announc_index]) in str(time_x[0]):
        num_updates = num_updates + 1
        #print "Event " + str(announc_index+1) + ": " + str(num_updates) + " updates"
    else:
        end_time = time.time()
        elapsed_time = (end_time - start_time) + elapsed_time
        print elapsed_time, "seconds"
        start_time = time.time()
        print time_x[0]
        # Flush the finished window's count; reopen in append mode so the row
        # is persisted even if a later iteration crashes.
        row = str(announcement_times[announc_index+1]) + "," + str(num_updates) + "\n"
        csv.write(row)
        csv.close()
        csv = open(database_file, "a")
        event_indexes.append(num_updates)
        announc_index = announc_index + 1
        print "announc_index: " + str(announc_index)
        # Current row belongs to the new window, so it is counted as well.
        num_updates = num_updates + 1
        #num_updates = 0
print "event_indexes:"
print event_indexes
print "Total elapsed time: " + str(elapsed_time/60) + " minutes."
csv.close()
|
"""empty message
Revision ID: d4bc619881c2
Revises: c9c770549430
Create Date: 2019-02-17 16:02:46.249427
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# Linear history: no branch labels or cross-revision dependencies.
revision = 'd4bc619881c2'
down_revision = 'c9c770549430'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable, indexed ``remote_addr`` column to ``operationlogs``."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): String(20) cannot hold a full IPv6 textual address (up to
    # 45 chars) — confirm only IPv4 is ever stored before relying on this.
    op.add_column('operationlogs', sa.Column('remote_addr', sa.String(length=20), nullable=True))
    op.create_index(op.f('ix_operationlogs_remote_addr'), 'operationlogs', ['remote_addr'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse of upgrade: drop the ``remote_addr`` index and column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_operationlogs_remote_addr'), table_name='operationlogs')
    op.drop_column('operationlogs', 'remote_addr')
    # ### end Alembic commands ###
|
import cv2
import os
from scipy import stats
from math import *
import numpy as np
import pandas as pd
def CannyThreshold(lowThreshold, ori_img, gray):
    # Adaptive-threshold binarization (translated from the original Chinese
    # comment): blur, Canny edge detect, then mask the original image.
    # NOTE(review): relies on module-level globals ``ratio`` and ``kernel_size``
    # that are not defined in this chunk — confirm they are set before calling.
    detected_edges = cv2.GaussianBlur(gray, (3, 3), 0)
    detected_edges = cv2.Canny(detected_edges, lowThreshold, lowThreshold * ratio, apertureSize=kernel_size)
    dst = cv2.bitwise_and(ori_img, ori_img, mask=detected_edges)  # just add some colours to edges from original image.
    return dst
def drop_columns(data):
    # Drop near-empty columns (column sum below the global ``threshold``).
    # NOTE(review): ``threshold`` and ``e`` are globals not defined in this
    # chunk; ``e`` is presumably the index of the appended row-sum column —
    # confirm.
    for c in data.columns:
        if data[c].sum() < threshold:
            data.drop(columns=[c], inplace=True)
    data.drop(columns=[e], inplace=True)  # drop the last (row-sum) column
    return data
def drop_rows(img):
    # Crop the two effective bands of the image; used to locate corner points
    # and compute slopes.  (Comments translated from the original Chinese.)
    img_new = np.array(img, dtype='float32')
    e, g = img_new.shape[:2]
    arr2 = img_new.sum(axis=1)  # per-row pixel sums
    df = pd.DataFrame(img_new)  # pixels as a DataFrame
    df.insert(len(df.columns), len(df.columns), arr2)  # append row sums as the last column
    df1 = pd.concat([df, (pd.DataFrame(df.sum()).T)])  # append column sums as the last row
    img1_y1 = -1
    img1_y2 = -1
    img_1 = img_2 = df
    # First band: scan the row-sum column (index ``e``) for the first run of
    # rows whose sum exceeds the global ``threshold``; sums above 100000 are
    # treated as outliers and skipped.
    for index, value in enumerate(df1[e]):
        if value > 100000:
            continue
        if img1_y1 == -1:
            if value > threshold:
                img1_y1 = index
        elif value < threshold:
            img1_y2 = index
            img_1 = df1[img1_y1:img1_y2]
            break
    img2_y1 = -1
    # Second band: same scan, starting after the first band ends.
    # NOTE(review): img2_y2 is only bound inside this loop — an image without
    # a second band would raise NameError below; confirm inputs always have two.
    for index, value in enumerate(df1[e]):
        if value > 100000 or index < img1_y2:
            continue
        if img2_y1 == -1:
            if value > threshold:
                img2_y1 = index
        elif value < threshold:
            img2_y2 = index
            img_2 = df1[img2_y1:img2_y2]
            break
    # Trim columns whose sums fall outside (threshold, 30000) in each band.
    for c in img_1.columns:
        if img_1[c].sum() < threshold or img_1[c].sum() > 30000:
            img_1.drop(columns=[c], inplace=True)
    for c in img_2.columns:
        if img_2[c].sum() < threshold or img_2[c].sum() > 30000:
            img_2.drop(columns=[c], inplace=True)
    # Corner points of each band: [[x_left, y_top], [x_right, y_bottom]].
    img_1_point = [[img_1.columns[0], img1_y1], [img_1.columns[-1], img1_y2]]
    img_2_point = [[img_2.columns[0], img2_y1], [img_2.columns[-1], img2_y2]]
    return img_1, img_2, img_1_point, img_2_point
# Rotate by ``angle`` degrees; the original comment claims missing background
# is filled with white (255, 255, 255) — see NOTE in the function.
def rotate_bound_white_bg(image, angle):
    # grab the dimensions of the image and then determine the
    # center
    h, w = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)
    # grab the rotation matrix (applying the negative of the
    # angle to rotate clockwise), then grab the sine and cosine
    # (i.e., the rotation components of the matrix)
    # A negative angle means clockwise rotation; the 1.0 scale parameter keeps
    # the original size (the original comment suggested 0.75 as an option).
    M = cv2.getRotationMatrix2D((cX, cY), angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])
    # compute the new bounding dimensions of the image
    nW = int((h * sin) + (w * cos))
    nH = int((h * cos) + (w * sin))
    # adjust the rotation matrix to take into account translation
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    # perform the actual rotation and return the image
    # NOTE(review): the warp outputs the ORIGINAL size (w, h), not (nW, nH),
    # so the computed bound is never applied, and the border mode is
    # BORDER_REFLECT rather than a white fill — both contradict the name.
    # Downstream code may depend on the constant size, so this is only flagged.
    return cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REFLECT)
def cal_slope(img):
    """Estimate the tilt of the first bright edge in *img* (a DataFrame of
    pixel intensities) and return it in degrees.

    For each column in the middle band (3/8 .. 5/8 of the width), the first
    row brighter than the cutoff becomes an edge point; a linear regression
    over those points yields the slope.
    """
    n_rows, n_cols = img.shape
    brightness_cutoff = 100
    edge_points = []
    for col in range(int(n_cols / 8 * 3), int(n_cols / 8 * 5)):
        for row in range(n_rows):
            if img.iloc[row, col] > brightness_cutoff:
                edge_points.append([col, row])
                break
    # linregress splits an (N, 2) array into x (first column) and y (second).
    fit = stats.linregress(edge_points)
    return degrees(atan(fit.slope))
def process(img_path):
    """Split the image at `img_path` into two deskewed cropped regions.

    Pipeline: edge-detect -> binarise -> locate two bands (drop_rows) ->
    estimate each band's tilt (cal_slope) -> rotate the original by that
    tilt -> clean the rotated mask with erode/dilate -> re-locate the band
    and crop it out of the rotated colour image.

    Returns (img1_final, img2_final), the two cropped colour sub-images.
    """
    ori_img = cv2.imread(img_path)
    bak_img = ori_img  # NOTE(review): alias, not a copy -- both names share one array
    gray = cv2.cvtColor(ori_img, cv2.COLOR_BGR2GRAY)
    # CannyThreshold is defined elsewhere in this file; presumably returns an edge image
    img = CannyThreshold(0, ori_img, gray)
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    ret, img = cv2.threshold(gray, 20, 255, cv2.THRESH_BINARY)
    # small cross kernel for erosion, larger ellipse for dilation
    e_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    d_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))
    # first pass: find both bands in the unrotated mask and their tilts
    img1, img2, point1, point2 = drop_rows(img)
    img1_degree = cal_slope(img1)
    img2_degree = cal_slope(img2)
    # --- band 1: rotate, clean the mask, re-locate, crop ---
    img1_rotated = rotate_bound_white_bg(bak_img, img1_degree)
    img1_gray_rotated = rotate_bound_white_bg(np.array(img), img1_degree)
    img1_gray_rotated = cv2.erode(img1_gray_rotated, e_kernel)
    img1_gray_rotated = cv2.dilate(img1_gray_rotated, d_kernel)
    img1_gray_rotated = cv2.erode(img1_gray_rotated, e_kernel)
    img1_gray_rotated = cv2.erode(img1_gray_rotated, e_kernel)
    ret, img1_gray_rotated = cv2.threshold(img1_gray_rotated, 20, 255, cv2.THRESH_BINARY)
    img1_gray_rotated = cv2.dilate(img1_gray_rotated, d_kernel)
    img1, img2, point1, point2 = drop_rows(img1_gray_rotated)
    # crop rows point1[0][1]:point1[1][1], cols point1[0][0]:point1[1][0]
    img1_final = img1_rotated[point1[0][1]:point1[1][1], point1[0][0]:point1[1][0]]
    # --- band 2: same steps, but with one extra dilate before re-locating ---
    img2_rotated = rotate_bound_white_bg(bak_img, img2_degree)
    img2_gray_rotated = rotate_bound_white_bg(np.array(img), img2_degree)
    img2_gray_rotated = cv2.erode(img2_gray_rotated, e_kernel)
    img2_gray_rotated = cv2.dilate(img2_gray_rotated, d_kernel)
    img2_gray_rotated = cv2.erode(img2_gray_rotated, e_kernel)
    img2_gray_rotated = cv2.erode(img2_gray_rotated, e_kernel)
    ret, img2_gray_rotated = cv2.threshold(img2_gray_rotated, 20, 255, cv2.THRESH_BINARY)
    img2_gray_rotated = cv2.dilate(img2_gray_rotated, d_kernel)
    img2_gray_rotated = cv2.dilate(img2_gray_rotated, d_kernel)
    img1, img2, point1, point2 = drop_rows(img2_gray_rotated)
    img2_final = img2_rotated[point2[0][1]:point2[1][1], point2[0][0]:point2[1][0]]
    return img1_final, img2_final
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", help="the original image path")
    parser.add_argument("-o", "--output", help="the output directory of result images")
    args = parser.parse_args()
    input_path = args.input
    output_path = args.output
    # Module-level globals consumed by CannyThreshold / drop_rows elsewhere
    # in this file.
    lowThreshold = 0
    threshold = 2550
    max_lowThreshold = 100
    ratio = 3
    kernel_size = 3
    if not os.path.exists(input_path):
        print("the input directory not exists, exit!!")
        exit(0)
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    ori_files = os.listdir(input_path)
    for ori_file in ori_files:
        i_path = os.path.join(input_path, ori_file)
        try:
            img1, img2 = process(i_path)
            print("process %s successful" % i_path)
            # BUG FIX: use splitext instead of slicing off the last four
            # characters, which mangled any extension that is not exactly
            # three letters (e.g. ".jpeg", ".tif").
            stem = os.path.splitext(ori_file)[0]
            o_img1_path = os.path.join(output_path, stem + "_1" + ".png")
            o_img2_path = os.path.join(output_path, stem + "_2" + ".png")
            cv2.imwrite(o_img1_path, img1)
            cv2.imwrite(o_img2_path, img2)
        # BUG FIX: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure cause; catch Exception
        # and report why the file failed.
        except Exception as e:
            print("process %s failed: %s" % (i_path, e))
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from get_data import get_kalman_data_10, get_data_10
from wave_filter import wave_mix
from wave_detect import wave_double
from setting import get_thh
from wave_filter import after_kalman
from get_feature import fourier_transform
if __name__ == '__main__':
    # Load one raw recording and detect strides in it; the boundaries of
    # strides 26..29 (p1..p3) are then applied to the Kalman-filtered
    # version of the same recording below.
    file = './data/sample10/run/sample10_run02.txt'
    times, ax, ay, az, wx, wy, wz, tx, ty, tz, height = get_data_10(file)
    rows = len(times)
    thh = 1000
    mix = wave_mix(ax, ay, az, wx, wy, wz)  # smoothing filter
    waves = wave_double(mix, thh)
    thh = get_thh(mix, waves)  # adaptive thh
    waves = wave_double(mix, thh)
    # p1 = start index of wave 26, p3 = end index of wave 29
    p1 = waves[26][0]
    p3 = waves[29][2]
    # Re-load the Kalman-filtered version of the same recording; the wy
    # segment analysed below is taken from this filtered data.
    file = './data/data_after_kalman/sample10/run/run02.txt'
    times, ax, ay, az, wx, wy, wz, tx, ty, tz, height = get_kalman_data_10(file)
    # ax_walk = ax[waves[100][0]: waves[100][2]]
    #
    # # file = './data/data_after_kalman/sample10/run/run02.txt'
    # times, ax, ay, az, wx, wy, wz, tx, ty, tz, height = get_kalman_data_10(file)
    #
    # thh = 1000
    # mix = wave_mix(ax, ay, az, wx, wy, wz) # 平滑滤波
    # waves = wave_double(mix, thh)
    # thh = get_thh(mix, waves) # 自适应thh
    # waves = wave_double(mix, thh)
    #
    # ax_run = ax[waves[20][0]: waves[20][2]]
    #
    # file = './data/data_after_kalman/sample10/upstairs/upstairs01.txt'
    # times, ax, ay, az, wx, wy, wz, tx, ty, tz, height = get_kalman_data_10(file)
    #
    # thh = 1000
    # mix = wave_mix(ax, ay, az, wx, wy, wz) # 平滑滤波
    # waves = wave_double(mix, thh)
    # thh = get_thh(mix, waves) # 自适应thh
    # waves = wave_double(mix, thh)
    #
    # ax_upstairs = ax[waves[50][0]: waves[50][2]]
    #
    # file = './data/data_after_kalman/sample10/bicycle/bicycle04.txt'
    # times, ax, ay, az, wx, wy, wz, tx, ty, tz, height = get_kalman_data_10(file)
    #
    # thh = 1000
    # mix = wave_mix(ax, ay, az, wx, wy, wz) # 平滑滤波
    # waves = wave_double(mix, thh)
    # thh = get_thh(mix, waves) # 自适应thh
    # waves = wave_double(mix, thh)
    #
    # ax_bicycle = ax[waves[100][0]: waves[100][2]]
    #
    #
    #
    # plt.figure(figsize= (6,6))
    #
    # plt.subplot(221)
    # plt.title('Walk')
    # plt.plot(ax_walk)
    # # plt.xticks([])
    # plt.yticks([])
    #
    # plt.subplot(222)
    # plt.title('Run')
    # plt.plot(ax_run)
    # # plt.xticks([])
    # plt.yticks([])
    #
    # plt.subplot(223)
    # plt.title('Upstairs')
    # plt.plot(ax_upstairs)
    # # plt.xticks([])
    # plt.yticks([])
    #
    # plt.subplot(224)
    # plt.title('Bicycle')
    # plt.plot(ax_bicycle)
    # # plt.xticks([])
    # plt.yticks([])
    #
    # plt.show()
    # num = 3
    # ax_new =ax[0:num]
    # for i in range(num, len(ax)): # 9 --- rows-1
    #     sum_1 = 0
    #     for j in range(num + 1):
    #         sum_1 = sum_1 + ax[i + j - num] # 每10个求和
    #     avg_1 = sum_1 / (num + 1)
    #     ax_new.append(avg_1)
    # af_ax, af_ay, af_az, af_wx, af_wy, af_wz = after_kalman(ax_new, ay, az, wx, wy, wz, rows)
    # Fourier spectrum of the wy channel (presumably angular velocity about
    # y -- TODO confirm against get_kalman_data_10) over waves 26..29.
    sample_f = wy[p1:p3]
    p_new = np.linspace(p1, p3, 100)
    ax_fourier = fourier_transform(sample_f, p_new)
    N = len(p_new)
    length = np.arange(N)
    # keep only the first half of the (symmetric) spectrum
    half_length = length[range(int(N / 2))]
    plt.figure(figsize=(5, 4))
    plt.plot(half_length, ax_fourier, 'b')
    plt.xticks([])
    plt.yticks([])
    plt.show()
    # sample = wy[p1:p3]
    # # sample = ax_new[p1:p3]
    # # sample = af_ax[p1:p3]
    # plt.figure(figsize= (5,4))
    # plt.plot(range(len(sample)), sample, 'b-')
    # plt.xticks([])
    # plt.yticks([])
    # plt.show()
    # plt.subplot(211)
    # # plt.plot(range(len(sample)), sample, 'yo')
    # plt.plot(range(len(sample)), sample, 'b-')
    # plt.xticks([])
    # plt.yticks([])
    #
    # plt.subplot(212)
    # plt.plot(range(len(wz)), wz, 'b-')
    # plt.xticks([])
    # plt.yticks([])
    # plt.show()
|
import facebook
import os
import shutil
import requests
import simplejson
import yaml
from datetime import datetime
from dateutil import parser
from quik import FileLoader
import youtube_dl
import cgi
# This downloads all the content and build pages for all the posts.
# A second file called "indexer.py" will generate an index page that
# provides an overview and navigation of the content.
# Read in configuration
try:
    stream = open("config.yml")
    # SECURITY FIX: yaml.load() without an explicit Loader can construct
    # arbitrary Python objects from the file; safe_load() only builds
    # plain data, which is all a config file needs.
    config = yaml.safe_load(stream)
    print("Configuration:")
    print("====================")
    print("Group ID:" + config["group_id"])
    print("OAuth Token:" + config["oauth_access_token"])
    group_id = config["group_id"]
    oauth_access_token = config["oauth_access_token"]
    download_group_videos = config["download_group_videos"]
    download_other_sites_videos = config["download_other_sites_videos"]
    download_other_groups_videos = config["download_other_groups_videos"]
    max_pages = config["max_pages"]
    posts_per_page = config["posts_per_page"]
except (IOError, TypeError, KeyError, yaml.parser.ParserError) as e:
    # print-as-function and str(e) are valid on both Python 2 and 3
    # (exception objects have no .message attribute on Python 3).
    print("There is a problem with the configuration file. Please see the readme for how to create this file.")
    print("\n\nConfiguration error: {0}".format(str(e) + "\n"))
    quit()
# Checks to see if a directory exists, and if not, creates it.
def assure_dir_exists(path):
    """Create `path` (and any missing parents) if needed, then return it.

    An already-existing directory is fine; any other makedirs failure
    (e.g. the path exists as a regular file) is re-raised.
    """
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise
    return path
# Set up directories.  All output lives under ./content relative to the
# current working directory; assure_dir_exists creates what is missing.
icons_dir = assure_dir_exists(os.path.join("content", "user_icons"))
pics_dir = assure_dir_exists(os.path.join("content", "pictures"))
photos_dir = assure_dir_exists(os.path.join("content", "photos"))
css_dir = assure_dir_exists(os.path.join("content", "css"))
video_dir = assure_dir_exists(os.path.join("content", "videos"))
posts_dir = assure_dir_exists(os.path.join("content", "posts"))
# Copy over css files.
src_files = os.listdir("css")
for file_name in src_files:
    full_file_name = os.path.join("css", file_name)
    # only plain files are copied; sub-directories are skipped
    if (os.path.isfile(full_file_name)):
        shutil.copy(full_file_name, css_dir)
# Downloads a users profile picture in small and medium sizes.
def download_fb_image(fb_id):
    """Download the small profile picture for `fb_id` into content/user_icons.

    Skips the download when the file already exists, to limit Graph API
    calls.  Relies on the module-level `graph` client.
    """
    file_name_sm = os.path.join("content", "user_icons", fb_id + ".jpg")
    # disable since it is not used: reduce calls to graph api
    #file_name_lg = os.path.join("content", "user_icons", fb_id + "_lg.jpg")
    try:
        if not os.path.exists(file_name_sm):
            user = graph.get_connections(fb_id, "picture")
            with open(file_name_sm, 'wb') as f:
                f.write(user["data"])
        #if not os.path.exists(file_name_lg):
        #    user = graph.request(fb_id + "/" + "picture", {"type" : "normal"})
        #    with open(file_name_lg, 'wb') as f:
        #        f.write(user["data"])
    except simplejson.scanner.JSONDecodeError:
        # COMPAT FIX: print-as-function is valid on both Python 2 and 3
        # (the original print statement is a syntax error on Python 3).
        print("Oops! Problem parsing JSON, this occurs when trying to download a facebook profile picture.")
# Downloads a photo given a url.
def download_picture(path, id, overwrite=False):
    """Fetch the image at url `path` into content/pictures/<id>.jpg.

    Existing files are kept unless `overwrite` is True; non-200
    responses are silently skipped (best-effort download).
    """
    file_name = os.path.join("content", "pictures", id + ".jpg")
    if overwrite or not os.path.exists(file_name):
        r = requests.get(path, stream=True)
        if r.status_code == 200:
            with open(file_name, 'wb') as f:
                # PERF FIX: iter_content() with no argument yields one
                # byte per iteration; use a sane chunk size instead.
                for chunk in r.iter_content(chunk_size=64 * 1024):
                    f.write(chunk)
# Returns all comments for a given post. If the comments are paginated
# just make a seperate call and download them all.
def process_comments(post):
    """Fetch up to 500 comments for `post`, download each commenter's
    avatar and any photo attachment, and return the comment list.

    Each comment's message is HTML-escaped with newlines turned into
    <br /> so the templates can embed it directly.  Comments are mutated
    in place (message, photo_error flag).
    """
    # let's get all comments but with extra data field such as attachment
    comments = graph.request(post["id"] + "/" + "comments", {'limit':'500', 'fields':'id,attachment,from,message'})["data"]
    for com in comments:
        download_fb_image(com["from"]["id"])
        com["message"] = cgi.escape(com["message"]).replace('\n','<br />')
        if 'attachment' in com and com["attachment"]["type"] == "photo":
            # this is needed because it is not uncommon for graph api to fail in such case
            # the template will use a link to the picture instead of the photo page
            # knowing that the picture is in high res
            if not create_photo_page(com["attachment"]["target"]["id"]) :
                com["photo_error"] = True
            download_picture(com["attachment"]["media"]["image"]["src"], com["id"])
    return comments
# Creates a page for a large picture, including comments on that picture.
# return false if there was a problem
def create_photo_page(picture_id):
    """Render content/photos/<id>.html for the photo `picture_id`.

    Downloads the large image and all comment assets.  Returns True on
    success, False when the Graph API object or a needed field is missing
    (the caller then falls back to a plain picture link).
    """
    try:
        post = graph.get_object(picture_id)
        # for a reason I ignore the message from the post of this image
        # is in the name...
        if("name" in post):
            post["name"] = cgi.escape(post["name"]).replace('\n','<br />')
        if("message" in post):
            post["message"] = cgi.escape(post["message"]).replace('\n','<br />')
        loader = FileLoader('html')
        template = loader.load_template('photo.html')
        date = parser.parse(post["created_time"])
        # TODO verify that the extension is correct...
        download_picture(post["source"] + "?type=large", picture_id)
        photo_url = os.path.join("..", "pictures", picture_id + ".jpg")
        file_name = os.path.join("content", "photos", post["id"] + ".html")
        # Download all the images for the comments.
        # COMPAT FIX: dict.has_key was removed in Python 3; `in` works on both.
        if "comments" in post:
            post["all_comments"] = process_comments(post)
        with open(file_name, 'wb') as f:
            f.write(template.render({'post': post, 'date' : date, 'photo' : photo_url},
                loader=loader).encode('utf-8'))
        return True
    # COMPAT FIX: print-as-function and str(e) instead of the Python-2-only
    # print statement and e.message.
    except facebook.GraphAPIError as e:
        print("Oops! failed to get this object:" + str(picture_id) + "\nError: " + str(e))
        return False
    except KeyError as e:
        print("Oops! Failed to find information for this image:" + str(picture_id) + "\nError: " + str(e))
        return False
# Creates a page for a video, including comments on that picture.
# Shared youtube-dl client; downloads land in tmp/ named '<id><ext>'.
# NOTE(review): the template has no '.' between id and ext, so temp files
# are named e.g. "abc123mp4" -- the rename logic in create_video_page
# builds the same dot-less name.
ydl = youtube_dl.YoutubeDL({'outtmpl': os.path.join('tmp', '%(id)s%(ext)s')})
ydl.add_default_info_extractors()
def create_video_page(post):
    """Download the video attached to `post` into content/videos and
    record its file name on the post (post["video"]).

    Honours the download_* config flags; returns early when the post has
    no usable video source.  Errors are reported, not raised.
    """
    try:
        loader = FileLoader('html')
        template = loader.load_template('video.html')
        date = parser.parse(post["created_time"])
        video_id = post["id"]
        # TODO actually use the template to generate a page...
        # COMPAT FIX throughout: has_key -> `in`, print statements ->
        # print() calls, e.message -> str(e) (valid on Python 2 and 3).
        src = ""
        if "object_id" in post:
            # video hosted on facebook itself
            if not (download_other_groups_videos or download_group_videos): return
            src = "https://www.facebook.com/photo.php?v=" + post["object_id"]
        elif "source" in post:
            if not download_other_sites_videos: return
            src = post["source"]
        elif "link" in post:
            if not download_other_sites_videos: return
            src = post["link"]
        else:
            return
        # Probe the video metadata first (no download yet).
        result = ydl.extract_info(src, download=False)
        if 'entries' in result:
            # Can be a playlist or a list of videos
            video = result['entries'][0]
        else:
            # Just a video
            video = result
        #print("Downloading Thumbnail: " + video["thumbnail"])
        download_picture(video["thumbnail"], video_id, True)
        video_name = video_id + "." + video["ext"]
        video_url = os.path.join("content", "videos", video_name)
        if not os.path.exists(video_url):
            # BUG FIX: ydl's outtmpl writes into the 'tmp' directory, but
            # the original looked for the temp file in the current
            # directory, so the rename below could never find it.  Also
            # renamed: the local used to shadow the stdlib `tempfile`.
            temp_path = os.path.join('tmp', video["id"] + video["ext"])
            print("downloading " + video_name)
            result = ydl.extract_info(src, download=True)
            os.rename(temp_path, video_url)
        post["video"] = video_name
    except facebook.GraphAPIError as e:
        print("Download failed for :" + str(video_id) + "\nError: " + str(e))
    except youtube_dl.utils.DownloadError as e:
        print("Download failed for :" + str(video_id) + "\nError: " + str(e))
    except KeyError as e:
        print("Complex output for data on this video :" + str(video_id) + "\nError: " + str(e))
# Create an index page.
def index_page(posts, pg_count, more_pages):
    """Render one paginated index page to content/posts/<pg_count>.html.

    `more_pages` tells the template whether to emit a next-page link;
    the displayed page number is pg_count + 1 (1-based for humans).
    """
    index_name = os.path.join("content", "posts", str(pg_count) + ".html")
    with open(index_name, 'wb') as f:
        loader = FileLoader('html')
        template = loader.load_template('post.html')
        context = {'posts': posts, 'pg_count': pg_count + 1, 'more_pages': more_pages}
        f.write(template.render(context, loader=loader).encode('utf-8'))
# Run through the posts individually and grab all the images
def prepare_post(post):
    """Mutate `post` in place: parse its date, HTML-escape the message,
    and download/render every attached photo, video, picture and
    comment asset."""
    # Turn the created time into a real date object.
    post["date"] = parser.parse(post["created_time"])
    # COMPAT FIX: dict.has_key was removed in Python 3; `in` works on both.
    if "message" in post:
        post["message"] = cgi.escape(post["message"]).replace('\n','<br />')
    # Create a photo page if a photo exists.
    if post["type"] == "photo":
        create_photo_page(post["object_id"])
    # Create a video page if a video exists.
    if post["type"] == "video":
        create_video_page(post)
    # download any associated pictures.
    if "picture" in post:
        download_picture(post["picture"], post["id"])
    # Download all the images in the comments.
    if "comments" in post:
        post["all_comments"] = process_comments(post)
# Work through the feed, page by page, creating an index page for each.
# ROBUSTNESS FIX: originally recursive (one stack frame per feed page),
# which could hit the interpreter recursion limit on very long feeds;
# rewritten as an equivalent loop.
def process_feed(feed, pg_count):
    """Prepare every post in `feed`, write index pages, and follow the
    Graph API "next" pagination links up to max_pages pages."""
    while True:
        print("processing page #" + str(pg_count) + " (" + str(pg_count+1) + "/" + str(max_pages) + ")")
        # create a parsed out time for each post
        # and make sure you have /all/ the comments (not just the first page.)
        for post in iter(feed["data"]):
            prepare_post(post)
        more_pages = False
        if(pg_count < max_pages-1 and "paging" in feed and "next" in feed["paging"]):
            more_pages = True
        index_page(feed["data"], pg_count, more_pages)
        if not more_pages:
            return
        req = requests.get(feed["paging"]["next"])
        feed = req.json()
        pg_count += 1
# Here is where we kick it all off by grabbing the first page
# of the newsfeed for the group.
try:
    graph = facebook.GraphAPI(oauth_access_token)
    profile = graph.get_object(group_id)
    feed = graph.get_connections(group_id, "feed", limit=posts_per_page)
    # idiomatic emptiness check (empty dict/list is falsy)
    if not feed:
        print("\n\nERROR: No feed found for the group id '" + group_id + "' in your config.\n")
        quit()
    process_feed(feed, 0)
except facebook.GraphAPIError as e:
    # COMPAT FIX: print-as-function and str(e) are valid on Python 2 and 3.
    print("\n\nFacebook Graph API error({0}): {1}".format(e.type, str(e) + "\n"))
|
import random
from os.path import exists
import numpy as np
import json
# Caps on random placement attempts before rotating to another room/cluster.
TRIES = 1000
CLUSTER_TRIES = 100


class World_generator():
    """Generates random robot worlds and saves them as JSON .world files.

    A world is a 600x600 area on a grid of 10px tiles with
    recursive-division walls, plus randomly placed "visit points"
    (optionally grouped into clusters), each carrying a starvation time
    and a visit probability.
    """

    def generate_world(self, args):
        """Build a world from the parsed command options in `args` and
        write it to <path>/<name>.world.

        Every entry of `args` maps an option name to {'default': value}.
        Recognised options: path, name (required), vn (vertex count),
        wn (wall count), cn (cluster count), cf (clustering factor in
        [0, 1]), st_min/st_max (starvation range), p_min/p_max
        (probability range).

        Returns [True, ""] on success or [False, message] on error.
        """
        # prepare world parameters
        # (raw string: the old literal contained '\g', an invalid escape
        # sequence that newer Pythons warn about; bytes are unchanged)
        path = args['path']['default'] if 'path' in args else r'.\_data\generated_worlds'
        if 'name' not in args:
            return [False, "world name required (add -name ______ to generateworld command"]
        name = args['name']['default']
        # BUG FIX: validate the output path up front -- the original only
        # checked it after all generation work was done, as its own
        # "check parameters at the beginning" comment acknowledged.
        if not exists(path):
            return [False, "path does not exist"]
        vertex_count = int(args['vn']['default']) if 'vn' in args else random.randint(5, 15)
        wall_num = int(args['wn']['default']) if 'wn' in args else random.randint(3, 10)
        cluster_count = int(args['cn']['default']) if 'cn' in args else random.randint(1, 5)
        clustering_factor = float(args['cf']['default']) if 'cf' in args else 0
        if not 0 <= clustering_factor <= 1:
            return [False, "clustering factor must be between 0 and 1"]
        min_st = int(args['st_min']['default']) if 'st_min' in args else 100
        max_st = int(args['st_max']['default']) if 'st_max' in args else 600
        if min_st > max_st:
            return [False, "minimum starvation cannot be higher than maximum"]
        p_min = float(args['p_min']['default']) if 'p_min' in args else 0.05
        p_max = float(args['p_max']['default']) if 'p_max' in args else 0.3
        if p_min > p_max:
            return [False, "minimum probability cannot be higher than maximum"]
        width = 600
        height = 600
        self.tile_size = 10
        # TODO: implement random probability based on uniform distribution + probability factor
        # max_pg = float(args['max_pg']['default'])
        row_length = int(width/self.tile_size)+1
        column_length = int(height/self.tile_size)+1
        self.visit_points = []
        self.walls = []
        # cell markers: 0 = free, 1 = wall, 2 = visit point
        world_matrix = [[0 for _ in range(row_length)] for _ in range(column_length)]
        # generate walls
        rooms = self.divide(world_matrix, 0, 0, row_length, column_length, self.choose_orientation(row_length-1, column_length-1), wall_num)
        # generate cluster positions
        if(cluster_count > vertex_count):
            return [False, "number of clusters cannot be higher than number of vertexes"]
        clusters = []
        if cluster_count < len(rooms):
            rooms_with_clusteres = random.sample(rooms, cluster_count)
        else:
            rooms_with_clusteres = rooms
        for i in range(0, cluster_count):
            # choose room for cluster (round-robin over the sampled rooms)
            room = rooms_with_clusteres.pop(0)
            rooms_with_clusteres.append(room)
            tries = 0
            while True:
                cx = random.randrange(room['x']+1, room['x']+room['width']-1)
                cy = random.randrange(room['y']+1, room['y']+room['height']-1)
                if self.check_area(world_matrix, cx, cy, 1):
                    clusters.append([cx, cy, room])
                    self.visit_points.append(self.create_visit_point(cx, cy, min_st, max_st, self.random_p(p_min, p_max)))
                    world_matrix[cy][cx] = 2
                    break
                tries += 1
                if tries > TRIES:
                    # try placing in different room
                    room = rooms_with_clusteres.pop(0)
                    rooms_with_clusteres.append(room)
                    tries = 0
        # generate vertexes
        for i in range(0, vertex_count - cluster_count):
            tries = 0
            placed = False
            # use clustering factor to determine if vertex will belong to a cluster
            if random.uniform(0, 1) <= clustering_factor:
                # position vertex in one of the clusteres randomly
                cluster_tries = 0
                chosen_cluster = random.choice(clusters)
                while True:
                    # using gaussian curve to determine position inside cluster;
                    # clustering factor and number of failed placement attempts
                    # widen the standard deviation of the curve
                    vx = int(chosen_cluster[0] + np.random.normal(loc=0.0, scale=1 + 10*(1-clustering_factor) + 10*(tries/(TRIES))))
                    vy = int(chosen_cluster[1] + np.random.normal(loc=0.0, scale=1 + 10*(1-clustering_factor) + 10*(tries/(TRIES))))
                    if self.check_area(world_matrix, vx, vy, 1, chosen_cluster[2]):
                        placed = True
                        break
                    tries += 1
                    if tries > TRIES:
                        cluster_tries += 1
                        tries = 0
                        if cluster_tries > CLUSTER_TRIES:
                            break
                        chosen_cluster = random.choice(clusters)
            if not placed:
                # position vertex randomly in any room
                tries = 0
                room = random.choice(rooms)
                while True:
                    vx = random.randrange(room['x']+1, room['x']+room['width']-1)
                    vy = random.randrange(room['y']+1, room['y']+room['height']-1)
                    if self.check_area(world_matrix, vx, vy, 1):
                        break
                    tries += 1
                    if tries > TRIES:
                        tries = 0
                        room = random.choice(rooms)
            self.visit_points.append(self.create_visit_point(vx, vy, min_st, max_st, self.random_p(p_min, p_max)))
            world_matrix[vy][vx] = 2
        with open(path + '/' + name+'.world', "w+") as world_file:
            try:
                world_file.write(json.dumps({
                    "name": name,
                    "width": width,
                    "height": height,
                    "robot": {
                        "walk_speed": 10,
                        "start_point": 0
                    },
                    "obstacles": self.walls,
                    "visit_points": self.visit_points,
                }))
                return [True, ""]
            # BUG FIX: was a bare `except:` that returned a malformed
            # 4-element list ([False, msg, name, '.py']); catch concrete
            # serialisation/IO errors and return the standard
            # [ok, message] pair (and the file really is .world, not .py).
            except (TypeError, ValueError, OSError, IOError) as e:
                return [False, "there was a problem in creating " + name + ".world: " + str(e)]

    # --------------------------------------------------------------------
    # 3. Helper routines
    # --------------------------------------------------------------------
    # S/E markers appear unused in this class; kept for compatibility.
    S, E = 1, 2
    HORIZONTAL, VERTICAL = 1, 2

    def choose_orientation(self, width, height):
        """Pick a wall orientation for a width x height region: split
        across the longer side, breaking ties randomly; a region thinner
        than 7 tiles in one direction is split the only sensible way."""
        if width >= 7 and height < 7:
            return self.VERTICAL
        elif height >= 7 and width < 7:
            return self.HORIZONTAL
        elif width < height:
            return self.HORIZONTAL
        elif height < width:
            return self.VERTICAL
        else:
            return self.HORIZONTAL if random.randint(0, 1) == 0 else self.VERTICAL

    # --------------------------------------------------------------------
    # 4. The recursive-division wall generation algorithm
    # --------------------------------------------------------------------
    def divide(self, world_matrix, x, y, width, height, orientation, wall_num):
        """Recursively split the region (x, y, width, height) with walls.

        Marks wall tiles in `world_matrix`, records each wall as a
        [start, end] pixel segment in self.walls, and returns the list of
        leaf rooms as {'x', 'y', 'width', 'height'} dicts.
        """
        # stop when the region is too small or no walls remain to place
        if (width < 7 and height < 7) or wall_num == 0 or width < 4 or height < 4:
            return [{'x': x, 'y': y, 'width': width, 'height': height}]
        horizontal = orientation == self.HORIZONTAL
        # where will the wall be drawn from?
        wx = x + (0 if horizontal else random.randrange(3, width-3))
        wy = y + (random.randrange(3, height-3) if horizontal else 0)
        # opening = random.randint(2, int(width/2 + .5)) if horizontal else random.randint(2, int(height/2 + .5))
        opening = 2
        # where will the passage through the wall exist?
        # (1 = passage at the start of the wall, 0 = at the far end)
        start_of_wall = random.randint(0, 1)
        if horizontal:
            wx, wall_width = [wx + opening, width - opening] if start_of_wall else [wx, width - opening]
            if start_of_wall:
                # extend the wall to the region border where possible
                if(wx+wall_width != len(world_matrix[0])):
                    wall_width += 1
            else:
                if wx != 0:
                    wx, wall_width = [wx-1, wall_width+1]
        else:
            wy, wall_height = [wy + opening, height - opening] if start_of_wall else [wy, height - opening]
            if start_of_wall:
                if(wy+wall_height != len(world_matrix)):
                    wall_height += 1
            else:
                if wy != 0:
                    wy, wall_height = [wy - 1, wall_height + 1]
        # what direction will the wall be drawn?
        dx = 1 if horizontal else 0
        dy = 0 if horizontal else 1
        # how long will the wall be?
        length = wall_width if horizontal else wall_height
        # record the wall as a [start, end] segment in pixel coordinates
        wall = [[wx*self.tile_size, wy*self.tile_size]]
        wall.append([(wx+wall_width-1)*self.tile_size, wy*self.tile_size] if horizontal else [wx*self.tile_size, (wy+wall_height-1)*self.tile_size])
        self.walls.append(wall)
        for _ in range(length):
            world_matrix[wy][wx] = 1
            wx += dx
            wy += dy
        # recurse into the two sub-regions on either side of the wall;
        # remaining walls are distributed proportionally to the split position
        nx, ny = x, y
        w, h = [width, wy-y] if horizontal else [wx-x, height]
        wall_num -= 1
        wall_distribution_factor = wy / (height-1) if horizontal else wx / (width-1)
        wall_count1 = int(wall_distribution_factor*wall_num)
        wall_count2 = wall_num - wall_count1
        rooms = []
        rooms += self.divide(world_matrix, nx, ny, w, h, self.choose_orientation(w, h), wall_count1)
        nx, ny = [x, wy+1] if horizontal else [wx+1, y]
        w, h = [width, y+height-wy-1] if horizontal else [x+width-wx-1, height]
        rooms += self.divide(world_matrix, nx, ny, w, h, self.choose_orientation(w, h), wall_count2)
        return rooms

    def check_area(self, world_matrix, x, y, distance, room=None):
        """Return True when the (2*distance+1)^2 square around (x, y) lies
        inside the world (and strictly inside `room`, when given) and
        contains only free cells."""
        # check if within boundaries of the world
        if x - distance < 0 or x + distance > len(world_matrix[0])-1 or y - distance < 0 or y + distance > len(world_matrix)-1:
            return False
        # check if within boundaries of room, if room exists
        if room and (not room['x'] < x < room['x']+room['width'] or not room['y'] < y < room['y']+room['height']):
            return False
        empty = True
        for i in range(x-distance, x + distance+1):
            for j in range(y - distance, y + distance+1):
                empty = empty and world_matrix[j][i] == 0
        return empty

    def create_visit_point(self, x, y, min_st, max_st, probability):
        """Build a visit-point dict at tile (x, y) with a random
        starvation time drawn from [min_st, max_st) in steps of 10."""
        return {
            "position": [x*self.tile_size, y*self.tile_size],
            "starvation": random.randrange(min_st, max_st, 10),
            "probability": probability
        }

    def random_p(self, min_p, max_p):
        """Random probability in [min_p, max_p) with 0.02 granularity;
        the bounds are clamped up to at least 0.01 / 0.05 respectively."""
        if min_p == max_p:
            return min_p
        min_p = 0.01 if min_p < 0.01 else min_p
        max_p = 0.05 if max_p < 0.05 else max_p
        return random.randrange(int(min_p*100), int(max_p*100), 2)/100
|
class Solution:
    # O(n) time, O(n) space
    def findErrorNums(self, nums: List[int]) -> List[int]:
        """Return [duplicate, missing] for `nums`, a permutation of 1..n
        in which exactly one value appears twice and one is absent.

        sum(nums) - sum(set(nums)) isolates the duplicated value; the
        missing value is the gap between the expected sum of 1..n and
        the sum of the distinct values.  This replaces the original
        two-pass scan (whose second loop also kept scanning after the
        missing number was found).
        """
        seen = set(nums)
        duplicate = sum(nums) - sum(seen)
        n = len(nums)
        missing = n * (n + 1) // 2 - sum(seen)
        return [duplicate, missing]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 16:12:10 2019
@author: Anthony
"""
# From the ashes of version 5 version 6 returns anew.
# I'm starting to make enough changes in the formatting that a new version number is in order
# Nothing fundamentally has changed, it's just significantly prettier
###############################################################################
# Section 0: Importing Libraries
###############################################################################
# Responsible for creating all of our pretty graphs at the end
import matplotlib.pyplot as plt
# The bread and butter of working with scientific Python
# Goal is to take more use from this through arrays
import numpy as np
# Very excellent for dealing with files
import tkinter as tk
# Helps us pull up that nice little dialogue box
import tkinter.filedialog as fd
# Really just after curve_fit right now
import scipy as sc
from scipy.optimize import curve_fit
# Used for dealing with directories and whatnot
import os
# regexp has a wonderful little thing to pull out bits from strings
# Need this for pulling variable data from
import re
###############################################################################
# Section 1: Loading in files
###############################################################################
# This section is all about finding all of the data from whatever directory
# A diaglogue window will popup asking for where the folders are
# You'll tell it the overarching folder where all of the data folders are
# Data folders should be in the form ***Force_***Potential
# It will pull the Force for each data set based on that folder name
# This diaglogue window will start wherever initialdir says
# You can always just move away from that folder
# If it throughs an error, change so initialdir = '/'
# Hide the root Tk window -- we only want the directory-chooser dialog.
root = tk.Tk()
root.withdraw()
startdir = fd.askdirectory(initialdir = '/',title='Where are the folders?')
#startdir = '/home/anthony'
# Then tell Python where we're going to be working from
os.chdir(startdir)
print('Working out of %s' % startdir)
# And we'll figure out everything that's in the folder we've selected
biglist = (os.listdir(startdir))
# But now we need to isolate only the folders with data that we want
# We'll save them all here
foldlist = []
# Now we check if the folders match our naming scheme
for i in range(0, len(biglist)):
    # This is kind of a sloppy workaround, but whatever
    # (any entry whose name contains 'Pot' is treated as a data folder)
    if 'Pot' in biglist[i]:
        foldlist.append(biglist[i])
# Now we'll have a list of only the folders with good stuff in them
# We're going to make a list of all data
datalist = []
# It'd be nice to know what forces and gammas are used too
forcelist = []
gammalist = []
for i in range(0,len(foldlist)):
    # Start with a blank file list each time
    # This creates a string of the path name for a particular data folder
    path = (startdir + '/' + foldlist[i])
    # Now we change directory to that particular folder
    # Remember to change back afterwards!
    os.chdir(path)
    # And produce a list of those files in our folder
    filelist = (os.listdir(path))
    for j in range(0,len(filelist)):
        # Now we're going to go through a load each of the files
        # Give this thing a name for what we're going to be saving
        # tempname is going to be the ***Gam.dat file
        tempname = filelist[j]
        # Combine with folder name, trim off '.dat'
        newname = foldlist[i] + '_' + tempname[:-4]
        # Load a data file into a temporary variable
        # This now has six columns of data we want
        tempload = np.genfromtxt(filelist[j])
        # From the file name, pull every numeric token.
        # NOTE(review): rs[0] = force and rs[2] = gamma only holds under
        # the ***Force_***Potential / ***Gam.dat naming scheme described
        # at the top of this file -- confirm if the scheme ever changes.
        rs = re.findall("[-+]?[.]?[\d]+(?:,\d\d\d)*[\.]?\d*(?:[eE][-+]?\d+)?",newname)
        tempgam = float(rs[2])
        gammalist.append(tempgam)
        tempforce = float(rs[0])
        forcelist.append(tempforce)
        for k in range(0,len(tempload)):
            # This is in a particular order for data
            # Element 1 is the force F0, element 2 is the gamma
            # element 3 is the phi, element 4 is the average velocity
            # element 5 is the error of that velocity, 6 is the diffusion
            # 7 the error in diffusion, 8 the Peclet number
            # Filthy backend, clean frontend
            temptuple = (tempforce,tempgam,tempload[k,0],tempload[k,1],tempload[k,2],tempload[k,3],tempload[k,4],tempload[k,5])
            datalist.append(temptuple)
# Now we have this big ol' list of all the numbers we want
# Turn it into an array
darray = np.array(datalist)
# Now any time we want a particular value, we just have to use
# array[array[:,*] == value]
# Lastly, remove any duplicates from the forces and gammas
forcelist = list(set(forcelist))
gammalist = list(set(gammalist))
gammalist.sort(key=float)
forcelist.sort(key=float)
###############################################################################
# Section 2: Analysis
###############################################################################
# We don't really need the breaking apart like from version 2
# Give the function that we'll be fitting with
# x is the phis we feed in
def func(x,A,phi0):
    """Fit model for velocity vs phase: A * sin(x - phi0).

    `x` may be a scalar or a numpy array; A is the amplitude and phi0
    the phase offset estimated by curve_fit.
    """
    shifted = np.sin(x - phi0)
    return A * shifted
# Fit the sine model to every (force, gamma) data subset and collect
# (force, gamma, A, phi0) tuples.
fittedlist = []
for i in range(0,len(forcelist)):
    # Let's pull out all of the data with a single force
    tempforcearray = darray[darray[:,0] == forcelist[i]]
    # Now we want to do a similar thing to get the gammas pulled out individually
    for j in range(0,len(gammalist)):
        # This is an array of ONLY one force and one gamma at a time
        tempgammaarray = tempforcearray[tempforcearray[:,1] == gammalist[j]]
        # Fit our function to the data from this set
        # fittemp[0] is A, fittemp[1] is phi0
        # Set bounds so that A can't be smaller than 0
        # phi can only be between negative and positive pi
        # NOTE(review): the bounds actually allow phi0 in (-0.6*pi, 1.5*pi),
        # not (-pi, pi) as the comment above claims -- confirm the intent.
        fittemp,fiterror = curve_fit(func,tempgammaarray[:,2],tempgammaarray[:,3],bounds=((0,-.6*np.pi),(1000000,1.5*np.pi)))
        # Now we'll save the force, gamma, A, and phi0
        temptuple = (forcelist[i],gammalist[j],fittemp[0],fittemp[1])
        fittedlist.append(temptuple)
# And I like these NumPy arrays so I'll convert the fittedlist
fittedarray = np.array(fittedlist)
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#
# Doing the Peclet Shit
#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#!#
# For every (force, gamma) pair, record the maximum Peclet number
# (column 7 of darray) over all phases.
peclist = []
for i in range(0,len(forcelist)):
    # Let's pull out all of the data with a single force
    tempforcearray = darray[darray[:,0] == forcelist[i]]
    # Now we want to do a similar thing to get the gammas pulled out individually
    for j in range(0,len(gammalist)):
        # This is an array of ONLY one force and one gamma at a time
        tempgammaarray = tempforcearray[tempforcearray[:,1] == gammalist[j]]
        # ...but instead finds the max Peclet number of all of them
        # Just in case that number isn't exactly at the pi/2 phase
        maxPec = max(tempgammaarray[:,7])
        # Same old method of saving to a tuple
        temptuple = (forcelist[i],gammalist[j],maxPec)
        # Then putting it all into a big list of tuples
        peclist.append(temptuple)
# Now we define a function for plotting like the later sections
def pecplot():
    """3-D surface of max Peclet number vs driving force and gamma,
    built from the module-level peclist."""
    # Convert that list of tuples into a Numpy array
    pecarray = np.asarray(peclist)
    # Then break it into generic x, y, and z
    x = pecarray[:,0] # This is magnitude of force F0
    y = pecarray[:,1] # This is gammaP value
    z = pecarray[:,2] # This is the corresponding Peclet number
    fig = plt.figure()
    # BUG FIX: plt.gca(projection='3d') was removed in Matplotlib 3.6;
    # create the 3-D axes explicitly on the figure instead.
    ax = fig.add_subplot(projection='3d')
    # BUG FIX: plot_surface requires 2-D coordinate grids and raises on
    # the 1-D point lists built above; plot_trisurf triangulates
    # scattered 1-D (x, y, z) points directly.
    surf = ax.plot_trisurf(x, y, z)
    fig.colorbar(surf)
    plt.show()
###############################################################################
# Secton 3: Plotting
###############################################################################
# Now all that's left is plotting the points together
# One line colour per gamma value.  NOTE(review): only four entries, so a
# data set with more than four gammas would raise IndexError where the
# plotting functions index colorset[j].
colorset = ['r','b','g','m']
def phiplot():
    """Plot average velocity versus drive phase for each (force, gamma) pair.

    Produces one figure per force in ``forcelist``: the raw <v> data points
    and the fitted curve ``func(phi, A, phi0)`` for every gamma, plus a zero
    reference line. Uses the module-level ``darray``, ``fittedarray``,
    ``func``, ``forcelist``, ``gammalist`` and ``colorset``.
    """
    for i in range(0,len(forcelist)):
        # Raw data rows sharing this force...
        tempforcearray = darray[darray[:,0] == forcelist[i]]
        # ...and the matching fitted (A, phi0) rows from the curve fits.
        tempfitarray = fittedarray[fittedarray[:,0] == forcelist[i]]
        for j in range(0,len(gammalist)):
            # Narrow both arrays to a single gamma.
            tempgammaarray = tempforcearray[tempforcearray[:,1]==gammalist[j]]
            tmpfitarray2 = tempfitarray[tempfitarray[:,1] == gammalist[j]]
            aaa = len(gammalist)
            # NOTE(review): ``color`` is computed but never used below --
            # the plot calls use ``colorset[j]`` instead (which also limits
            # this to four gammas). Presumably a leftover color scheme;
            # confirm before removing.
            color = ((aaa-j)/aaa,0,j/aaa)
            # Raw data points, one marker series per gamma.
            plt.plot(tempgammaarray[:,2],tempgammaarray[:,3],'o',color=colorset[j],label=r"$\Gamma' = %s\omega_r$" % gammalist[j],xunits=np.radians)
            # Fitted curve evaluated at the same phases, using the fitted
            # amplitude (col 2) and phase offset (col 3).
            ydata = func(tempgammaarray[:,2],tmpfitarray2[0,2],tmpfitarray2[0,3],)
            plt.plot(tempgammaarray[:,2],ydata,color=colorset[j],xunits=np.radians)
        # Zero-velocity reference line across one full phase period.
        plt.hlines(0,0,2*np.pi,colors='k',alpha=0.5,linestyles='--')
        ax = plt.gca()
        plt.legend(loc='center left', bbox_to_anchor=(1,0.5))
        plt.ylabel(r'$Average\ velocity\ \langle v\rangle /v_r$')
        plt.xlabel(r'$\phi\ (radians)$')
        # Label the x axis in multiples of pi.
        ax.set_xticks([0,.5*np.pi,np.pi,1.5*np.pi,2*np.pi])
        ax.set_xticklabels(["$0$",r"$\frac{1}{2}\pi$",r"$\pi$",r"$\frac{3}{2}\pi$",r"$2\pi$"])
        plt.title(r'$Average\ velocity\ for\ F_o = %s F_r$' % forcelist[i])
        plt.show()
# Plots of the fitted amplitude A (the "atomic current" I).
def IplotA():
    """Plot the fitted atomic current I against scattering rate.

    Draws one curve per driving force in ``forcelist`` from the fitted
    amplitudes stored in the module-level ``fittedarray``.
    """
    for force in forcelist:
        # Fitted rows belonging to this driving force.
        rows = fittedarray[fittedarray[:, 0] == force]
        plt.plot(rows[:, 1], rows[:, 2], label=r"$F_o = %s$" % force)
    plt.xlabel(r"$\Gamma'/\omega_r$")
    plt.ylabel('Atomic current ' + r'$I$')
    plt.title('Atomic current ' + r'$I$' + ' versus scattering rate')
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
def IplotB():
    """Plot the fitted atomic current I against driving force.

    Draws one curve per scattering rate in ``gammalist`` from the fitted
    amplitudes stored in the module-level ``fittedarray``.
    """
    for gamma in gammalist:
        # Fitted rows belonging to this scattering rate.
        rows = fittedarray[fittedarray[:, 1] == gamma]
        plt.plot(rows[:, 0], rows[:, 2],
                 label=r"$\Gamma'=%s\omega_r$" % gamma)
    plt.xlabel(r"$Driving\ force\ F_o/F_r$")
    plt.ylabel('Atomic Current ' + r'$I$')
    plt.title('Atomic current ' + r'$I$' + ' versus force')
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
# Plots of the fitted phase offset phi0.
def phiplotA():
    """Plot the fitted phase offset phi0 against scattering rate.

    Draws one curve per driving force in ``forcelist`` from the fitted
    phases stored in the module-level ``fittedarray``.
    """
    for force in forcelist:
        rows = fittedarray[fittedarray[:, 0] == force]
        plt.plot(rows[:, 1], rows[:, 3], label=r"$F_o = %s$" % force)
    ax = plt.gca()
    # Label the y axis in multiples of pi.
    ax.set_yticks([0, .5 * np.pi, np.pi])
    ax.set_yticklabels(["$0$", r"$\frac{1}{2}\pi$", r"$\pi$"])
    plt.xlabel(r"$Scattering\ rate\ \Gamma'/\omega_r$")
    plt.ylabel(r"$Phase\ \phi_o\ (radians)$")
    plt.title('Phase offset versus scattering rate')
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
def phiplotB():
    """Plot the fitted phase offset phi0 against driving force.

    Draws one curve per scattering rate in ``gammalist`` from the fitted
    phases stored in the module-level ``fittedarray``.
    """
    for gamma in gammalist:
        rows = fittedarray[fittedarray[:, 1] == gamma]
        plt.plot(rows[:, 0], rows[:, 3],
                 label=r"$\Gamma'=%s\omega_r$" % gamma)
    ax = plt.gca()
    plt.xlabel(r"$Driving\ force\ F_o/E_r$")
    # Label the y axis in multiples of pi.
    ax.set_yticks([0, .5 * np.pi, np.pi, 1.5 * np.pi])
    ax.set_yticklabels(["$0$", r"$\frac{1}{2}\pi$", r"$\pi$",
                        r"$\frac{3}{2}\pi$"])
    plt.ylabel(r"$Phase\ \phi_o\ (radians)$")
    plt.title('Phase offset versus driving force')
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()
# Here is the tkinter/menu mini-section.
# All plotting is defined as functions above; each is attached to a button
# in a small pop-up menu so plots can be produced individually on demand.
# Version 4.2 is looking to turn the tk section into a class rather than
# free lines, with the goal of fixing the bug where an extra tk window
# pops up.
root = tk.Tk()
frame = tk.Frame(root)
frame.pack()
# One button per plotting function defined above.
plotopt1 = tk.Button(frame,text="Phi plot",command=phiplot)
plotopt1.pack()
plotopt2 = tk.Button(frame,text="Amplitude versus gamma",command=IplotA)
plotopt2.pack()
plotopt3 = tk.Button(frame,text="Amplitude versus force",command=IplotB)
plotopt3.pack()
plotopt4 = tk.Button(frame,text="Phase versus gamma",command=phiplotA)
plotopt4.pack()
plotopt5 = tk.Button(frame,text="Phase versus force",command=phiplotB)
plotopt5.pack()
plotopt6 = tk.Button(frame,text="Peclet Surface",command=pecplot)
plotopt6.pack()
# Red exit button closes the menu window and ends the event loop.
button = tk.Button(frame,text="(Click to exit)",fg="red",command=root.destroy)
button.pack(side="bottom")
tk.mainloop()
# Small demo script: variables, numeric types, tuple unpacking, and
# formatted printing.
print("dasdasda")

# An integer, a derived float, and an explicit float conversion.
zmienna = 1
zmienna2 = zmienna * 0.5
rzeczywista = float(35)
for wartosc in (zmienna, zmienna2, rzeczywista):
    print(wartosc)

# Fixed-width float formatting: a 20-character right-aligned field.
x = 4.3
print(x)
print("%20f" % x)
print(x)

# Tuple unpacking assigns both names in one statement.
a, b = 5, 10
print(a)
print(b)
print(a + b)

# Print the labelled value only when it is an integer.
napis = "To jest liczba parzysta"
if isinstance(a, int):
    print("%d %s" % (a, napis))
|
from keras.layers import Input, Dense
from keras.datasets import mnist
from keras.models import Model
import matplotlib.pyplot as plt

# Model configuration: MNIST digits are 28x28 images, flattened to 784.
img_width, img_height = 28, 28
initial_dimension = img_width * img_height
# Load MNIST dataset (the targets are unused: an autoencoder learns x -> x).
(input_train, target_train), (input_test, target_test) = mnist.load_data()
# Flatten each image into a 784-element vector for the Dense layers.
input_train = input_train.reshape(input_train.shape[0], initial_dimension)
input_test = input_test.reshape(input_test.shape[0], initial_dimension)
input_shape = (initial_dimension, )
# Parse numbers as floats
input_train = input_train.astype('float32')
input_test = input_test.astype('float32')
# Scale pixel values from [0, 255] into [0, 1] to match the sigmoid output.
input_train = input_train / 255
input_test = input_test / 255
# Define the layers: 784 inputs -> 50-unit bottleneck -> 784 outputs.
encoded_dim = 50
inputs = Input(shape=input_shape)
encoding_layer = Dense(encoded_dim, activation='relu', kernel_initializer='he_normal')(inputs)
decoding_layer = Dense(initial_dimension, activation='sigmoid')(encoding_layer)
# Instantiate the autoencoder
autoencoder = Model(inputs, decoding_layer, name='full_autoencoder')
# Instantiate the encoder (shares layers and weights with the autoencoder).
encoder = Model(inputs, encoding_layer, name='encoder')
# NOTE(review): this Input was presumably meant for a standalone decoder
# model, but it is never used below -- confirm and either finish the
# decoder or remove it.
encoded_input = Input(shape=(encoded_dim, ))
# Compile the models.
# NOTE(review): compiling the encoder is not required for predict(); kept
# only to preserve the original behaviour.
encoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Train the autoencoder to reconstruct its own input.
autoencoder.fit(input_train, input_train, epochs=10, batch_size=128, validation_split=0.2)
# Visualize a sample: first test image and its reconstruction.
input_sample = input_test[:1]
reconstruction = autoencoder.predict([input_sample])
# Plot the sample input and reconstruction side by side.
fig, axes = plt.subplots(1, 2)
fig.set_size_inches(6, 3.5)
input_sample_reshaped = input_sample.reshape((img_width, img_height))
reconstruction_reshaped = reconstruction.reshape((img_width, img_height))
axes[0].imshow(input_sample_reshaped)
axes[0].set_title('Original image')
axes[1].imshow(reconstruction_reshaped)
axes[1].set_title('Reconstruction')
plt.show()
# Visualize the 50-dimensional encoded state, rendered as a 2x25 image.
encoded_imgs = encoder.predict(input_test[:1])
plt.figure(figsize=(20, 8))
plt.imshow(encoded_imgs[0].reshape(2, 25).T)
plt.gray()
plt.show()
|
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for `manila.wsgi`."""
import os.path
import ssl
import tempfile
import ddt
import eventlet
import mock
from oslo_config import cfg
from oslo_utils import netutils
import six
from six.moves import urllib
import testtools
import webob
import webob.dec
from manila.api.middleware import fault
from manila import exception
from manila import test
from manila import utils
import manila.wsgi
CONF = cfg.CONF
# Fixture directory ("var") next to this test module; holds the SSL
# certificate/key used by the HTTPS server tests below.
TEST_VAR_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                            'var'))
class TestLoaderNothingExists(test.TestCase):
    """Loader behaviour when the paste config file is absent."""

    def test_config_not_found(self):
        # Pointing the loader at a missing file must surface as the
        # domain-specific ConfigNotFound, not a lower-level IO error.
        with self.assertRaises(manila.exception.ConfigNotFound):
            manila.wsgi.Loader('nonexistent_file.ini')
class TestLoaderNormalFilesystem(test.TestCase):
    """Loader tests with normal filesystem (unmodified os.path module)."""

    # Minimal PasteDeploy config defining one static-file app rooted at /tmp.
    _paste_config = """
[app:test_app]
use = egg:Paste#static
document_root = /tmp
"""

    def setUp(self):
        super(TestLoaderNormalFilesystem, self).setUp()
        # Write the paste config to a real temporary file so the Loader can
        # read it from disk; lstrip() drops the string's leading newline.
        self.config = tempfile.NamedTemporaryFile(mode="w+t")
        self.config.write(self._paste_config.lstrip())
        self.config.seek(0)
        self.config.flush()
        self.loader = manila.wsgi.Loader(self.config.name)
        self.addCleanup(self.config.close)

    def test_config_found(self):
        # The loader records the path of the config file it was given.
        self.assertEqual(self.config.name, self.loader.config_path)

    def test_app_not_found(self):
        # Requesting an app name missing from the config raises
        # PasteAppNotFound.
        self.assertRaises(
            manila.exception.PasteAppNotFound,
            self.loader.load_app,
            "non-existent app",
        )

    def test_app_found(self):
        url_parser = self.loader.load_app("test_app")
        # The static app serves the configured document_root.
        self.assertEqual("/tmp", url_parser.directory)
@ddt.ddt
class TestWSGIServer(test.TestCase):
    """WSGI server tests."""

    def test_no_app(self):
        # A server can be constructed without an application; only the
        # name must be recorded.
        server = manila.wsgi.Server("test_app", None, host="127.0.0.1", port=0)
        self.assertEqual("test_app", server.name)

    def test_start_random_port(self):
        # Omitting the port makes start() bind an OS-assigned free port.
        server = manila.wsgi.Server("test_random_port", None, host="127.0.0.1")
        server.start()
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    @testtools.skipIf(not netutils.is_ipv6_enabled(),
                      "Test requires an IPV6 configured interface")
    @testtools.skipIf(utils.is_eventlet_bug105(),
                      'Eventlet bug #105 affect test results.')
    def test_start_random_port_with_ipv6(self):
        # Same OS-assigned-port behaviour, bound to the IPv6 loopback.
        server = manila.wsgi.Server("test_random_port",
                                    None,
                                    host="::1")
        server.start()
        self.assertEqual("::1", server.host)
        self.assertNotEqual(0, server.port)
        server.stop()
        server.wait()

    def test_start_with_default_tcp_options(self):
        # With default config the server enables TCP keepalive with a
        # 600-second idle time and library-default count/interval.
        server = manila.wsgi.Server("test_tcp_options",
                                    None,
                                    host="127.0.0.1")
        self.mock_object(
            netutils, 'set_tcp_keepalive')
        server.start()
        netutils.set_tcp_keepalive.assert_called_once_with(
            mock.ANY, tcp_keepalive=True, tcp_keepalive_count=None,
            tcp_keepalive_interval=None, tcp_keepidle=600)

    def test_start_with_custom_tcp_options(self):
        # Config overrides must be passed through to set_tcp_keepalive
        # verbatim.
        CONF.set_default("tcp_keepalive", False)
        CONF.set_default("tcp_keepalive_count", 33)
        CONF.set_default("tcp_keepalive_interval", 22)
        CONF.set_default("tcp_keepidle", 11)
        server = manila.wsgi.Server("test_tcp_options",
                                    None,
                                    host="127.0.0.1")
        self.mock_object(
            netutils, 'set_tcp_keepalive')
        server.start()
        netutils.set_tcp_keepalive.assert_called_once_with(
            mock.ANY, tcp_keepalive=False, tcp_keepalive_count=33,
            tcp_keepalive_interval=22, tcp_keepidle=11)

    def test_app(self):
        # Wrap eventlet.spawn so call arguments can be inspected while
        # still actually spawning the server greenthread.
        self.mock_object(
            eventlet, 'spawn', mock.Mock(side_effect=eventlet.spawn))
        greetings = 'Hello, World!!!'

        def hello_world(env, start_response):
            # Minimal WSGI app: greet on "/", 404 anything else.
            if env['PATH_INFO'] != '/':
                start_response('404 Not Found',
                               [('Content-Type', 'text/plain')])
                return ['Not Found\r\n']
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [greetings]

        server = manila.wsgi.Server(
            "test_app", hello_world, host="127.0.0.1", port=0)
        server.start()
        response = urllib.request.urlopen('http://127.0.0.1:%d/' % server.port)
        self.assertEqual(six.b(greetings), response.read())
        # Verify provided parameters to eventlet.spawn func
        eventlet.spawn.assert_called_once_with(
            func=eventlet.wsgi.server,
            sock=mock.ANY,
            site=server.app,
            protocol=server._protocol,
            custom_pool=server._pool,
            log=server._logger,
            socket_timeout=server.client_socket_timeout,
            keepalive=manila.wsgi.CONF.wsgi_keep_alive,
        )
        server.stop()

    @ddt.data(0, 0.1, 1, None)
    def test_init_server_with_socket_timeout(self, client_socket_timeout):
        # The configured client_socket_timeout must be adopted verbatim,
        # including falsy values such as 0 and None.
        CONF.set_default("client_socket_timeout", client_socket_timeout)
        server = manila.wsgi.Server(
            "test_app", lambda *args, **kwargs: None, host="127.0.0.1", port=0)
        self.assertEqual(client_socket_timeout, server.client_socket_timeout)

    @testtools.skipIf(six.PY3, "bug/1482633")
    def test_app_using_ssl(self):
        # Point the server at the self-signed certificate fixtures.
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))
        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = manila.wsgi.Server(
            "test_app", hello_world, host="127.0.0.1", port=0)
        server.start()
        # The fixture cert is self-signed, so skip verification when the
        # ssl module offers an unverified context.
        if hasattr(ssl, '_create_unverified_context'):
            response = urllib.request.urlopen(
                'https://127.0.0.1:%d/' % server.port,
                context=ssl._create_unverified_context())
        else:
            response = urllib.request.urlopen(
                'https://127.0.0.1:%d/' % server.port)
        self.assertEqual(greetings, response.read())
        server.stop()

    @testtools.skipIf(not netutils.is_ipv6_enabled(),
                      "Test requires an IPV6 configured interface")
    @testtools.skipIf(utils.is_eventlet_bug105(),
                      'Eventlet bug #105 affect test results.')
    @testtools.skipIf(six.PY3, "bug/1482633")
    def test_app_using_ipv6_and_ssl(self):
        # Same HTTPS round-trip as above, over the IPv6 loopback.
        CONF.set_default("ssl_cert_file",
                         os.path.join(TEST_VAR_DIR, 'certificate.crt'))
        CONF.set_default("ssl_key_file",
                         os.path.join(TEST_VAR_DIR, 'privatekey.key'))
        greetings = 'Hello, World!!!'

        @webob.dec.wsgify
        def hello_world(req):
            return greetings

        server = manila.wsgi.Server("test_app",
                                    hello_world,
                                    host="::1",
                                    port=0)
        server.start()
        if hasattr(ssl, '_create_unverified_context'):
            response = urllib.request.urlopen(
                'https://[::1]:%d/' % server.port,
                context=ssl._create_unverified_context())
        else:
            response = urllib.request.urlopen(
                'https://[::1]:%d/' % server.port)
        self.assertEqual(greetings, response.read())
        server.stop()

    def test_reset_pool_size_to_default(self):
        server = manila.wsgi.Server("test_resize", None, host="127.0.0.1")
        server.start()
        # Stopping the server, which in turn sets pool size to 0
        server.stop()
        self.assertEqual(0, server._pool.size)
        # Resetting pool size to default (1000) and restarting.
        server.reset()
        server.start()
        self.assertEqual(1000, server._pool.size)
class ExceptionTest(test.TestCase):
    """Tests for how FaultWrapper converts exceptions into HTTP faults."""

    def _wsgi_app(self, inner_app):
        # Wrap the app in the fault middleware under test.
        return fault.FaultWrapper(inner_app)

    def _do_test_exception_safety_reflected_in_faults(self, expose):
        class ExceptionWithSafety(exception.ManilaException):
            safe = expose

        @webob.dec.wsgify
        def fail(req):
            raise ExceptionWithSafety('some explanation')

        api = self._wsgi_app(fail)
        resp = webob.Request.blank('/').get_response(api)
        self.assertIn('{"computeFault', six.text_type(resp.body), resp.body)
        # "Safe" exceptions expose their message to the client; unsafe ones
        # get a generic error body so internals are not leaked.
        expected = ('ExceptionWithSafety: some explanation' if expose else
                    'The server has either erred or is incapable '
                    'of performing the requested operation.')
        self.assertIn(expected, six.text_type(resp.body), resp.body)
        self.assertEqual(500, resp.status_int, resp.body)

    def test_safe_exceptions_are_described_in_faults(self):
        self._do_test_exception_safety_reflected_in_faults(True)

    def test_unsafe_exceptions_are_not_described_in_faults(self):
        self._do_test_exception_safety_reflected_in_faults(False)

    def _do_test_exception_mapping(self, exception_type, msg):
        @webob.dec.wsgify
        def fail(req):
            raise exception_type(msg)

        api = self._wsgi_app(fail)
        resp = webob.Request.blank('/').get_response(api)
        self.assertIn(msg, six.text_type(resp.body), resp.body)
        # The exception's ``code`` attribute becomes the HTTP status, and
        # any declared headers must be copied onto the response.
        self.assertEqual(exception_type.code, resp.status_int, resp.body)
        if hasattr(exception_type, 'headers'):
            for (key, value) in exception_type.headers.items():
                self.assertTrue(key in resp.headers)
                self.assertEqual(value, resp.headers[key])

    def test_quota_error_mapping(self):
        self._do_test_exception_mapping(exception.QuotaError, 'too many used')

    def test_non_manila_notfound_exception_mapping(self):
        # Non-Manila exceptions with a ``code`` attribute map the same way.
        class ExceptionWithCode(Exception):
            code = 404

        self._do_test_exception_mapping(ExceptionWithCode,
                                        'NotFound')

    def test_non_manila_exception_mapping(self):
        class ExceptionWithCode(Exception):
            code = 417

        self._do_test_exception_mapping(ExceptionWithCode,
                                        'Expectation failed')

    def test_exception_with_none_code_throws_500(self):
        # A None status code cannot be mapped, so the wrapper must fall
        # back to a 500 response.
        class ExceptionWithNoneCode(Exception):
            code = None

        @webob.dec.wsgify
        def fail(req):
            raise ExceptionWithNoneCode()

        api = self._wsgi_app(fail)
        resp = webob.Request.blank('/').get_response(api)
        self.assertEqual(500, resp.status_int)
|
import os

import cloudconvert

# SECURITY FIX: the CloudConvert API key used to be hard-coded here.
# Credentials must never be committed to source control -- read the key
# from the environment instead (export CLOUDCONVERT_API_KEY=...).
api = cloudconvert.Api(os.environ['CLOUDCONVERT_API_KEY'])
# Start a Markdown -> reStructuredText conversion, uploading a local file.
# NOTE(review): the file handle is left open as in the original; confirm
# whether the cloudconvert client closes it after the upload completes.
process = api.convert({
    'inputformat': 'md',
    'outputformat': 'rst',
    'input': 'upload',
    'file': open('./mytest.md', 'rb')
})
process.wait()  # wait until conversion finished
process.download("./mytest.rst")  # download output file
|
from django.contrib import admin
from .models import *
from unittester.models import UnitTest
class ApplicationAdmin(admin.ModelAdmin):
    # Show ordering index and name in the Application change list.
    list_display = ('order', 'name',)
class TestSectionAdmin(admin.ModelAdmin):
    # Change-list columns; filterable by owning application.
    list_display = ('order', 'name', 'app')
    list_filter = ('app',)
class UnitTestInline(admin.TabularInline):
    # Edit the UnitTest<->TestCase many-to-many through-model rows inline.
    model = UnitTest.testcases.through
class TestCaseAdmin(admin.ModelAdmin):
    # Change-list columns and filters for test cases.
    list_display = ('id', 'order', 'name', 'section', 'active',
                    'autotest_exists')
    list_filter = ('section', 'section__app', 'active', 'autotest_exists')
    # Text search over the descriptive fields.
    search_fields = ('name', 'description', 'action', 'expected')
    # Show the linked unit tests directly on the test-case page.
    inlines = [
        UnitTestInline,
    ]
class TestCaseRunAdmin(admin.ModelAdmin):
    # Columns and filters for individual test-case executions.
    list_display = ('id', 'testcase', 'testrun', 'status', 'editor',
                    'modified')
    list_filter = ('testcase__section', 'testrun', 'status', 'testrun__date',
                   'editor')
class TestRunAdmin(admin.ModelAdmin):
    # Columns and filters for whole test runs.
    list_display = ('id', 'name', 'version', 'date', 'user')
    list_filter = ('version', 'date', 'user')
# Register each model with its customized admin class.
admin.site.register(Application, ApplicationAdmin)
admin.site.register(TestSection, TestSectionAdmin)
admin.site.register(TestCase, TestCaseAdmin)
admin.site.register(TestRun, TestRunAdmin)
admin.site.register(TestCaseRun, TestCaseRunAdmin)
|
import unittest
from tax import calc_tax


class TestCalcTax(unittest.TestCase):
    """Unit tests for tax.calc_tax: results, type checks and validation."""

    def test_calc_tax_with_ten_percent(self):
        self.assertEqual(10, calc_tax(100, 0.1))

    def test_calc_tax_with_fourteen_percent_with_almost_equal(self):
        # Floating-point result: compare to 7 places, not exactly.
        self.assertAlmostEqual(14, calc_tax(100, 0.14))

    def test_calc_tax_with_incorrect_amount_type_should_raise_error(self):
        with self.assertRaises(TypeError):
            calc_tax('ten', 0.23)

    def test_calc_tax_with_incorrect_tax_rate_type_should_raise_error(self):
        with self.assertRaises(TypeError):
            calc_tax(100, '0.23')

    def test_calc_tax_with_incorrect_tax_rate_should_raise_error(self):
        # The rate must lie strictly between 0 and 1.
        with self.assertRaises(ValueError):
            calc_tax(100, 1.0)
        with self.assertRaises(ValueError):
            calc_tax(100, 0.0)

    def test_calc_tax_with_incorrect_negative_amount_should_raise_error(self):
        with self.assertRaises(ValueError):
            calc_tax(-100, 0.23)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.